System information
Describe the current behavior
When calling a model during training (e.g., from a batch generator), an AttributeError related to thread-local data is raised. This occurs in Keras 2.3.0 but not in Keras <= 2.2.5; the thread-local storage involved appears to have been introduced in Keras 2.3.0.
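For context, this is the underlying Python behavior rather than anything Keras-specific: an attribute set on a threading.local() in one thread does not exist in any other thread. A minimal sketch (my own illustration, not Keras code):

import threading

local_state = threading.local()
local_state.value = True  # set on the main thread

def worker():
    # Raises AttributeError: '_thread._local' object has no attribute 'value',
    # because attributes set on the main thread are invisible here.
    print(local_state.value)

threading.Thread(target=worker).start()

fit_generator with workers > 0 runs the generator on just such a worker thread, which appears to be why model.predict fails there.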
Describe the expected behavior
I expect the model to be callable during training, as has been the case in prior Keras versions. The test code is intentionally trivial to demonstrate the issue. In practice, being able to call a model during batch generation is useful for cases where we generate batches based on current model behavior (e.g., selecting hard examples for a model using triplet loss).
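As a sketch of that use case (my own illustration; model, x_pool, and y_pool are assumed names, not from this report), a generator might score candidates with the current model and yield the ones it currently gets most wrong:

import numpy as np

def hard_example_generator(model, x_pool, y_pool, batch_size=32, oversample=4):
    # Assumes len(x_pool) >= batch_size * oversample.
    while True:
        # Draw a candidate set larger than the batch we need.
        idx = np.random.choice(len(x_pool), size=batch_size * oversample, replace=False)
        scores = model.predict(x_pool[idx]).ravel()
        # Keep the candidates whose predictions are furthest from their labels.
        errors = np.abs(scores - y_pool[idx].ravel())
        hardest = idx[np.argsort(errors)[-batch_size:]]
        yield x_pool[hardest], y_pool[hardest]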
Code to reproduce the issue
import keras
import numpy as np
input_layer = keras.layers.Input((1, ))
x = keras.layers.Dense(1, activation='sigmoid')(input_layer)
model = keras.models.Model(inputs=input_layer, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='sgd')
model._make_predict_function()
def generate():
    while True:
        # If you comment this next line out, no error is raised.
        yt = model.predict(np.random.randn(5, 1))
        yield np.random.randn(5, 1), np.ones((5, 1))
model.fit_generator(generate(), epochs=3, steps_per_epoch=10)
Other info / logs
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-1-79d7cb1668dc> in <module>
12 yield np.random.randn(5, 1), np.ones((5, 1))
13
---> 14 model.fit_generator(generate(), epochs=3, steps_per_epoch=10)
/usr/src/.venv/lib/python3.7/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name + '` call to the ' +
90 'Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
/usr/src/.venv/lib/python3.7/site-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1730 use_multiprocessing=use_multiprocessing,
1731 shuffle=shuffle,
-> 1732 initial_epoch=initial_epoch)
1733
1734 @interfaces.legacy_generator_methods_support
/usr/src/.venv/lib/python3.7/site-packages/keras/engine/training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
183 batch_index = 0
184 while steps_done < steps_per_epoch:
--> 185 generator_output = next(output_generator)
186
187 if not hasattr(generator_output, '__len__'):
/usr/src/.venv/lib/python3.7/site-packages/keras/utils/data_utils.py in get(self)
740 "`use_multiprocessing=False, workers > 1`."
741 "For more information see issue #1638.")
--> 742 six.reraise(*sys.exc_info())
/usr/src/.venv/lib/python3.7/site-packages/six.py in reraise(tp, value, tb)
691 if value.__traceback__ is not tb:
692 raise value.with_traceback(tb)
--> 693 raise value
694 finally:
695 value = None
/usr/src/.venv/lib/python3.7/site-packages/keras/utils/data_utils.py in get(self)
709 try:
710 future = self.queue.get(block=True)
--> 711 inputs = future.get(timeout=30)
712 self.queue.task_done()
713 except mp.TimeoutError:
/usr/local/lib/python3.7/multiprocessing/pool.py in get(self, timeout)
655 return self._value
656 else:
--> 657 raise self._value
658
659 def _set(self, i, obj):
/usr/local/lib/python3.7/multiprocessing/pool.py in worker(inqueue, outqueue, initializer, initargs, maxtasks, wrap_exception)
119 job, i, func, args, kwds = task
120 try:
--> 121 result = (True, func(*args, **kwds))
122 except Exception as e:
123 if wrap_exception and func is not _helper_reraises_exception:
/usr/src/.venv/lib/python3.7/site-packages/keras/utils/data_utils.py in next_sample(uid)
648 The next value of generator `uid`.
649 """
--> 650 return six.next(_SHARED_SEQUENCES[uid])
651
652
<ipython-input-1-79d7cb1668dc> in generate()
9 def generate():
10 while True:
---> 11 yt = model.predict(np.random.randn(5, 1))
12 yield np.random.randn(5, 1), np.ones((5, 1))
13
/usr/src/.venv/lib/python3.7/site-packages/keras/engine/training.py in predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
1460 verbose=verbose,
1461 steps=steps,
-> 1462 callbacks=callbacks)
1463
1464 def train_on_batch(self, x, y,
/usr/src/.venv/lib/python3.7/site-packages/keras/engine/training_arrays.py in predict_loop(model, f, ins, batch_size, verbose, steps, callbacks)
274 indices_for_conversion_to_dense.append(i)
275
--> 276 callbacks.model.stop_training = False
277 callbacks._call_begin_hook('predict')
278
/usr/src/.venv/lib/python3.7/site-packages/keras/engine/network.py in __setattr__(self, name, value)
321 'forgot to call `super(YourClass, self).__init__()`.'
322 ' Always start with this line.')
--> 323 super(Network, self).__setattr__(name, value)
324
325 @property
/usr/src/.venv/lib/python3.7/site-packages/keras/engine/base_layer.py in __setattr__(self, name, value)
1213 # We do this so that we can maintain the correct order of metrics by adding
1214 # the instance to the `metrics` list as soon as it is created.
-> 1215 if not _DISABLE_TRACKING.value:
1216 from .. import metrics as metrics_module
1217 if isinstance(value, metrics_module.Metric):
AttributeError: '_thread._local' object has no attribute 'value'
I just realized that this issue can be resolved by passing workers=0 to fit_generator. 🤦‍♂️ If this is the intended behavior, please do close this issue. Sorry for adding noise to the project.
I'm going to leave this here just in case others run into the same issue.
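For completeness, the workaround applied to the reproduction code above (unchanged except for the workers argument):

# workers=0 runs the generator on the main thread, so model.predict
# never touches Keras's thread-local state from a worker thread.
model.fit_generator(generate(), epochs=3, steps_per_epoch=10, workers=0)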
I have fixed this issue on master. Please install from master and check.
@fchollet First, thank you for taking a look so quickly. I saw the commit (977d55ccc7c6315abdecd645aea8d389a5b53ffc) and expected it would solve the problem. Instead, a new exception is now raised that is possibly related to https://github.com/keras-team/keras/issues/1638. The same test code as in the original (re-pasted for completeness) now yields InvalidArgumentError: Tensor input_1:0, specified in either feed_devices or fetch_devices was not found in the Graph. The full traceback is also provided below.
Code to reproduce the issue
import keras
import numpy as np
input_layer = keras.layers.Input((1, ))
x = keras.layers.Dense(1, activation='sigmoid')(input_layer)
model = keras.models.Model(inputs=input_layer, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='sgd')
model._make_predict_function()
def generate():
    while True:
        # If you comment this next line out, no error is raised.
        yt = model.predict(np.random.randn(5, 1))
        yield np.random.randn(5, 1), np.ones((5, 1))
model.fit_generator(generate(), epochs=3, steps_per_epoch=10)
Traceback
Epoch 1/3
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-1-1944052025d1> in <module>
13 yield np.random.randn(5, 1), np.ones((5, 1))
14
---> 15 model.fit_generator(generate(), epochs=3, steps_per_epoch=10)
/usr/src/.venv/lib/python3.7/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name + '` call to the ' +
90 'Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
/usr/src/.venv/lib/python3.7/site-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1730 use_multiprocessing=use_multiprocessing,
1731 shuffle=shuffle,
-> 1732 initial_epoch=initial_epoch)
1733
1734 @interfaces.legacy_generator_methods_support
/usr/src/.venv/lib/python3.7/site-packages/keras/engine/training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
183 batch_index = 0
184 while steps_done < steps_per_epoch:
--> 185 generator_output = next(output_generator)
186
187 if not hasattr(generator_output, '__len__'):
/usr/src/.venv/lib/python3.7/site-packages/keras/utils/data_utils.py in get(self)
740 "`use_multiprocessing=False, workers > 1`."
741 "For more information see issue #1638.")
--> 742 six.reraise(*sys.exc_info())
/usr/src/.venv/lib/python3.7/site-packages/six.py in reraise(tp, value, tb)
691 if value.__traceback__ is not tb:
692 raise value.with_traceback(tb)
--> 693 raise value
694 finally:
695 value = None
/usr/src/.venv/lib/python3.7/site-packages/keras/utils/data_utils.py in get(self)
709 try:
710 future = self.queue.get(block=True)
--> 711 inputs = future.get(timeout=30)
712 self.queue.task_done()
713 except mp.TimeoutError:
/usr/local/lib/python3.7/multiprocessing/pool.py in get(self, timeout)
655 return self._value
656 else:
--> 657 raise self._value
658
659 def _set(self, i, obj):
/usr/local/lib/python3.7/multiprocessing/pool.py in worker(inqueue, outqueue, initializer, initargs, maxtasks, wrap_exception)
119 job, i, func, args, kwds = task
120 try:
--> 121 result = (True, func(*args, **kwds))
122 except Exception as e:
123 if wrap_exception and func is not _helper_reraises_exception:
/usr/src/.venv/lib/python3.7/site-packages/keras/utils/data_utils.py in next_sample(uid)
648 The next value of generator `uid`.
649 """
--> 650 return six.next(_SHARED_SEQUENCES[uid])
651
652
<ipython-input-1-1944052025d1> in generate()
10 while True:
11 # If you comment this next line out, no error is raised.
---> 12 yt = model.predict(np.random.randn(5, 1))
13 yield np.random.randn(5, 1), np.ones((5, 1))
14
/usr/src/.venv/lib/python3.7/site-packages/keras/engine/training.py in predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
1460 verbose=verbose,
1461 steps=steps,
-> 1462 callbacks=callbacks)
1463
1464 def train_on_batch(self, x, y,
/usr/src/.venv/lib/python3.7/site-packages/keras/engine/training_arrays.py in predict_loop(model, f, ins, batch_size, verbose, steps, callbacks)
322 batch_logs = {'batch': batch_index, 'size': len(batch_ids)}
323 callbacks._call_batch_hook('predict', 'begin', batch_index, batch_logs)
--> 324 batch_outs = f(ins_batch)
325 batch_outs = to_list(batch_outs)
326 if batch_index == 0:
/usr/src/.venv/lib/python3.7/site-packages/tensorflow/python/keras/backend.py in __call__(self, inputs)
3287 feed_symbols != self._feed_symbols or self.fetches != self._fetches or
3288 session != self._session):
-> 3289 self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
3290
3291 fetched = self._callable_fn(*array_vals,
/usr/src/.venv/lib/python3.7/site-packages/tensorflow/python/keras/backend.py in _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session)
3220 callable_opts.run_options.CopyFrom(self.run_options)
3221 # Create callable.
-> 3222 callable_fn = session._make_callable_from_options(callable_opts)
3223 # Cache parameters corresponding to the generated callable, so that
3224 # we can detect future mismatches and refresh the callable.
/usr/src/.venv/lib/python3.7/site-packages/tensorflow/python/client/session.py in _make_callable_from_options(self, callable_options)
1487 """
1488 self._extend_graph()
-> 1489 return BaseSession._Callable(self, callable_options)
1490
1491
/usr/src/.venv/lib/python3.7/site-packages/tensorflow/python/client/session.py in __init__(self, session, callable_options)
1444 try:
1445 self._handle = tf_session.TF_SessionMakeCallable(
-> 1446 session._session, options_ptr)
1447 finally:
1448 tf_session.TF_DeleteBuffer(options_ptr)
InvalidArgumentError: Tensor input_1:0, specified in either feed_devices or fetch_devices was not found in the Graph
The error is now limited to tensorflow==1.14.0. No exception is raised with tensorflow==2.0.0rc1.
I am also having this issue. Downgrading tensorflow to 1.13.1 and Keras to 2.2.4 is another workaround.
This helped me since upgrading to tensorflow==2.0.0rc1 caused other breaking changes.
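For anyone applying that downgrade, the pins from the comments above would be:

pip install tensorflow==1.13.1 keras==2.2.4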
The original issue that I raised here (the thread-local storage error) was fully resolved so I think it makes sense to close this issue.
The second issue that arose appears to be addressed by upgrading or downgrading TensorFlow, depending on your specific need. If neither upgrading nor downgrading is suitable for you, it may make sense to open a new issue focused on the TensorFlow compatibility problem.
I am getting this error while trying to predict using the model (TensorFlow==2.2.0, Keras==2.3.1):
if _SYMBOLIC_SCOPE.value:
AttributeError: '_thread._local' object has no attribute 'value'
Traceback (most recent call last):
File "/opt/anaconda3/lib/python3.7/site-packages/flask/app.py", line 2463, in __call__
return self.wsgi_app(environ, start_response)
File "/opt/anaconda3/lib/python3.7/site-packages/flask/app.py", line 2449, in wsgi_app
response = self.handle_exception(e)
File "/opt/anaconda3/lib/python3.7/site-packages/flask/app.py", line 1866, in handle_exception
reraise(exc_type, exc_value, tb)
File "/opt/anaconda3/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/opt/anaconda3/lib/python3.7/site-packages/flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "/opt/anaconda3/lib/python3.7/site-packages/flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/opt/anaconda3/lib/python3.7/site-packages/flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/opt/anaconda3/lib/python3.7/site-packages/flask/_compat.py", line 39, in reraise
raise value
File "/opt/anaconda3/lib/python3.7/site-packages/flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "/opt/anaconda3/lib/python3.7/site-packages/flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/Users/sidhartha7/Desktop/Text-Generator/app.py", line 223, in get_data
predict = generate_output(input, int(length))
File "/Users/sidhartha7/Desktop/Text-Generator/app.py", line 189, in generate_output
preds = model.predict(x_pred, verbose=0)[0]
File "/opt/anaconda3/lib/python3.7/site-packages/keras/engine/training.py", line 1452, in predict
if self._uses_dynamic_learning_phase():
File "/opt/anaconda3/lib/python3.7/site-packages/keras/engine/training.py", line 382, in _uses_dynamic_learning_phase
not isinstance(K.learning_phase(), int))
File "/opt/anaconda3/lib/python3.7/site-packages/keras/backend/tensorflow_backend.py", line 73, in symbolic_fn_wrapper
if _SYMBOLIC_SCOPE.value:
AttributeError: '_thread._local' object has no attribute 'value'
I got mine working with tensorflow==2.0.0 and keras==2.3.1.
I used load_model imported from tensorflow as follows:
from tensorflow.keras.models import load_model
Don't import load_model from keras, as the new version of keras doesn't support it.
Hope it helps.
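A minimal sketch of that suggestion (the model path and input shape here are hypothetical):

import numpy as np
from tensorflow.keras.models import load_model  # not `from keras.models import load_model`

model = load_model('model.h5')  # hypothetical path
preds = model.predict(np.random.randn(1, 1), verbose=0)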
Indeed, it solved my problem.
Setting threaded=False in app.run also works.
Example:
app.run(port=5000, threaded=False)
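A minimal sketch combining this with the Flask setup from the traceback above (the model path and route are hypothetical; with threaded=False every request runs on the thread that created the model, so its thread-local state stays visible):

import numpy as np
from flask import Flask
from keras.models import load_model  # standalone keras, as in the traceback

app = Flask(__name__)
model = load_model('model.h5')  # hypothetical path

@app.route('/predict')
def predict():
    return str(model.predict(np.random.randn(1, 1))[0][0])

if __name__ == '__main__':
    app.run(port=5000, threaded=False)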
from tensorflow.keras.models import load_model - solved my problem