Error when activating use_multiprocessing in fit_generator on Windows - Python

I'm trying to run my CNN Python code with use_multiprocessing=True in the fit_generator function, but I get an error. It works fine with a single process, but then CPU load sits at 20% and GPU at 8%.
I'm running on an MSI laptop with Windows 10, a Core i7-7820HK CPU, and an NVIDIA GTX 1080, using the TensorFlow backend.
This is my code:
# Part 1 - Building the CNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator
# Initialising the CNN
classifier = Sequential()
# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))
# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(Flatten())
# Step 4 - Full connection
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Part 2 - Fitting the CNN to the images
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory('dataset\\training_set',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')
test_set = test_datagen.flow_from_directory('dataset\\test_set',
                                            target_size=(64, 64),
                                            batch_size=32,
                                            class_mode='binary')
if __name__ == '__main__':
    classifier.fit_generator(training_set,
                             workers=8,
                             max_queue_size=100,
                             use_multiprocessing=True,
                             steps_per_epoch=(8000 / 32),
                             epochs=25,
                             validation_data=test_set,
                             validation_steps=(2000 / 32))
And I get this error:
Using TensorFlow backend.
Found 8000 images belonging to 2 classes.
Found 2000 images belonging to 2 classes.
Epoch 1/25
Exception in thread Thread-24:
Traceback (most recent call last):
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\threading.py", line 916, in _bootstrap_inner
    self.run()
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\threading.py", line 864, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\site-packages\keras\utils\data_utils.py", line 548, in _run
    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\site-packages\keras\utils\data_utils.py", line 522, in <lambda>
    initargs=(seqs,))
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\context.py", line 119, in Pool
    context=self.get_context())
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\pool.py", line 174, in __init__
    self._repopulate_pool()
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\pool.py", line 239, in _repopulate_pool
    w.start()
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\process.py", line 105, in start
    self._popen = self._Popen(self)
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects

(The identical traceback is raised again for Thread-23.)
After updating all packages, this error shows instead of the one above:
ValueError: Using a generator with `use_multiprocessing=True` is not supported on Windows (no marshalling of generators across process boundaries). Instead, use single thread/process or multithreading.
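As the updated message suggests, a workaround on Windows is to keep worker threads instead of worker processes. Because flow_from_directory returns a keras.utils.Sequence (index-based and safe to read from several threads), dropping use_multiprocessing while keeping several workers still parallelizes batch loading. A minimal sketch of the changed call, assuming the same model and generators as above:
# Worker threads instead of processes: nothing has to be pickled,
# so this runs on Windows; 8 threads feed the batch queue.
classifier.fit_generator(training_set,
                         workers=8,
                         max_queue_size=100,
                         use_multiprocessing=False,
                         steps_per_epoch=8000 // 32,
                         epochs=25,
                         validation_data=test_set,
                         validation_steps=2000 // 32)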

Related

Fit generator with yield generator. Cannot Pickle 'generator' object

I have the following code:
def generator_train(x_train_df, y_train_df, batch_size):
    for i in range(int(len(x_train_df) / batch_size)):
        x_train = x_train_df[i * batch_size:(i + 1) * batch_size]
        y_train = y_train_df[i * batch_size:(i + 1) * batch_size]
        yield np.array(x_train), np.array(y_train)

train_generator = generator_train(x_train_df, y_train_df, batch_size)

history = model.fit(train_generator,
                    epochs=epochs_no,
                    steps_per_epoch=number_of_rows_input / batch_size,
                    verbose=1,
                    max_queue_size=100,
                    validation_data=None,
                    workers=8,
                    use_multiprocessing=True
                    )
Both x_train_df and y_train_df are pandas DataFrames.
I'm still getting the following error referring to pickle, even though model.fit should have nothing to do with dumping/loading pickled data:
Exception in thread Thread-2:
Traceback (most recent call last):
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\threading.py", line 954, in _bootstrap_inner
    self.run()
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\threading.py", line 892, in run
    self._target(*self._args, **self._kwargs)
  File "E:\Tut\pythonProject5_MachineLearning\venv\lib\site-packages\keras\utils\data_utils.py", line 868, in _run
    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
  File "E:\Tut\pythonProject5_MachineLearning\venv\lib\site-packages\keras\utils\data_utils.py", line 858, in pool_fn
    pool = get_pool_class(True)(
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\context.py", line 119, in Pool
    return Pool(processes, initializer, initargs, maxtasksperchild,
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\pool.py", line 212, in __init__
    self._repopulate_pool()
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\pool.py", line 303, in _repopulate_pool
    return self._repopulate_pool_static(self._ctx, self.Process,
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\pool.py", line 326, in _repopulate_pool_static
    w.start()
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\process.py", line 121, in start
    self._popen = self._Popen(self)
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\context.py", line 327, in _Popen
    return Popen(process_obj)
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'generator' object

Interleaved with it, the spawned child process reports:

Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\spawn.py", line 116, in spawn_main
    exitcode = _main(fd, parent_sentinel)
  File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\spawn.py", line 126, in _main
    self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
What am I missing?
One solution is to use MirroredStrategy() for the neural network and to preprocess the data with tf.data.Dataset:
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
    model = Sequential()
    model.add(Dense.....
    .....
    model.compile(loss='mae', optimizer='sgd')

def dataset_fn(dummy_argument):
    x = np.array(x_train_df).astype(np.float32)
    y = np.array(y_train_df).astype(np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((x, y))
    return dataset.repeat().batch(batch_size=batch_size, drop_remainder=True)

dist_dataset = strategy.experimental_distribute_datasets_from_function(dataset_fn)

history = model.fit(
    dist_dataset,
    epochs=epochs,
    steps_per_epoch=number_of_batches_in_the_x_set,
    verbose=1,
    max_queue_size=max_queue_size,
    validation_data=None,
    workers=number_of_workers,
    use_multiprocessing=True
)
You are pickling because you're using multiprocessing, and multiprocessing needs to pickle whatever it runs in order to send it to the new Python processes. Since your train_generator is needed in each process, it is sent to them, i.e. pickled, and generator objects cannot be pickled.
As the linked question notes, you avoid this by not handing over a generator: trivially, materialize the data (e.g. cast to a list) before sending; more sensibly, rewrite the generator as an object that hands out batches on demand, as sketched below.
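One picklable rewrite is a keras.utils.Sequence, the index-based batch container Keras documents for use with use_multiprocessing=True. A minimal sketch, assuming x_train_df and y_train_df as above (DataFrameSequence is a hypothetical name):
import numpy as np
from tensorflow.keras.utils import Sequence

class DataFrameSequence(Sequence):
    # Index-based batch source; unlike a generator, instances can be
    # pickled and rebuilt inside worker processes.
    def __init__(self, x_df, y_df, batch_size):
        self.x_df, self.y_df, self.batch_size = x_df, y_df, batch_size

    def __len__(self):
        # Number of batches per epoch.
        return len(self.x_df) // self.batch_size

    def __getitem__(self, i):
        # Return batch i as numpy arrays.
        rows = slice(i * self.batch_size, (i + 1) * self.batch_size)
        return np.array(self.x_df[rows]), np.array(self.y_df[rows])

history = model.fit(DataFrameSequence(x_train_df, y_train_df, batch_size),
                    epochs=epochs_no, verbose=1)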

TypeError: __array__() takes 1 positional argument but 2 were given?

I am working on transfer learning for an image classification task.
The training generator is as follows:
train_generator = train_datagen.flow_from_directory(
    '/home/idu/Desktop/COV19D/train/',
    color_mode="grayscale",
    target_size=(512, 512),  # All images are 512 * 512
    batch_size=batch_size,
    classes=['covid', 'non-covid'],
    class_mode='binary')
The transferred model code is as follows:
SIZE = 512
VGG_model = VGG16(include_top=False, weights=None, input_shape=(SIZE, SIZE, 1))
for layer in VGG_model.layers:
    layer.trainable = False
feature_extractor = VGG_model.predict(train_generator)
The last command throws the error:
Traceback (most recent call last):
File "<ipython-input-28-b9bad68819ec>", line 1, in <module>
feature_extractor=VGG_model.predict(train_generator)
File "/home/idu/.local/lib/python3.6/site-packages/keras/engine/training.py", line 1681, in predict
steps_per_execution=self._steps_per_execution)
File "/home/idu/.local/lib/python3.6/site-packages/keras/engine/data_adapter.py", line 1348, in get_data_handler
return DataHandler(*args, **kwargs)
File "/home/idu/.local/lib/python3.6/site-packages/keras/engine/data_adapter.py", line 1150, in __init__
model=model)
File "/home/idu/.local/lib/python3.6/site-packages/keras/engine/data_adapter.py", line 793, in __init__
peek, x = self._peek_and_restore(x)
File "/home/idu/.local/lib/python3.6/site-packages/keras/engine/data_adapter.py", line 850, in _peek_and_restore
peek = next(x)
File "/home/idu/.local/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py", line 104, in __next__
return self.next(*args, **kwargs)
File "/home/idu/.local/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py", line 116, in next
return self._get_batches_of_transformed_samples(index_array)
File "/home/idu/.local/lib/python3.6/site-packages/keras_preprocessing/image/iterator.py", line 231, in _get_batches_of_transformed_samples
x = img_to_array(img, data_format=self.data_format)
File "/home/idu/.local/lib/python3.6/site-packages/keras_preprocessing/image/utils.py", line 309, in img_to_array
x = np.asarray(img, dtype=dtype)
File "/home/idu/.local/lib/python3.6/site-packages/numpy/core/_asarray.py", line 83, in asarray
return array(a, dtype, copy=False, order=order)
TypeError: __array__() takes 1 positional argument but 2 were given
How can I overcome this error to do the feature extraction?
Thank you.
I tried downgrading TensorFlow to 2.4, but that did not work. I then downgraded Python from 3.10.2 to 3.9.9 and re-installed the scientific stack with the following command: python -m pip install --user numpy scipy matplotlib ipython jupyter pandas sympy nose. That solved the issue.
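For what it's worth, a hedged reading of the traceback: np.asarray(img, dtype=dtype) passes a dtype argument to PIL's Image.__array__, so the error points at a NumPy/Pillow version mismatch, which re-installing the stack happens to repair. Checking the installed pair is a one-liner (a diagnostic sketch, not a pin recommendation):
import numpy, PIL
# A NumPy/Pillow pair whose __array__ signatures disagree raises
# exactly this TypeError; print both to see what is installed.
print(numpy.__version__, PIL.__version__)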

Getting 'TypeError: Caught exception' when using 'accuracy' in TensorFlow Federated

This is my model, and I have implemented it once in TensorFlow.
def create_compiled_keras_model():
    inputs = Input(shape=(7, 20, 1))
    l0_c = Conv2D(32, kernel_size=(7, 7), padding='valid', activation='relu')(inputs)
    l1_c = Conv2D(32, kernel_size=(1, 5), padding='same', activation='relu')(l0_c)
    l1_p = AveragePooling2D(pool_size=(1, 2), strides=2, padding='same')(l1_c)
    l2_c = Conv2D(64, kernel_size=(1, 4), padding='same', activation='relu')(l1_p)
    l2_p = AveragePooling2D(pool_size=(1, 2), strides=2, padding='same')(l2_c)  # apply pooling to l2_c
    l3_c = Conv2D(2, kernel_size=(1, 1), padding='valid', activation='sigmoid')(l2_p)
    predictions = Flatten()(l3_c)
    predictions = tf.cast(predictions, dtype='float32')
    model = Model(inputs=inputs, outputs=predictions)
    opt = Adam(lr=0.0005)
    print(model.summary())

    def loss_fn(y_true, y_pred):
        return tf.reduce_mean(tf.keras.losses.binary_crossentropy(y_pred, y_true))

    model.compile(optimizer=opt,
                  loss=loss_fn,
                  metrics=['accuracy'])
    return model
I get this error in TensorFlow Federated.
Traceback (most recent call last):
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/learning/keras_utils.py", line 270, in report
keras_metric = metric_type.from_config(metric_config)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_core/python/keras/engine/base_layer.py", line 594, in from_config
return cls(**config)
TypeError: __init__() missing 1 required positional argument: 'fn'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/amir/Documents/CODE/Python/FL/fl_dataset_khudemon/fl.py", line 203, in <module>
quantization_part = FedAvgQ.build_federated_averaging_process(model_fn)
File "/Users/amir/Documents/CODE/Python/FL/fl_dataset_khudemon/new_fedavg_keras.py", line 195, in build_federated_averaging_process
stateful_delta_aggregate_fn, stateful_model_broadcast_fn)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/learning/framework/optimizer_utils.py", line 351, in build_model_delta_optimizer_process
dummy_model_for_metadata = model_utils.enhance(model_fn())
File "/Users/amir/Documents/CODE/Python/FL/fl_dataset_khudemon/fl.py", line 196, in model_fn
return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/learning/keras_utils.py", line 216, in from_compiled_keras_model
return model_utils.enhance(_TrainableKerasModel(keras_model, dummy_tensors))
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/learning/keras_utils.py", line 491, in __init__
inner_model.loss_weights, inner_model.metrics)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/learning/keras_utils.py", line 381, in __init__
federated_output, federated_local_outputs_type)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/api/computations.py", line 223, in federated_computation
return computation_wrapper_instances.federated_computation_wrapper(*args)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/wrappers/computation_wrapper.py", line 410, in __call__
self._wrapper_fn)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/wrappers/computation_wrapper.py", line 103, in _wrap
concrete_fn = wrapper_fn(fn, parameter_type, unpack=None)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/wrappers/computation_wrapper_instances.py", line 78, in _federated_computation_wrapper_fn
suggested_name=name))
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/federated_computation_utils.py", line 76, in zero_or_one_arg_fn_to_building_block
context_stack))
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/utils/function_utils.py", line 652, in <lambda>
return lambda arg: _call(fn, parameter_type, arg)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/utils/function_utils.py", line 645, in _call
return fn(arg)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/learning/keras_utils.py", line 377, in federated_output
type(metric), metric.get_config(), variables)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/learning/keras_utils.py", line 260, in federated_aggregate_keras_metric
@tff.tf_computation(member_type)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/wrappers/computation_wrapper.py", line 415, in <lambda>
return lambda fn: _wrap(fn, arg_type, self._wrapper_fn)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/wrappers/computation_wrapper.py", line 103, in _wrap
concrete_fn = wrapper_fn(fn, parameter_type, unpack=None)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/wrappers/computation_wrapper_instances.py", line 44, in _tf_wrapper_fn
target_fn, parameter_type, ctx_stack)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/tensorflow_serialization.py", line 278, in serialize_py_fn_as_tf_computation
result = target(*args)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/utils/function_utils.py", line 652, in <lambda>
return lambda arg: _call(fn, parameter_type, arg)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/core/impl/utils/function_utils.py", line 645, in _call
return fn(arg)
File "/Users/amir/tensorflow/lib/python3.7/site-packages/tensorflow_federated/python/learning/keras_utils.py", line 278, in report
t=metric_type, c=metric_config, e=e))
TypeError: Caught exception trying to call `<class 'tensorflow.python.keras.metrics.MeanMetricWrapper'>.from_config()` with config {'name': 'accuracy', 'dtype': 'float32'}. Confirm that <class 'tensorflow.python.keras.metrics.MeanMetricWrapper'>.__init__() has an argument for each member of the config.
Exception: __init__() missing 1 required positional argument: 'fn'
My dataset has binary labels [0. 1.], and I used binary_crossentropy as the loss function, but the 'accuracy' metric triggers the error; the loss is computed without any problem when I remove the metric. I am sure it is related to the labels. Any help would be greatly appreciated.
TensorFlow Federated unfortunately isn't able to understand Keras models that have been compiled with string arguments. TFF requires that the compile() call on the model be given instances of tf.keras.losses.Loss and tf.keras.metrics.Metric. It should be possible to change the last part of the code in question to:
model.compile(optimizer=opt,
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=[tf.keras.metrics.Accuracy()])
Note that there shouldn't be a need to define a custom loss function; Keras provides a canned binary crossentropy.
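One hedged caveat on the metric choice: for a sigmoid output, tf.keras.metrics.BinaryAccuracy() is arguably the closer stand-in for the string 'accuracy' (which Keras resolves to a thresholded binary accuracy for this loss), since Accuracy() checks exact equality between predictions and labels:
# Hedged variant: BinaryAccuracy thresholds sigmoid outputs at 0.5,
# matching what the string 'accuracy' resolves to for this loss;
# Accuracy() would compare raw floats for exact equality.
model.compile(optimizer=opt,
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=[tf.keras.metrics.BinaryAccuracy()])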

TensorType does not support iteration. Maybe you are using builtin.sum instead of theano.tensor.sum? (Maybe .max?)?

I am trying to train a convolutional neural network on my data,
# Reshaping data
batch_size = 256
nb_epoch = 50
Nrb_filter = 64
Filter_Size = 3

print('Build model...')
model = Sequential()
input_traces = Input(shape=(nb_samples,))
model.add(Conv1D(Nrb_filter, Filter_Size, activation='relu',
                 input_shape=input_traces))
model.add(Conv1D(Nrb_filter, Filter_Size, activation='relu'))
model.add(MaxPooling1D(Filter_Size))
...
The error that I have is:
Defining CNN Model
Build model...
Traceback (most recent call last):
File "CNN_Based_Attack.py", line 136, in <module>
model.add(Conv1D(Nrb_filter, Filter_Size, activation='relu', input_shape=input_traces))
File "/home/user/.local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 88, in wrapper
return func(*args, **kwargs)
File "/home/user/.local/lib/python2.7/site-packages/keras/layers/convolutional.py", line 330, in __init__
**kwargs)
File "/home/user/.local/lib/python2.7/site-packages/keras/layers/convolutional.py", line 100, in __init__
super(_Conv, self).__init__(**kwargs)
File "/home/user/.local/lib/python2.7/site-packages/keras/engine/topology.py", line 299, in __init__
batch_input_shape = (batch_size,) + tuple(kwargs['input_shape'])
File "/usr/lib64/python2.7/site-packages/theano/tensor/var.py", line 600, in __iter__
raise TypeError(('TensorType does not support iteration. '
TypeError: TensorType does not support iteration. Maybe you are using builtin.sum instead of theano.tensor.sum? (Maybe .max?)
I really don't understand the error or how to resolve it; I would be very grateful for any help.
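For what it's worth, the last frames of the traceback suggest a reading: Keras evaluates tuple(kwargs['input_shape']), i.e. it iterates over input_shape, and a Theano tensor does not support iteration. input_shape expects a shape tuple, not the Input tensor stored in input_traces (which Sequential does not need at all). A hedged sketch of the fix, assuming each trace is one-dimensional, hence a single channel:
# input_shape must be a shape tuple; for Conv1D that is (steps, channels).
model = Sequential()
model.add(Conv1D(Nrb_filter, Filter_Size, activation='relu',
                 input_shape=(nb_samples, 1)))  # assumed single channel
model.add(Conv1D(Nrb_filter, Filter_Size, activation='relu'))
model.add(MaxPooling1D(Filter_Size))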

Tensorflow: How should I properly handle graphs?

I am trying to set up a network using tf.contrib.learn functionalities:
# Imports, definition of directory paths..

def main(unused_argv):
    hparams = seg_hparams.create_hparams()
    input_fn_train = seg_inputs.create_input_fn(
        hparams=hparams,
        mode=tf.contrib.learn.ModeKeys.TRAIN,
        input_dir=TRAIN_DATA)
    model_fn = seg_model.create_model_fn(
        hparams,
        model_impl=forward_backward_model)
    estimator = tf.contrib.learn.Estimator(
        model_fn=model_fn,
        model_dir=MODEL_DIR)
    estimator.fit(input_fn=input_fn_train, steps=None)

if __name__ == "__main__":
    tf.app.run()
For the input I am using a custom queue similar to the one described in the tutorial on https://indico.io/blog/tensorflow-data-input-part2-extensions/.
When running the program, I encounter the following error:
Traceback (most recent call last):
File "train.py", line 54, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/app.py", line 30, in run
sys.exit(main(sys.argv[:1] + flags_passthrough))
File "train.py", line 49, in main
estimator.fit(input_fn=input_fn_train, steps=None)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py", line 332, in fit
max_steps=max_steps)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py", line 650, in _train_model
train_op, loss_op = self._get_train_ops(features, targets)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py", line 951, in _get_train_ops
_, loss, train_op = self._call_model_fn(features, targets, ModeKeys.TRAIN)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/contrib/learn/python/learn/estimators/estimator.py", line 934, in _call_model_fn
....
ValueError: Tensor("random_shuffle_queue_DequeueMany:0", shape=(6, 256, 256, 1), dtype=float32, device=/device:CPU:0) must be from the same graph as Tensor("seg_net/conv1/weights:0", shape=(3, 3, 1, 32), dtype=float32_ref).
Essentially I am wondering how to make sure to use the same graph across different functions.
I was thinking of something like using
with tf.Graph().as_default():
inside input_fn_train and
with tf.get_default_graph():
inside model_fn.
However, I couldn't resolve the issue so far.
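A hedged note on those two ideas: tf.get_default_graph() returns a Graph object, not a context manager, so the second snippet would fail as written; entering a graph always goes through as_default(). More to the point, the Estimator builds a fresh graph of its own and calls input_fn and model_fn inside it, so the usual cause of this ValueError is a tensor created at import time (for example, queue ops built when the input module loads) leaking into the call. Creating every op inside input_fn/model_fn keeps everything in the Estimator's graph. A minimal illustration of graph ownership:
import tensorflow as tf

g = tf.Graph()
with g.as_default():      # entering a graph always goes through as_default()
    x = tf.constant(1.0)  # this op is created in g
assert x.graph is g       # tensors remember which graph owns them
# Mixing x with ops from a different graph raises the
# "must be from the same graph" ValueError seen above.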
