Fit with a yield generator: cannot pickle 'generator' object - Python

I have the following code:
def generator_train(x_train_df, y_train_df, batch_size):
    for i in range(int(len(x_train_df) / batch_size)):
        x_train = x_train_df[i * batch_size:(i + 1) * batch_size]
        y_train = y_train_df[i * batch_size:(i + 1) * batch_size]
        yield np.array(x_train), np.array(y_train)

train_generator = generator_train(x_train_df, y_train_df, batch_size)

history = model.fit(train_generator,
                    epochs=epochs_no,
                    steps_per_epoch=number_of_rows_input / batch_size,
                    verbose=1,
                    max_queue_size=100,
                    validation_data=None,
                    workers=8,
                    use_multiprocessing=True
                    )
Both x_train_df and y_train_df are pandas DataFrames.
I keep getting the following error, which refers to pickling, even though fitting from a generator should have nothing to do with dumping/loading pickled data.
Exception in thread Traceback (most recent call last):
Thread-2 File "<string>", line 1, in <module>
Traceback (most recent call last):
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\threading.py", line 954, in _bootstrap_inner
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\spawn.py", line 116, in spawn_main
self.run()
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\threading.py", line 892, in run
exitcode = _main(fd, parent_sentinel)
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\spawn.py", line 126, in _main
self = reduction.pickle.load(from_parent)
EOFError: Ran out of input
self._target(*self._args, **self._kwargs)
File "E:\Tut\pythonProject5_MachineLearning\venv\lib\site-packages\keras\utils\data_utils.py", line 868, in _run
with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
File "E:\Tut\pythonProject5_MachineLearning\venv\lib\site-packages\keras\utils\data_utils.py", line 858, in pool_fn
pool = get_pool_class(True)(
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\context.py", line 119, in Pool
return Pool(processes, initializer, initargs, maxtasksperchild,
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\pool.py", line 212, in __init__
self._repopulate_pool()
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\pool.py", line 303, in _repopulate_pool
return self._repopulate_pool_static(self._ctx, self.Process,
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\pool.py", line 326, in _repopulate_pool_static
w.start()
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\process.py", line 121, in start
self._popen = self._Popen(self)
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\context.py", line 327, in _Popen
return Popen(process_obj)
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\popen_spawn_win32.py", line 93, in __init__
reduction.dump(process_obj, to_child)
File "C:\Users\Admin\AppData\Local\Programs\Python\Python39\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: cannot pickle 'generator' object
What am I missing?

One solution is to use MirroredStrategy() for the neural network and to preprocess the data with tf.data.Dataset:
strategy = tf.distribute.MirroredStrategy()

with strategy.scope():
    model = Sequential()
    model.add(Dense.....
    .....
    model.compile(loss='mae', optimizer='sgd')

def dataset_fn(dummy_argument):
    x = np.array(x_train_df).astype(np.float32)
    y = np.array(y_train_df).astype(np.float32)
    dataset = tf.data.Dataset.from_tensor_slices((x, y))
    return dataset.repeat().batch(batch_size=batch_size, drop_remainder=True)

dist_dataset = strategy.experimental_distribute_datasets_from_function(dataset_fn)

history = model.fit(
    dist_dataset,
    epochs=epochs,
    steps_per_epoch=number_of_batches_in_the_x_set,
    verbose=1,
    max_queue_size=max_queue_size,
    validation_data=None,
    workers=number_of_workers,
    use_multiprocessing=True
)

You are pickling because you're using multiprocessing: multiprocessing has to pickle whatever it runs in order to send it to the new Python processes. Since your train_generator is needed in each worker process, it gets sent, i.e. pickled, and generator objects cannot be pickled.
As the linked question notes, you can avoid this by not using a generator: trivially, materialize the batches into a list before handing them to fit; more sensibly, rewrite your generator so it returns the data instead of yielding it.
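Another option, not spelled out in the answer above but a common workaround: wrap the same batching logic in a tf.keras.utils.Sequence, which Keras can use safely with workers and use_multiprocessing. A minimal sketch, assuming x_train_df and y_train_df are the DataFrames from the question:

import numpy as np
import tensorflow as tf

class DataFrameSequence(tf.keras.utils.Sequence):
    """Picklable batch provider; a hypothetical replacement for generator_train."""
    def __init__(self, x_df, y_df, batch_size):
        self.x_df = x_df
        self.y_df = y_df
        self.batch_size = batch_size

    def __len__(self):
        # Number of full batches per epoch.
        return len(self.x_df) // self.batch_size

    def __getitem__(self, i):
        batch = slice(i * self.batch_size, (i + 1) * self.batch_size)
        return np.array(self.x_df[batch]), np.array(self.y_df[batch])

# train_seq = DataFrameSequence(x_train_df, y_train_df, batch_size)
# history = model.fit(train_seq, epochs=epochs_no, workers=8, use_multiprocessing=True)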

Related

Problem with DataLoader object not subscriptable

I am running a Python program using PyTorch. I use my own dataset, not torch.data.dataset. I load data from a pickle file produced during feature extraction. But the following errors appear:
Traceback (most recent call last):
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demo-emotion.py", line 326, in <module>
fire.Fire(demo)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 138, in Fire
component_trace = _Fire(component, args, parsed_flag_args, context, name)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 468, in _Fire
target=component.__name__)
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\fire\core.py", line 672, in _CallAndUpdateTrace
component = fn(*varargs, **kwargs)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demo-emotion.py", line 304, in demo
train(model,train_set1, valid_set=valid_set, test_set=test1, save=save, n_epochs=n_epochs,batch_size=batch_size,seed=seed)
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demo-emotion.py", line 172, in train
n_epochs=n_epochs,
File "C:\Users\hp\Downloads\efficient_densenet_pytorch-master\demo-emotion.py", line 37, in train_epoch
loader=np.asarray(list(loader))
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 345, in __next__
data = self._next_data()
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataloader.py", line 385, in _next_data
data = self._dataset_fetcher.fetch(index) # may raise StopIteration
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\fetch.py", line 44, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\_utils\fetch.py", line 44, in <listcomp>
data = [self.dataset[idx] for idx in possibly_batched_index]
File "C:\Users\hp\Anaconda3\envs\tf-gpu\lib\site-packages\torch\utils\data\dataset.py", line 257, in __getitem__
return self.dataset[self.indices[idx]]
TypeError: 'DataLoader' object is not subscriptable
The code is:
train_set1 = Owndata()
train1, test1 = train_set1.get_splits()
# prepare data loaders
train_dl = torch.utils.data.DataLoader(train1, batch_size=32, shuffle=True)
test_dl = torch.utils.data.DataLoader(test1, batch_size=1024, shuffle=False)
test_set1 = Owndata()
'''print('test_set# ',test_set)'''
if valid_size:
    valid_set = Owndata()
    indices = torch.randperm(len(train_set1))
    train_indices = indices[:len(indices) - valid_size]
    valid_indices = indices[len(indices) - valid_size:]
    train_set1 = torch.utils.data.Subset(train_dl, train_indices)
    valid_set = torch.utils.data.Subset(valid_set, valid_indices)
else:
    valid_set = None

model = DenseNet(
    growth_rate=growth_rate,
    block_config=block_config,
    num_classes=10,
    small_inputs=True,
    efficient=efficient,
)

train(model, train_set1, valid_set=valid_set, test_set=test1, save=save, n_epochs=n_epochs, batch_size=batch_size, seed=seed)
Any help is appreciated! Thanks a lot in advance!!
It is not this line that gives you the error; it is the very last train function, which you are not showing.
You are confusing two things:
torch.utils.data.Dataset is indexable (dataset[5] works fine, for example). It is a simple object that defines how to get a single sample of data.
torch.utils.data.DataLoader is not indexable, only iterable; it usually returns batches of data from the above Dataset and can work in parallel using num_workers. It is what you are trying to index, while you should be using the dataset for that.
Please see PyTorch documentation about data to get a better grasp on how those work.
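A minimal sketch of what that means for the code in the question (hypothetical, assuming Owndata() is a regular Dataset and valid_size is set): build the Subsets from the dataset, not from the DataLoader, and only then wrap them in loaders:

import torch

# Split the *dataset*; a DataLoader is not subscriptable, so Subset cannot wrap it.
indices = torch.randperm(len(train_set1)).tolist()   # plain ints index any custom Dataset
train_indices = indices[:len(indices) - valid_size]
valid_indices = indices[len(indices) - valid_size:]

train_subset = torch.utils.data.Subset(train_set1, train_indices)  # dataset, not train_dl
valid_subset = torch.utils.data.Subset(train_set1, valid_indices)

# Wrap the subsets in DataLoaders for batching/iteration only.
train_dl = torch.utils.data.DataLoader(train_subset, batch_size=32, shuffle=True)
valid_dl = torch.utils.data.DataLoader(valid_subset, batch_size=32, shuffle=False)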

PyTorch train function, Variables and Tensors (read my introduction; I don't really know what my problem is, it just doesn't work)

I started learning PyTorch with a video about MNIST handwriting. The video is two years old and I guess some things have changed since then, because it doesn't work as shown there. I'm a complete beginner, so I don't know what my error is or what I'm doing wrong; I just typed in everything the presenter says and want to understand and learn it this way (if you know better ways to learn machine learning / deep learning, I'd appreciate them). My code looks like this:
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
import os
from torchvision import datasets, transforms

kwargs = {'num_workers': 1, 'pin_memory': True}
train_data = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=True, download=True,
                   transform=transforms.Compose([transforms.ToTensor(),
                                                 transforms.Normalize((0.1307,), (0.3081,))])),
    batch_size=64, shuffle=True, **kwargs)
test_data = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=False,
                   transform=transforms.Compose([transforms.ToTensor(),
                                                 transforms.Normalize((0.1307,), (0.3081,))])),
    batch_size=64, shuffle=True, **kwargs)
Everything above works as in the video and I can find the data in a folder. Now comes the class; it doesn't look like there is an error in it, but I don't know:
class Netz(nn.Module):
    def __init__(self):
        super(Netz, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=4)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=4)
        self.conv_dropout = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 60)
        self.fc2 = nn.Linear(60, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.max_pool2d(x, 4)
        x = F.relu(x)
        x = self.conv2(x)
        x = self.conv_dropout(x)
        x = F.max_pool2d(x, 4)
        x = F.relu(x)
        print(x.size())
        exit()

model = Netz()
model.cuda()
Something with this Variable function is wrong; it just doesn't work, and PyCharm also indicates that something is wrong, but I don't know what, so I'm asking here. I also googled a bit and it looks like this Variable thing was removed, but I don't know what to write instead:
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.8)

def train(epoch):
    model.train()
    for batch_id, (data, target) in enumerate(train_data):
        data = data.cuda()
        target = target.cuda()
        data = Variable(data)
        target = Variable(target)
        optimizer.zero_grad()
        out = model(data)
        criterion = F.nll_loss
        loss = criterion(out, target)
        loss.backward()
        optimizer.step()

for epoch in range(1, 30):
    train(epoch)
The error looks like this:
Traceback (most recent call last):
File "<string>", line 1, in <module>
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 105, in spawn_main
exitcode = _main(fd)
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 114, in _main
prepare(preparation_data)
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 225, in prepare
_fixup_main_from_path(data['init_main_from_path'])
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 277, in _fixup_main_from_path
run_name="__mp_main__")
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 263, in run_path
pkg_name=pkg_name, script_name=fname)
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\Users\Finnw\PycharmProjects\pytorch 3.7\mnist handwriting.py", line 60, in <module>
train(epoch)
File "C:\Users\Finnw\PycharmProjects\pytorch 3.7\mnist handwriting.py", line 46, in train
for batch_id, (data, target) in enumerate(train_data):
File "C:\Users\Finnw\PycharmProjects\pytorch 3.7\venv\lib\site-packages\torch\utils\data\dataloader.py", line 279, in __iter__
return _MultiProcessingDataLoaderIter(self)
File "C:\Users\Finnw\PycharmProjects\pytorch 3.7\venv\lib\site-packages\torch\utils\data\dataloader.py", line 719, in __init__
w.start()
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\multiprocessing\process.py", line 112, in start
self._popen = self._Popen(self)
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\multiprocessing\popen_spawn_win32.py", line 46, in __init__
prep_data = spawn.get_preparation_data(process_obj._name)
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 143, in get_preparation_data
_check_not_importing_main()
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\multiprocessing\spawn.py", line 136, in _check_not_importing_main
is not going to be frozen to produce an executable.''')
RuntimeError:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
The "freeze_support()" line can be omitted if the program
is not going to be frozen to produce an executable.
Traceback (most recent call last):
File "C:\Users\Finnw\PycharmProjects\pytorch 3.7\venv\lib\site-packages\torch\utils\data\dataloader.py", line 761, in _try_get_data
data = self._data_queue.get(timeout=timeout)
File "C:\Users\Finnw\AppData\Local\Programs\Python\Python37\lib\queue.py", line 178, in get
raise Empty
_queue.Empty
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:/Users/Finnw/PycharmProjects/pytorch 3.7/mnist handwriting.py", line 60, in <module>
train(epoch)
File "C:/Users/Finnw/PycharmProjects/pytorch 3.7/mnist handwriting.py", line 46, in train
for batch_id, (data, target) in enumerate(train_data):
File "C:\Users\Finnw\PycharmProjects\pytorch 3.7\venv\lib\site-packages\torch\utils\data\dataloader.py", line 345, in __next__
data = self._next_data()
File "C:\Users\Finnw\PycharmProjects\pytorch 3.7\venv\lib\site-packages\torch\utils\data\dataloader.py", line 841, in _next_data
idx, data = self._get_data()
File "C:\Users\Finnw\PycharmProjects\pytorch 3.7\venv\lib\site-packages\torch\utils\data\dataloader.py", line 798, in _get_data
success, data = self._try_get_data()
File "C:\Users\Finnw\PycharmProjects\pytorch 3.7\venv\lib\site-packages\torch\utils\data\dataloader.py", line 774, in _try_get_data
raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str))
RuntimeError: DataLoader worker (pid(s) 10444) exited unexpectedly
Process finished with exit code 1
I believe just setting num_workers to zero would solve your problem. Another thing that would solve it is to place your code inside a main function (see the sketch after the quoted error below).
The reasons for this can be found here:
https://docs.python.org/2/library/multiprocessing.html#multiprocessing-programming . In short, num_workers tells PyTorch to load data samples in parallel, launching num_workers worker processes, so that batches can be served as fast as possible to your training loop.
The error code you gave actually tells you pretty much the same thing:
An attempt has been made to start a new process before the
current process has finished its bootstrapping phase.
This probably means that you are not using fork to start your
child processes and you have forgotten to use the proper idiom
in the main module:
if __name__ == '__main__':
freeze_support()
...
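A minimal sketch of the main-guard fix, keeping the rest of the question's script unchanged: only the code that actually iterates the DataLoader (and therefore starts the workers) needs to be protected, so that a re-import of the module in a spawned child process does not run it again:

# ... dataset, DataLoader, Netz, model, optimizer and train() defined
#     at module level exactly as in the question ...

if __name__ == '__main__':
    # On Windows there is no fork: worker processes are spawned and re-import
    # this module, so the training loop must not run at import time.
    for epoch in range(1, 30):
        train(epoch)

Alternatively, setting num_workers to 0 in the kwargs dict avoids spawning worker processes altogether.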

Custom Callback is not working with a Keras model

I am trying to evaluate my tf.keras model from within a Callback. Calling model.evaluate after fit works, but inside the Callback it does not.
class Eval(Callback):
    def __init__(self, model, validation_data, eval_batch=1):
        super(Callback, self).__init__()
        self.validation_data = validation_data
        self.eval_batch = eval_batch
        self.model = model
        #self.X_val, self.y_val = validation_data

    def on_epoch_end(self, epoch, logs={}):
        if epoch % 1 == 0:
            print(epoch)
            print(self.validation_data)
            res = self.model.evaluate(self.validation_data, batch_size=self.eval_batch)
            print(res)
            precisions.append(res[3])
            print("Evaluation - epoch: {:d} - Eval_Prec: {:.6f}".format(epoch, res[3]))
Calling it:
evaluation = Eval(keras_model, eval_input)

keras_model.fit(
    x_train, y_train,
    epochs=args.num_epochs,
    validation_data=(x_test, y_test),
    batch_size=args.batch_size,
    verbose=1,
    callbacks=[evaluation])
And here is the full traceback:
Traceback (most recent call last):
File "keras_cloud.py", line 435, in <module>
train_and_evaluate(args)
File "keras_cloud.py", line 421, in train_and_evaluate
callbacks=[lr_decay_cb,evaluation])#,tensorboard_cb])
File "C:\Users\\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1639, in fit
validation_steps=validation_steps)
File "C:\Users\\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training_arrays.py", line 239, in fit_loop
callbacks.on_epoch_end(epoch, epoch_logs)
File "C:\Users\\Anaconda3\lib\site-packages\tensorflow\python\keras\callbacks.py", line 214, in on_epoch_end
callback.on_epoch_end(epoch, logs)
File "keras_cloud.py", line 312, in on_epoch_end
res=self.model.evaluate(self.validation_data, batch_size=self.eval_batch)
File "C:\Users\\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1751, in evaluate
steps=steps)
File "C:\Users\\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py", line 992, in _standardize_user_data
class_weight, batch_size)
File "C:\Users\\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1117, in _standardize_weights
exception_prefix='input')
File "C:\Users\\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training_utils.py", line 284, in standardize_input_data
data = [standardize_single_array(x) for x in data]
File "C:\Users\\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training_utils.py", line 284, in <listcomp>
data = [standardize_single_array(x) for x in data]
File "C:\Users\\Anaconda3\lib\site-packages\tensorflow\python\keras\engine\training_utils.py", line 218, in standardize_single_array
if x.shape is not None and len(x.shape) == 1:
AttributeError: 'float' object has no attribute 'shape'
But the following works:
keras_model.fit(...)
print(keras_model.evaluate(eval_input, steps=1))
I have no clue what is going on with that Callback.
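No answer is included here, but one hedged reading of the traceback: during fit, Keras overwrites each callback's self.validation_data with its own list (which can contain a trailing float such as the learning-phase flag), so passing that straight to evaluate ends up in standardize_single_array with a float and fails. A sketch of a callback that sidesteps this by storing the data under a different name and unpacking it explicitly (it assumes the validation data is an (x, y) pair of arrays; this is not the asker's confirmed fix):

import numpy as np
from tensorflow.keras.callbacks import Callback

class Eval(Callback):
    def __init__(self, validation_data, eval_batch=1):
        super(Eval, self).__init__()
        # Keep our own reference; fit() reassigns `self.validation_data` on callbacks.
        self.eval_data = validation_data
        self.eval_batch = eval_batch

    def on_epoch_end(self, epoch, logs=None):
        x_val, y_val = self.eval_data                  # assumed (x, y) tuple
        res = self.model.evaluate(x_val, y_val,        # self.model is set by Keras itself
                                  batch_size=self.eval_batch, verbose=0)
        print("Evaluation - epoch: {:d} - results: {}".format(epoch, res))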

Error when activating use_multiprocessing in fit_generator on Windows

I'm trying to run my CNN Python code with use_multiprocessing=True in the fit_generator function, but I get an error. It works just fine with a single process, but then the CPU load is only 20% and the GPU load 8%.
I'm running on an MSI laptop with Windows 10, a Core i7-7820HK CPU and an NVIDIA GTX 1080, using the TensorFlow backend.
This is my code:
# Part 1 - Building the CNN

# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator

# Initialising the CNN
classifier = Sequential()

# Step 1 - Convolution
classifier.add(Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))

# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Adding a second convolutional layer
classifier.add(Conv2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Step 3 - Flattening
classifier.add(Flatten())

# Step 4 - Full connection
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))

# Compiling the CNN
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Part 2 - Fitting the CNN to the images
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)

training_set = train_datagen.flow_from_directory('dataset\\training_set',
                                                 target_size=(64, 64),
                                                 batch_size=32,
                                                 class_mode='binary')
test_set = test_datagen.flow_from_directory('dataset\\test_set',
                                            target_size=(64, 64),
                                            batch_size=32,
                                            class_mode='binary')

if __name__ == '__main__':
    classifier.fit_generator(training_set,
                             workers=8,
                             max_queue_size=100,
                             use_multiprocessing=True,
                             steps_per_epoch=(8000 / 32),
                             epochs=25,
                             validation_data=test_set,
                             validation_steps=(2000 / 32))
And I get this error:
Using TensorFlow backend.
Found 8000 images belonging to 2 classes.
Found 2000 images belonging to 2 classes.
Epoch 1/25
Exception in thread Thread-24:
Traceback (most recent call last):
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\threading.py", line 916, in _bootstrap_inner
    self.run()
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\threading.py", line 864, in run
    self._target(*self._args, **self._kwargs)
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\site-packages\keras\utils\data_utils.py", line 548, in _run
    with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\site-packages\keras\utils\data_utils.py", line 522, in <lambda>
    initargs=(seqs,))
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\context.py", line 119, in Pool
    context=self.get_context())
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\pool.py", line 174, in __init__
    self._repopulate_pool()
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\pool.py", line 239, in _repopulate_pool
    w.start()
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\process.py", line 105, in start
    self._popen = self._Popen(self)
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\context.py", line 322, in _Popen
    return Popen(process_obj)
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
    reduction.dump(process_obj, to_child)
  File "C:\Users\MSI-GT75\Anaconda3\envs\cnn\lib\multiprocessing\reduction.py", line 60, in dump
    ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects

Exception in thread Thread-23:
Traceback (most recent call last):
  (identical to the Thread-24 traceback above)
TypeError: can't pickle _thread.lock objects
After updating all packages, this error shows instead of the one above:
ValueError: Using a generator with `use_multiprocessing=True` is not supported on Windows (no marshalling of generators across process boundaries). Instead, use single thread/process or multithreading.
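No answer appears here, but the updated error message itself points at the workaround: on Windows, keep the generators but drop multiprocessing and rely on multithreaded workers instead. A minimal sketch of that change to the call from the question:

if __name__ == '__main__':
    classifier.fit_generator(training_set,
                             workers=8,                   # worker threads, not processes
                             max_queue_size=100,
                             use_multiprocessing=False,   # generators cannot cross process boundaries on Windows
                             steps_per_epoch=8000 // 32,
                             epochs=25,
                             validation_data=test_set,
                             validation_steps=2000 // 32)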

TensorFlow: feed_dict key as Tensor error with threading

I am running a Django server and just realised I am facing a problem when I start a daemon thread and load my weights.
Has anyone faced the same problem, and how did you manage to solve it?
I used to solve this problem with the clear_session method from Keras, but now it seems that it no longer works.
Exception in thread Thread-7:
Traceback (most recent call last):
File "/Users/ale/.virtualenvs/LoceyeCloud/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1064, in _run
allow_operation=False)
File "/Users/ale/.virtualenvs/LoceyeCloud/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3035, in as_graph_element
return self._as_graph_element_locked(obj, allow_tensor, allow_operation)
File "/Users/ale/.virtualenvs/LoceyeCloud/lib/python3.6/site-packages/tensorflow/python/framework/ops.py", line 3114, in _as_graph_element_locked
raise ValueError("Tensor %s is not an element of this graph." % obj)
ValueError: Tensor Tensor("Placeholder:0", shape=(3, 3, 1, 64), dtype=float32) is not an element of this graph.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/Users/ale/platform/Server/server_back/server_backend.py", line 196, in execute
eyetracker = LoceyeEyetracker(self.screen_dimensions)
File "/Users/ale/platform/Server/server_back/LoceyeEyetracker.py", line 24, in __init__
self.iris_detector = IrisDetectorCnn(weights_filename, blink_filename)
File "/Users/ale/platform/Server/server_back/IrisDetectorCnn.py", line 17, in __init__
self.model = load_model(model_filename)
File "/Users/ale/.virtualenvs/LoceyeCloud/lib/python3.6/site-packages/keras/models.py", line 246, in load_model
topology.load_weights_from_hdf5_group(f['model_weights'], model.layers)
File "/Users/ale/.virtualenvs/LoceyeCloud/lib/python3.6/site-packages/keras/engine/topology.py", line 3166, in load_weights_from_hdf5_group
K.batch_set_value(weight_value_tuples)
File "/Users/ale/.virtualenvs/LoceyeCloud/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2370, in batch_set_value
get_session().run(assign_ops, feed_dict=feed_dict)
File "/Users/ale/.virtualenvs/LoceyeCloud/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 889, in run
run_metadata_ptr)
File "/Users/ale/.virtualenvs/LoceyeCloud/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1067, in _run
+ e.args[0])
TypeError: Cannot interpret feed_dict key as Tensor: Tensor Tensor("Placeholder:0", shape=(3, 3, 1, 64), dtype=float32) is not an element of this graph.
And my code for initialising the object and loading the weights:
def startPredict(self):
    async = threading.Thread(target=self.execute)
    async.setDaemon(False)
    async.start()

def execute(self):
    clear_session()
    eyetracker = LoceyeEyetracker(self.screen_dimensions)
And my class is the following one.
class LoceyeEyetracker:
    def __init__(self, screen_size,
                 weights_filename=CNN_MODEL_FILENAME,
                 blink_filename=CNN_BLINK_MODEL_FILENAME,
                 reference_filename=CNN_REFERENCE_MODEL_FILENAME):
        self.iris_detector = IrisDetectorCnn(weights_filename, blink_filename)
        self.screen_mapper = ScreenCoordinateMapper(screen_size)
        self.reference_detector = ReferencePointDetector(reference_filename)
        self.last_reference = PointPair(left=Point(x=0, y=0), right=Point(x=0, y=0))
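No answer is attached here either; a hedged sketch of the usual TF1-era workaround for "is not an element of this graph" in server threads: load the model once, remember the graph it was loaded into, and make the worker thread use that graph explicitly. The names and the weights path below are illustrative, not from the question:

import threading
import numpy as np
import tensorflow as tf
from keras.models import load_model

# Load the model once, in the main thread, and remember its graph.
model = load_model('weights.h5')        # hypothetical filename
graph = tf.get_default_graph()

def execute():
    # Any use of the model from another thread must happen inside the
    # graph it was loaded into, otherwise Keras feeds tensors from a new graph.
    dummy = np.zeros((1,) + model.input_shape[1:])   # assumes a fixed input shape
    with graph.as_default():
        print(model.predict(dummy))

t = threading.Thread(target=execute, daemon=True)
t.start()
t.join()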
