spaCy trained model: input dimension error - python

Here is where I load my model:
_model = r"C:\Users\evead\Desktop\spacy_model_config_03_15\model-best"
nlp = spacy.load(_model)
txt = "Below are my data loader and neural net. I have also included the output of my data loader when I retrieve a batch of data"
doc = nlp(txt)
Here is the error I got:
Exception has occurred: RuntimeError
input must have 3 dimensions, got 2
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\src\debug_entity.py", line 46, in <module>
doc = nlp(txt)

Here is my complete error, from running spacy evaluate on the same model:
(env) PS C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER> python -m spacy evaluate C:\Users\evead\Desktop\spacy_model_config_03_15\model-best C:\Users\evead\Desktop\batchdata-3-15\train.spacy --output C:\Users\evead\Desktop\batchdata-3-15\res.json
Using CPU
C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\torch\autocast_mode.py:141: UserWarning: User provided device_type of 'cuda', but CUDA is not available. Disabling
warnings.warn('User provided device_type of \'cuda\', but CUDA is not available. Disabling')
Traceback (most recent call last):
File "C:\Users\evead\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\evead\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\__main__.py", line 4, in <module>
setup_cli()
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\cli\_util.py", line 71, in setup_cli
command(prog_name=COMMAND)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\click\core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\click\core.py", line 1053, in main
rv = self.invoke(ctx)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\click\core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\click\core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\click\core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\typer\main.py", line 500, in wrapper
return callback(**use_params) # type: ignore
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\cli\evaluate.py", line 42, in evaluate_cli
evaluate(
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\cli\evaluate.py", line 78, in evaluate
scores = nlp.evaluate(dev_dataset)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\language.py", line 1415, in evaluate
for eg, doc in zip(examples, docs):
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\language.py", line 1575, in pipe
for doc in docs:
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\util.py", line 1598, in _pipe
yield from proc.pipe(docs, **kwargs)
File "spacy\pipeline\transition_parser.pyx", line 230, in pipe
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\util.py", line 1547, in minibatch
batch = list(itertools.islice(items, int(batch_size)))
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\util.py", line 1598, in _pipe
yield from proc.pipe(docs, **kwargs)
File "spacy\pipeline\trainable_pipe.pyx", line 79, in pipe
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\util.py", line 1617, in raise_error
raise e
File "spacy\pipeline\trainable_pipe.pyx", line 75, in spacy.pipeline.trainable_pipe.TrainablePipe.pipe
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\pipeline\tok2vec.py", line 125, in predict
tokvecs = self.model.predict(docs)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\model.py", line 315, in predict
return self._func(self, X, is_train=False)[0]
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\chain.py", line 54, in forward
Y, inc_layer_grad = layer(X, is_train=is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\model.py", line 291, in __call__
return self._func(self, X, is_train=is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\with_array.py", line 40, in forward
return _list_forward(cast(Model[List2d, List2d], model), Xseq, is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\with_array.py", line 76, in _list_forward
Yf, get_dXf = layer(Xf, is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\model.py", line 291, in __call__
return self._func(self, X, is_train=is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\with_padded.py", line 36, in forward
Y, backprop = _array_forward(layer, cast(Floats3d, Xseq), is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\with_padded.py", line 76, in _array_forward
Yp, get_dXp = layer(Xp, is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\model.py", line 291, in __call__
return self._func(self, X, is_train=is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\with_padded.py", line 30, in forward
Y, backprop = layer(Xseq, is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\model.py", line 291, in __call__
return self._func(self, X, is_train=is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\pytorchwrapper.py", line 134, in forward
Ytorch, torch_backprop = model.shims[0](Xtorch, is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\shims\pytorch.py", line 56, in __call__
return self.predict(inputs), lambda a: ...
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\shims\pytorch.py", line 66, in predict
outputs = self._model(*inputs.args, **inputs.kwargs)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\torch\nn\modules\rnn.py", line 689, in forward
self.check_forward_args(input, hx, batch_sizes)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\torch\nn\modules\rnn.py", line 632, in check_forward_args
self.check_input(input, batch_sizes)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\torch\nn\modules\rnn.py", line 201, in check_input
raise RuntimeError(
RuntimeError: input must have 3 dimensions, got 2
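The traceback shows the crash happening inside the tok2vec's wrapped PyTorch LSTM during batched prediction, so one way to narrow it down is to run the evaluation texts through the pipeline one at a time and see which input trips the shape check. Here is a minimal sketch, assuming the failure is data-dependent (the .spacy path is the one passed to spacy evaluate above):

import spacy
from spacy.tokens import DocBin

_model = r"C:\Users\evead\Desktop\spacy_model_config_03_15\model-best"
nlp = spacy.load(_model)

doc_bin = DocBin().from_disk(r"C:\Users\evead\Desktop\batchdata-3-15\train.spacy")
for i, gold in enumerate(doc_bin.get_docs(nlp.vocab)):
    try:
        nlp(gold.text)  # run the full pipeline on a single text
    except RuntimeError as err:
        print(i, repr(gold.text[:80]), err)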

Related

Streamlit error: AttributeError: 'OneHotEncoder' object has no attribute '_infrequent_enabled'

Traceback:
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\streamlit\runtime\scriptrunner\script_runner.py", line 565, in _run_script
exec(code, module.__dict__)
File "C:\Users\spand\Desktop\laptop price prediction\app.py", line 68, in <module>
st.title(int(np.exp(pipe.predict(query))))
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\pipeline.py", line 457, in predict
Xt = transform.transform(Xt)
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\compose\_column_transformer.py", line 763, in transform
Xs = self._fit_transform(
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\compose\_column_transformer.py", line 621, in _fit_transform
return Parallel(n_jobs=self.n_jobs)(
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\joblib\parallel.py", line 1085, in __call__
if self.dispatch_one_batch(iterator):
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\joblib\parallel.py", line 901, in dispatch_one_batch
self._dispatch(tasks)
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\joblib\parallel.py", line 819, in _dispatch
job = self._backend.apply_async(batch, callback=cb)
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\joblib\_parallel_backends.py", line 208, in apply_async
result = ImmediateResult(func)
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\joblib\_parallel_backends.py", line 597, in __init__
self.results = batch()
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\joblib\parallel.py", line 288, in __call__
return [func(*args, **kwargs)
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\joblib\parallel.py", line 288, in <listcomp>
return [func(*args, **kwargs)
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\utils\fixes.py", line 117, in __call__
return self.function(*args, **kwargs)
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\pipeline.py", line 853, in _transform_one
res = transformer.transform(X)
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\preprocessing\_encoders.py", line 888, in transform
self._map_infrequent_categories(X_int, X_mask)
File "C:\Users\spand\AppData\Local\Programs\Python\Python310\lib\site-packages\sklearn\preprocessing\_encoders.py", line 726, in _map_infrequent_categories
if not self._infrequent_enabled:
How do I overcome this problem? I can't find the reason behind it.
Please check your package versions: the pipeline was likely pickled under a different scikit-learn version than the one installed, so update (or pin) scikit-learn to match.
ColumnTransformer(transformers=[('col_tnf', OneHotEncoder(sparse=False, drop='first'), [0, 1, 3, 8, 11])], remainder='passthrough')
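A quick check worth doing, assuming the cause is a pickle/runtime version mismatch (the _infrequent_enabled attribute only exists on OneHotEncoder in some scikit-learn releases); pipe.pkl is a hypothetical name for the pickled pipeline the app loads:

import pickle
import sklearn

# A pipeline pickled under one scikit-learn version can break when
# unpickled under another, because private attributes such as
# _infrequent_enabled come and go between releases.
print("runtime scikit-learn:", sklearn.__version__)

with open("pipe.pkl", "rb") as f:  # hypothetical pickle of the pipeline
    pipe = pickle.load(f)

If the version printed here differs from the one used to fit and pickle the pipeline, re-fit and re-pickle it under the version the Streamlit app actually runs with.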

Keras returning AttributeError: 'Dense' object has no attribute '_saved_model_inputs_spec'

I am trying to upgrade code from TensorFlow 1.x to TensorFlow 2.x, since I need to use it with Python >= 3.8. The code is from someone else who is no longer updating it; you can find it here if needed: https://github.com/NetManAIOps/donut.
I have used the tools provided by TensorFlow to upgrade it (i.e., running tf_upgrade_v2 on each file).
I have been stuck for a while on an error that I cannot track down:
File "test_model.py", line 227, in <module>
get_anomaly_score(test_files, KPI=args.KPI)
File "test_model.py", line 79, in get_anomaly_score
test_score = dm.predictor.get_score(test_values, test_missing)
File "/home/miguel/gitlab_projects/test/donut_haowen/donut/prediction.py", line 138, in get_score
b_r = sess.run(self._get_score(), feed_dict=feed_dict)
File "/home/miguel/gitlab_projects/test/donut_haowen/donut/prediction.py", line 64, in _get_score
self._score = self.model.get_score(
File "/home/miguel/gitlab_projects/test/donut_haowen/donut/model.py", line 197, in get_score
x_r = iterative_masked_reconstruct(
File "/home/miguel/gitlab_projects/test/donut_haowen/donut/reconstruction.py", line 101, in iterative_masked_reconstruct
x_r, _ = tf.while_loop(
File "/home/miguel/anaconda3/envs/test/lib/python3.8/site-packages/tensorflow/python/util/deprecation.py", line 629, in new_func
return func(*args, **kwargs)
File "/home/miguel/anaconda3/envs/test/lib/python3.8/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2516, in while_loop_v2
return while_loop(
File "/home/miguel/anaconda3/envs/test/lib/python3.8/site-packages/tensorflow/python/ops/control_flow_ops.py", line 2716, in while_loop
return while_v2.while_loop(
File "/home/miguel/anaconda3/envs/test/lib/python3.8/site-packages/tensorflow/python/ops/while_v2.py", line 222, in while_loop
body_graph = func_graph_module.func_graph_from_py_func(
File "/home/miguel/anaconda3/envs/test/lib/python3.8/site-packages/tensorflow/python/framework/func_graph.py", line 1283, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "/home/miguel/anaconda3/envs/test/lib/python3.8/site-packages/tensorflow/python/ops/while_v2.py", line 200, in wrapped_body
outputs = body(
File "/home/miguel/gitlab_projects/test/donut_haowen/donut/reconstruction.py", line 103, in <lambda>
body=lambda x_i, i: (masked_reconstruct(reconstruct, x_i, mask), i + 1),
File "/home/miguel/gitlab_projects/test/donut_haowen/donut/reconstruction.py", line 65, in masked_reconstruct
r_x = reconstruct(x)
File "/home/miguel/gitlab_projects/test/src/tfsnippet/tfsnippet/modules/auto_encoders/vae.py", line 469, in reconstruct
model = self.model(z=q_net['z'], n_z=n_z, n_x=n_x)
File "/home/miguel/gitlab_projects/test/src/tfsnippet/tfsnippet/utils/reuse.py", line 179, in wrapper
return method(*args, **kwargs)
File "/home/miguel/gitlab_projects/test/src/tfsnippet/tfsnippet/modules/auto_encoders/vae.py", line 314, in model
x_params = self.h_for_p_x(z)
File "/home/miguel/gitlab_projects/test/src/tfsnippet/tfsnippet/modules/base.py", line 89, in __call__
return self._forward(inputs, **kwargs)
File "/home/miguel/gitlab_projects/test/src/tfsnippet/tfsnippet/modules/container/lambda_.py", line 47, in _forward
return self._factory(inputs, **kwargs)
File "/home/miguel/gitlab_projects/test/donut_haowen/donut/model.py", line 24, in wrap_params_net
h = h_for_dist(inputs)
File "/home/miguel/gitlab_projects/test/src/tfsnippet/tfsnippet/modules/base.py", line 89, in __call__
return self._forward(inputs, **kwargs)
File "/home/miguel/gitlab_projects/test/src/tfsnippet/tfsnippet/modules/container/sequential.py", line 76, in _forward
outputs = c(outputs, **kwargs)
File "/home/miguel/anaconda3/envs/test/lib/python3.8/site-packages/keras/utils/traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "/home/miguel/anaconda3/envs/test/lib/python3.8/site-packages/keras/engine/base_layer.py", line 1140, in __call__
if self._saved_model_inputs_spec is None:
AttributeError: 'Dense' object has no attribute '_saved_model_inputs_spec'
This occurs when I call prediction.py in the donut package, which calls another package called tfsnippet (which I also upgraded), and that finally calls the Keras package.
Since the code I am using is not mine, I am having trouble writing any kind of minimal reproducible example.
I am looking for any idea of where the issue could be and what parts of the code I should check. At the moment I have no idea why or how this error occurs.
Please tell me if I should add anything.
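One thing worth ruling out, as a guess rather than a confirmed fix: _saved_model_inputs_spec is initialized in Keras's Layer.__init__, so the error usually means a layer object was built on a code path that skipped that initializer, for example a standalone keras package that does not match the tf.keras bundled with your TensorFlow. A quick version check:

import tensorflow as tf
import keras

# After a 1.x -> 2.x upgrade, a standalone `keras` that does not match the
# installed TensorFlow release is a common source of half-initialized layers.
print("tensorflow:", tf.__version__)
print("keras:", keras.__version__)
print("tf.keras:", tf.keras.__version__)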

spacy.load error: RuntimeError: dictionary changed size during iteration

I am loading a spaCy model as part of a step in my Dataflow streaming pipeline. To load the pre-downloaded spaCy model for a specific language I am using
nlp_model = spacy.load(SPACY_KEYS[lang])
where SPACY_KEYS is a dictionary containing the names of the models for each language (e.g. 'en': 'en_core_web_sm').
This works without any issues for the majority of the jobs run by the pipeline, but for a few iterations I am getting the following error:
Error message from worker: generic::unknown: Traceback (most recent call last):
File "apache_beam/runners/common.py", line 1232, in apache_beam.runners.common.DoFnRunner.process
File "apache_beam/runners/common.py", line 752, in apache_beam.runners.common.PerWindowInvoker.invoke_process
File "apache_beam/runners/common.py", line 870, in apache_beam.runners.common.PerWindowInvoker._invoke_process_per_window
File "apache_beam/runners/common.py", line 1368, in apache_beam.runners.common._OutputProcessor.process_outputs
File "/usr/local/lib/python3.7/site-packages/submodules/entities_and_pii_removal.py", line 259, in entities_and_PII
nlp_model = spacy.load(SPACY_KEYS[lang]) # load spacy model
File "/usr/local/lib/python3.7/site-packages/spacy/__init__.py", line 52, in load
name, vocab=vocab, disable=disable, exclude=exclude, config=config
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 420, in load_model
return load_model_from_package(name, **kwargs) # type: ignore[arg-type]
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 453, in load_model_from_package
return cls.load(vocab=vocab, disable=disable, exclude=exclude, config=config) # type: ignore[attr-defined]
File "/usr/local/lib/python3.7/site-packages/de_core_news_sm/__init__.py", line 10, in load
return load_model_from_init_py(__file__, **overrides)
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 621, in load_model_from_init_py
config=config,
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 489, in load_model_from_path
return nlp.from_disk(model_path, exclude=exclude, overrides=overrides)
File "/usr/local/lib/python3.7/site-packages/spacy/language.py", line 2042, in from_disk
util.from_disk(path, deserializers, exclude) # type: ignore[arg-type]
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 1299, in from_disk
reader(path / key)
File "/usr/local/lib/python3.7/site-packages/spacy/language.py", line 2037, in <lambda>
p, exclude=["vocab"]
File "spacy/pipeline/trainable_pipe.pyx", line 343, in spacy.pipeline.trainable_pipe.TrainablePipe.from_disk
File "/usr/local/lib/python3.7/site-packages/spacy/util.py", line 1299, in from_disk
reader(path / key)
File "spacy/pipeline/trainable_pipe.pyx", line 333, in spacy.pipeline.trainable_pipe.TrainablePipe.from_disk.load_model
File "spacy/pipeline/trainable_pipe.pyx", line 334, in spacy.pipeline.trainable_pipe.TrainablePipe.from_disk.load_model
File "/usr/local/lib/python3.7/site-packages/thinc/model.py", line 593, in from_bytes
return self.from_dict(msg)
File "/usr/local/lib/python3.7/site-packages/thinc/model.py", line 624, in from_dict
loaded_value = deserialize_attr(default_value, value, attr, node)
File "/usr/local/lib/python3.7/functools.py", line 840, in wrapper
return dispatch(args[0].__class__)(*args, **kw)
File "/usr/local/lib/python3.7/site-packages/thinc/model.py", line 804, in deserialize_attr
return srsly.msgpack_loads(value)
File "/usr/local/lib/python3.7/site-packages/srsly/_msgpack_api.py", line 27, in msgpack_loads
msg = msgpack.loads(data, raw=False, use_list=use_list)
File "/usr/local/lib/python3.7/site-packages/srsly/msgpack/__init__.py", line 76, in unpackb
for decoder in msgpack_decoders.get_all().values():
File "/usr/local/lib/python3.7/site-packages/catalogue/__init__.py", line 110, in get_all
for keys, value in REGISTRY.items():
RuntimeError: dictionary changed size during iteration
I have not been able to identify the cause of this problem. Is there a way of getting around it?
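One workaround sometimes suggested for this kind of race, sketched under the assumption that several threads in the same worker call spacy.load concurrently (catalogue's REGISTRY is a module-level dict, and iterating it while another thread registers new entries during a load raises exactly this error), is to load each model once behind a lock:

import threading
import spacy

SPACY_KEYS = {'en': 'en_core_web_sm'}  # as in the question

_SPACY_LOCK = threading.Lock()
_NLP_CACHE = {}

def get_nlp(lang):
    # Serialize loads so no thread iterates catalogue's registry while
    # another thread is still registering factories inside spacy.load.
    with _SPACY_LOCK:
        if lang not in _NLP_CACHE:
            _NLP_CACHE[lang] = spacy.load(SPACY_KEYS[lang])
    return _NLP_CACHE[lang]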

Autoencoder in Chainer issue

I am trying to train an autoencoder with Chainer in Python and wrote the code below, but it does not work. Why?
import numpy as np
import chainer.links as L
from chainer import Chain, dataset, iterators, optimizers, training

class Autoencoder(Chain):
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(3, 2)
            self.l2 = L.Linear(2, 3)

    def __call__(self, x):
        h1 = self.l1(x)
        h2 = self.l2(h1)
        return h2

class Dataset(dataset.DatasetMixin):
    def __init__(self, number_of_data, show_initial=False):
        noise_level = 1
        self.data = np.zeros((number_of_data, 3), dtype=np.float32)
        OA_vector = np.array([3, 2, 1])
        OB_vector = np.array([2, -1, 1])
        t = np.random.uniform(-0.5, 0.5, number_of_data)
        s = np.random.uniform(-0.5, 0.5, number_of_data)
        for i in range(0, number_of_data):
            noise = np.random.uniform(-noise_level, noise_level, 3)
            self.data[i] = t[i] * OA_vector + s[i] * OB_vector + noise

    def __len__(self):
        return self.data.shape[0]

    def get_example(self, idx):
        return self.data[idx]

if __name__ == "__main__":
    n_epoch = 5
    batch_size = 100
    number_of_data = 1000  # number of data points
    train_data = Dataset(number_of_data, False)
    model = Autoencoder()
    optimizer = optimizers.SGD(lr=0.05).setup(model)
    train_iter = iterators.SerialIterator(train_data, batch_size)
    updater = training.StandardUpdater(train_iter, optimizer, device=0)
    trainer = training.Trainer(updater, (n_epoch, "epoch"), out="result")
    trainer.run()
I am using Chainer, and the Dataset produces 3-dimensional vectors; the number of vectors is number_of_data.
Should I do this without using the trainer?
I don't understand where the problem is.
EDIT
When we run the above code with device=0, we get the following error:
Exception in main training loop: Unsupported type <class 'NoneType'>
Traceback (most recent call last):
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
File "cupy/core/core.pyx", line 1310, in cupy.core.core.ndarray.__mul__
File "cupy/core/elementwise.pxi", line 753, in cupy.core.core.ufunc.__call__
File "cupy/core/elementwise.pxi", line 68, in cupy.core.core._preprocess_args
Will finalize trainer extensions and updater before reraising the exception.
Traceback (most recent call last):
File "AC.py", line 70, in <module>
trainer.run()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 322, in run
six.reraise(*sys.exc_info())
File "/home/****/.local/lib/python3.5/site-packages/six.py", line 693, in reraise
raise value
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
File "cupy/core/core.pyx", line 1310, in cupy.core.core.ndarray.__mul__
File "cupy/core/elementwise.pxi", line 753, in cupy.core.core.ufunc.__call__
File "cupy/core/elementwise.pxi", line 68, in cupy.core.core._preprocess_args
TypeError: Unsupported type <class 'NoneType'>
When we run the above code with device=-1, we get the following error:
Exception in main training loop: unsupported operand type(s) for *: 'bool' and 'NoneType'
Traceback (most recent call last):
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
Will finalize trainer extensions and updater before reraising the exception.
Traceback (most recent call last):
File "AC.py", line 70, in <module>
trainer.run()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 322, in run
six.reraise(*sys.exc_info())
File "/home/****/.local/lib/python3.5/site-packages/six.py", line 693, in reraise
raise value
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
TypeError: unsupported operand type(s) for *: 'bool' and 'NoneType'
I think the model needs to return the loss in its __call__ method. A sample modification is as follows:
import chainer.functions as F

class Autoencoder(Chain):
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(3, 2)
            self.l2 = L.Linear(2, 3)

    def forward(self, x):
        h1 = self.l1(x)
        h2 = self.l2(h1)
        return h2

    def __call__(self, x):
        h = self.forward(x)
        # Instead of h, __call__ should return the loss.
        loss = F.mean_squared_error(h, x)
        return loss
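This works because StandardUpdater, when given no explicit loss function, calls the optimizer's target link (the model) on each batch and runs backward() on whatever __call__ returns. Returning the raw reconstruction h instead of a scalar loss leaves the gradients undefined, which is how the NoneType errors in the tracebacks above arise.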

Feeding data through an embedding wrapper in TensorFlow

I'm working on a text summarization network and need to implement an encoder to use with tf.nn.seq2seq.embedding_attention_decoder. As part of that I need to encode varying batches of sequences into representation vectors, but the innermost encoding doesn't go through.
Here's a simplified snippet giving the same error:
import tensorflow as tf

single_cell = tf.nn.rnn_cell.GRUCell(1024)
sentence_cell = tf.nn.rnn_cell.EmbeddingWrapper(single_cell, embedding_classes=40000)
batch = [tf.placeholder(tf.int32, [1, 1]) for _ in range(250)]
(_, state) = tf.nn.rnn(sentence_cell, batch, dtype=tf.int32)
This fails with the following stack trace:
Traceback (most recent call last):
File "/home/ubuntu/workspace/example.py", line 6, in <module>
(_ , state) = tf.nn.rnn(sentence_cell, batch, dtype= tf.int32)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 126, in rnn
(output, state) = call_cell()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 119, in <lambda>
call_cell = lambda: cell(input_, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 616, in __call__
return self._cell(embedded, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 150, in __call__
2 * self._num_units, True, 1.0))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 706, in linear
res = math_ops.matmul(array_ops.concat(1, args), matrix)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 314, in concat
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 70, in _concat
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 396, in apply_op
raise TypeError("%s that don't all match." % prefix)
TypeError: Tensors in list passed to 'values' of 'Concat' Op have types [float32, int32] that don't all match.
When debugging, the input size of the sentence_cell is 1 and the elements in batch all have dimension [1, 1], which is in fact [batch_size, sentence_cell.input_size].
Switching to dtype=tf.float32 in the call to tf.nn.rnn() makes the snippet work, but gives me the following stack trace in my code:
[nltk_data] Downloading package punkt to /home/alex/nltk_data...
[nltk_data] Package punkt is already up-to-date!
Preparing news data in .
Creating 3 layers of 1024 units.
> /home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py(84)encode_sentence()
-> (_ ,state) = tf.nn.rnn(sentence_cell, sent, sequence_length = length, dtype= tf.float32)
(Pdb) c
Traceback (most recent call last):
File "translate.py", line 268, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/default/_app.py", line 30, in run
sys.exit(main(sys.argv))
File "translate.py", line 265, in main
train()
File "translate.py", line 161, in train
model = create_model(sess, False)
File "translate.py", line 136, in create_model
forward_only=forward_only)
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 141, in __init__
softmax_loss_function=None)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/seq2seq.py", line 926, in model_with_buckets
decoder_inputs[:bucket[1]])
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 140, in <lambda>
lambda x, y: seq3seq_f(x, y, False),
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 98, in seq3seq_f
art_vecs = tfmap(encode_article, tf.pack(encoder_inputs))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1900, in map
_, r_a = While(lambda i, a: math_ops.less(i, n), compute, [i, acc_ta])
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1557, in While
result = context.BuildLoop(cond, body, loop_vars)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1474, in BuildLoop
body_result = body(*vars_for_body_with_tensor_arrays)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1897, in compute
a = a.write(i, fn(elems_ta.read(i)))
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 92, in encode_article
return tfmap(encode_sentence, article)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1900, in map
_, r_a = While(lambda i, a: math_ops.less(i, n), compute, [i, acc_ta])
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1557, in While
result = context.BuildLoop(cond, body, loop_vars)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1474, in BuildLoop
body_result = body(*vars_for_body_with_tensor_arrays)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1897, in compute
a = a.write(i, fn(elems_ta.read(i)))
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 84, in encode_sentence
(_ ,state) = tf.nn.rnn(sentence_cell, sent, sequence_length = length, dtype= tf.float32)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 124, in rnn
zero_output, state, call_cell)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 212, in _rnn_step
time < max_sequence_length, call_cell, empty_update)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1183, in cond
res_t = context_t.BuildCondBranch(fn1)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1106, in BuildCondBranch
r = fn()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 119, in <lambda>
call_cell = lambda: cell(input_, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 615, in __call__
embedding, array_ops.reshape(inputs, [-1]))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/embedding_ops.py", line 86, in embedding_lookup
validate_indices=validate_indices)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 423, in gather
validate_indices=validate_indices, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 486, in apply_op
_Attr(op_def, input_arg.type_attr))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 59, in _SatisfiesTypeConstraint
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
TypeError: DataType float32 for attr 'Tindices' not in list of allowed values: int32, int64
What am I missing?
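For reference, one direction to check, sketched under the same TF 0.x API as above (sent and length are the names from encode_sentence, so this is a guess at the fix, not a tested one): the dtype argument of tf.nn.rnn describes the cell state and output, so it should stay tf.float32, while the inputs to an EmbeddingWrapper must remain integer token ids, even if a tf.pack/map round-trip has promoted them to float32:

# `sent` and `length` are the variables from encode_sentence above.
sent_ids = [tf.cast(step, tf.int32) for step in sent]  # ids back to int32
(_, state) = tf.nn.rnn(sentence_cell, sent_ids,
                       sequence_length=length, dtype=tf.float32)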
