Sympy matrix exponential raises ValueError - python

I have a 3x3 symbolic matrix and I want to calculate its matrix exponential using SymPy.
from sympy import *
a0,a1,y0,y1,k,t = symbols('a0 a1 y0 y1 k t')
A = Matrix([[-y1*t-a1*t,-y0*t,0],[t,-a1*t,-k*t],[0,t,-a1*t]])
res = exp(A)
print(res)
This gives the following error:
Warning (from warnings module):
File "/usr/lib/python3/dist-packages/apport/report.py", line 13
import fnmatch, glob, traceback, errno, sys, atexit, locale, imp, stat
DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/sympy/core/cache.py", line 94, in wrapper
retval = cfunc(*args, **kwargs)
TypeError: unhashable type: 'MutableDenseMatrix'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/sympy/core/cache.py", line 94, in wrapper
retval = cfunc(*args, **kwargs)
TypeError: unhashable type: 'MutableDenseMatrix'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/sympy/core/compatibility.py", line 419, in as_int
raise TypeError
TypeError
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.8/idlelib/run.py", line 559, in runcode
exec(code, self.locals)
File "/home/kaguro/Documents/PROJECT/Paper-simulation/Finite-time/Model B/extra.py", line 7, in <module>
res = exp(At)
File "/usr/lib/python3/dist-packages/sympy/core/cache.py", line 96, in wrapper
retval = func(*args, **kwargs)
File "/usr/lib/python3/dist-packages/sympy/core/function.py", line 473, in __new__
result = super(Function, cls).__new__(cls, *args, **options)
File "/usr/lib/python3/dist-packages/sympy/core/cache.py", line 96, in wrapper
retval = func(*args, **kwargs)
File "/usr/lib/python3/dist-packages/sympy/core/function.py", line 288, in __new__
evaluated = cls.eval(*args)
File "/usr/lib/python3/dist-packages/sympy/functions/elementary/exponential.py", line 320, in eval
return arg.exp()
File "/usr/lib/python3/dist-packages/sympy/matrices/matrices.py", line 3208, in exp
P, J = self.jordan_form()
File "/usr/lib/python3/dist-packages/sympy/matrices/matrices.py", line 1811, in jordan_form
jordan_basis = [eig_mat(eig, 1).nullspace()[0] for eig in blocks]
File "/usr/lib/python3/dist-packages/sympy/matrices/matrices.py", line 1811, in <listcomp>
jordan_basis = [eig_mat(eig, 1).nullspace()[0] for eig in blocks]
File "/usr/lib/python3/dist-packages/sympy/matrices/matrices.py", line 1011, in nullspace
reduced, pivots = self.rref(iszerofunc=iszerofunc, simplify=simplify)
File "/usr/lib/python3/dist-packages/sympy/matrices/matrices.py", line 941, in rref
ret, pivot_cols = self._eval_rref(iszerofunc=iszerofunc,
File "/usr/lib/python3/dist-packages/sympy/matrices/matrices.py", line 593, in _eval_rref
reduced, pivot_cols, swaps = self._row_reduce(iszerofunc, simpfunc,
File "/usr/lib/python3/dist-packages/sympy/matrices/matrices.py", line 713, in _row_reduce
assumed_nonzero, newly_determined = _find_reasonable_pivot(
File "/usr/lib/python3/dist-packages/sympy/matrices/matrices.py", line 5327, in _find_reasonable_pivot
if x.equals(S.Zero):
File "/usr/lib/python3/dist-packages/sympy/core/expr.py", line 757, in equals
constant = diff.is_constant(simplify=False, failing_number=True)
File "/usr/lib/python3/dist-packages/sympy/core/expr.py", line 683, in is_constant
if b is not None and b is not S.NaN and b.equals(a) is False:
File "/usr/lib/python3/dist-packages/sympy/core/expr.py", line 834, in equals
mp = minimal_polynomial(diff)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 655, in minimal_polynomial
result = _minpoly_compose(ex, x, domain)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 534, in _minpoly_compose
res = _minpoly_add(x, dom, *ex.args)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 362, in _minpoly_add
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 242, in _minpoly_op_algebraic_element
mp1 = _minpoly_compose(ex1, x, dom)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 547, in _minpoly_compose
mp1 = minimal_polynomial(ex1, x)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 655, in minimal_polynomial
result = _minpoly_compose(ex, x, domain)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 557, in _minpoly_compose
res = _minpoly_mul(x, dom, *ex.args)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 374, in _minpoly_mul
mp = _minpoly_op_algebraic_element(Mul, a[0], a[1], x, dom)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 244, in _minpoly_op_algebraic_element
mp2 = _minpoly_compose(ex2, y, dom)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 559, in _minpoly_compose
res = _minpoly_pow(ex.base, ex.exp, x, dom)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 337, in _minpoly_pow
mp = _minpoly_compose(ex, x, dom)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 534, in _minpoly_compose
res = _minpoly_add(x, dom, *ex.args)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 362, in _minpoly_add
mp = _minpoly_op_algebraic_element(Add, a[0], a[1], x, dom)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 279, in _minpoly_op_algebraic_element
res = _choose_factor(factors, x, op(ex1, ex2), dom)
File "/usr/lib/python3/dist-packages/sympy/polys/numberfields.py", line 70, in _choose_factor
if abs(f.as_expr().evalf(prec1, points)) < eps:
File "/usr/lib/python3/dist-packages/sympy/core/expr.py", line 171, in __abs__
return Abs(self)
File "/usr/lib/python3/dist-packages/sympy/core/cache.py", line 94, in wrapper
retval = cfunc(*args, **kwargs)
File "/usr/lib/python3/dist-packages/sympy/core/function.py", line 473, in __new__
result = super(Function, cls).__new__(cls, *args, **options)
File "/usr/lib/python3/dist-packages/sympy/core/cache.py", line 94, in wrapper
retval = cfunc(*args, **kwargs)
File "/usr/lib/python3/dist-packages/sympy/core/function.py", line 288, in __new__
evaluated = cls.eval(*args)
File "/usr/lib/python3/dist-packages/sympy/functions/elementary/complexes.py", line 473, in eval
arg = signsimp(arg, evaluate=False)
File "/usr/lib/python3/dist-packages/sympy/simplify/simplify.py", line 383, in signsimp
e = sub_post(sub_pre(expr))
File "/usr/lib/python3/dist-packages/sympy/simplify/cse_opts.py", line 16, in sub_pre
adds = [a for a in e.atoms(Add) if a.could_extract_minus_sign()]
File "/usr/lib/python3/dist-packages/sympy/simplify/cse_opts.py", line 16, in <listcomp>
adds = [a for a in e.atoms(Add) if a.could_extract_minus_sign()]
File "/usr/lib/python3/dist-packages/sympy/core/expr.py", line 2386, in could_extract_minus_sign
(negative_self).extract_multiplicatively(-1) is not None)
File "/usr/lib/python3/dist-packages/sympy/core/expr.py", line 2208, in extract_multiplicatively
newarg = arg.extract_multiplicatively(c)
File "/usr/lib/python3/dist-packages/sympy/core/expr.py", line 2221, in extract_multiplicatively
newarg = arg.extract_multiplicatively(c)
File "/usr/lib/python3/dist-packages/sympy/core/expr.py", line 2134, in extract_multiplicatively
elif c == self:
File "/usr/lib/python3/dist-packages/sympy/core/numbers.py", line 2248, in __eq__
return Rational.__eq__(self, other)
File "/usr/lib/python3/dist-packages/sympy/core/numbers.py", line 1906, in __eq__
integer_log(self.p//m, 2) == (t, True)
File "/usr/lib/python3/dist-packages/sympy/core/power.py", line 147, in integer_log
y = as_int(y)
File "/usr/lib/python3/dist-packages/sympy/core/compatibility.py", line 425, in as_int
raise ValueError('%s is not an integer' % (n,))
ValueError: 1 is not an integer
I don't understand why it is not working. Earlier I had used a 2x2 matrix of a similar form, and it worked perfectly (albeit the result was very complicated looking).
The eigenvalues of this matrix can be found, but asking for the eigenvectors gives the same error, so I can't use the diagonalisation method manually either (I didn't really expect that to work anyway, because then SymPy would have done it itself).
Any help would be greatly appreciated.
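
For what it's worth, a hedged workaround sketch (my addition, untested against this exact SymPy version, and it may still be slow or fail): the matrix is t times a constant matrix, and giving the symbols real/positive assumptions can help SymPy's zero-testing when it row-reduces the eigenvector matrices inside jordan_form():

from sympy import symbols, Matrix

# Assumption, not part of the original question: positive=True sometimes lets
# SymPy decide the pivot-is-zero questions that otherwise crash its
# minimal-polynomial machinery.
a0, a1, y0, y1, k, t = symbols('a0 a1 y0 y1 k t', positive=True)

# A = t*M with M independent of t, which keeps intermediate expressions smaller.
M = Matrix([[-y1 - a1, -y0, 0],
            [1, -a1, -k],
            [0, 1, -a1]])

res = (t * M).exp()
print(res)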

Related

Spacy trained model: input dimension error

Here is where I load my model:
_model = r"C:\Users\evead\Desktop\spacy_model_config_03_15\model-best"
nlp = spacy.load(_model)
txt = "Below are my data loader and neural net. I have also included the output of my data loader when I retrieve a batch of data"
doc = nlp(txt)
Here is the error I got:
Exception has occurred: RuntimeError
input must have 3 dimensions, got 2
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\src\debug_entity.py", line 46, in <module>
doc = nlp(txt)
Here is my complete error:
(env) PS C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER> python -m spacy evaluate C:\Users\evead\Desktop\spacy_model_config_03_15\model-best C:\Users\evead\Desktop\batchdata-3-15\train.spacy --output C:\Users\evead\Desktop\batchdata-3-15\res.json
Using CPU
C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\torch\autocast_mode.py:141: UserWarning: User provided device_type of 'cuda', but CUDA is not available. Disabling
warnings.warn('User provided device_type of \'cuda\', but CUDA is not available. Disabling')
Traceback (most recent call last):
File "C:\Users\evead\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\evead\AppData\Local\Programs\Python\Python39\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\__main__.py", line 4, in <module>
setup_cli()
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\cli\_util.py", line 71, in setup_cli
command(prog_name=COMMAND)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\click\core.py", line 1128, in __call__
return self.main(*args, **kwargs)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\click\core.py", line 1053, in main
rv = self.invoke(ctx)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\click\core.py", line 1659, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\click\core.py", line 1395, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\click\core.py", line 754, in invoke
return __callback(*args, **kwargs)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\typer\main.py", line 500, in wrapper
return callback(**use_params) # type: ignore
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\cli\evaluate.py", line 42, in evaluate_cli
evaluate(
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\cli\evaluate.py", line 78, in evaluate
scores = nlp.evaluate(dev_dataset)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\language.py", line 1415, in evaluate
for eg, doc in zip(examples, docs):
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\language.py", line 1575, in pipe
for doc in docs:
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\util.py", line 1598, in _pipe
yield from proc.pipe(docs, **kwargs)
File "spacy\pipeline\transition_parser.pyx", line 230, in pipe
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\util.py", line 1547, in minibatch
batch = list(itertools.islice(items, int(batch_size)))
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\util.py", line 1598, in _pipe
yield from proc.pipe(docs, **kwargs)
File "spacy\pipeline\trainable_pipe.pyx", line 79, in pipe
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\util.py", line 1617, in raise_error
raise e
File "spacy\pipeline\trainable_pipe.pyx", line 75, in spacy.pipeline.trainable_pipe.TrainablePipe.pipe
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\spacy\pipeline\tok2vec.py", line 125, in predict
tokvecs = self.model.predict(docs)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\model.py", line 315, in predict
return self._func(self, X, is_train=False)[0]
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\chain.py", line 54, in forward
Y, inc_layer_grad = layer(X, is_train=is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\model.py", line 291, in __call__
return self._func(self, X, is_train=is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\with_array.py", line 40, in forward
return _list_forward(cast(Model[List2d, List2d], model), Xseq, is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\with_array.py", line 76, in _list_forward
Yf, get_dXf = layer(Xf, is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\model.py", line 291, in __call__
return self._func(self, X, is_train=is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\with_padded.py", line 36, in forward
Y, backprop = _array_forward(layer, cast(Floats3d, Xseq), is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\with_padded.py", line 76, in _array_forward
Yp, get_dXp = layer(Xp, is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\model.py", line 291, in __call__
return self._func(self, X, is_train=is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\with_padded.py", line 30, in forward
Y, backprop = layer(Xseq, is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\model.py", line 291, in __call__
return self._func(self, X, is_train=is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\layers\pytorchwrapper.py", line 134, in forward
Ytorch, torch_backprop = model.shims[0](Xtorch, is_train)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\shims\pytorch.py", line 56, in __call__
return self.predict(inputs), lambda a: ...
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\thinc\shims\pytorch.py", line 66, in predict
outputs = self._model(*inputs.args, **inputs.kwargs)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\torch\nn\modules\module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\torch\nn\modules\rnn.py", line 689, in forward
self.check_forward_args(input, hx, batch_sizes)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\torch\nn\modules\rnn.py", line 632, in check_forward_args
self.check_input(input, batch_sizes)
File "C:\Users\evead\Desktop\Issam\PIXITREND-CLASSIFIER\Annotation-Training\env\lib\site-packages\torch\nn\modules\rnn.py", line 201, in check_input
raise RuntimeError(
RuntimeError: input must have 3 dimensions, got 2
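
The underlying constraint comes from PyTorch's RNN modules rather than from spaCy itself. A minimal hedged illustration (standalone PyTorch, not the spaCy pipeline; on the PyTorch version in this traceback, 2-D input to an LSTM is rejected):

import torch
import torch.nn as nn

# An nn.LSTM expects 3-D input of shape (seq_len, batch, input_size)
# unless batch_first=True is set.
lstm = nn.LSTM(input_size=8, hidden_size=16)
ok = torch.zeros(5, 2, 8)   # (seq_len, batch, input_size): works
out, _ = lstm(ok)

bad = torch.zeros(5, 8)     # 2-D input reproduces the error on this version
try:
    lstm(bad)
except RuntimeError as e:
    print(e)                # input must have 3 dimensions, got 2

So the batch reaching the wrapped LSTM inside the tok2vec model seems to have lost a dimension somewhere in the thinc with_padded chain.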

Why does Python's numba run a 2D array program on Spyder but not on a remote server?

I created a small program that uses numba on a method containing a 2D array (list of lists). This program runs just fine on Spyder, but when I try to run the exact same program on a remote Ubuntu server, I get a long error saying "unsupported nested memory-managed object" (listed below the program code).
The code:
from numba import jit

@jit(nopython=True)
def test():
    num1 = 10
    num2 = 5
    array = [[1 for i in range(num1)] for j in range(num2)]
    # array = [2 for i in range(num1)]
    sum = 0
    for i in range(0, num1):
        for j in range(0, num2):
            sum = sum + array[j][i]  # array has num2 rows of num1 entries
    print(sum)

test()
The error:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 186, in _call_incref_decref
meminfo = data_model.get_nrt_meminfo(builder, value)
File "/usr/lib/python3/dist-packages/numba/datamodel/models.py", line 329, in get_nrt_meminfo
"unsupported nested memory-managed object")
NotImplementedError: unsupported nested memory-managed object
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/numba/errors.py", line 243, in new_error_context
yield
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 263, in lower_inst
self.storevar(val, inst.target.name)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 928, in storevar
self.decref(fetype, old)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 982, in decref
self.context.nrt.decref(self.builder, typ, val)
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 208, in decref
self._call_incref_decref(builder, typ, typ, value, "NRT_decref")
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 179, in _call_incref_decref
funcname, getters + (getter,))
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 188, in _call_incref_decref
raise NotImplementedError("%s: %s" % (root_type, str(e)))
NotImplementedError: list(list(int64)): unsupported nested memory-managed object
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "2DArrayTest.py", line 15, in <module>
test()
File "/usr/lib/python3/dist-packages/numba/dispatcher.py", line 307, in _compile_for_args
return self.compile(tuple(argtypes))
File "/usr/lib/python3/dist-packages/numba/dispatcher.py", line 579, in compile
cres = self._compiler.compile(args, return_type)
File "/usr/lib/python3/dist-packages/numba/dispatcher.py", line 80, in compile
flags=flags, locals=self.locals)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 740, in compile_extra
return pipeline.compile_extra(func)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 360, in compile_extra
return self._compile_bytecode()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 699, in _compile_bytecode
return self._compile_core()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 686, in _compile_core
res = pm.run(self.status)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 246, in run
raise patched_exception
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 238, in run
stage()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 621, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 576, in _backend
lowered = lowerfn()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 563, in backend_nopython_mode
self.flags)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 858, in native_lowering_stage
lower.lower()
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 135, in lower
self.lower_normal_function(self.fndesc)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 176, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 201, in lower_function_body
self.lower_block(block)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "/usr/lib/python3.6/contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "/usr/lib/python3/dist-packages/numba/errors.py", line 249, in new_error_context
six.reraise(type(newerr), newerr, sys.exc_info()[2])
File "/usr/lib/python3/dist-packages/numba/six.py", line 658, in reraise
raise value.with_traceback(tb)
File "/usr/lib/python3/dist-packages/numba/errors.py", line 243, in new_error_context
yield
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 263, in lower_inst
self.storevar(val, inst.target.name)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 928, in storevar
self.decref(fetype, old)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 982, in decref
self.context.nrt.decref(self.builder, typ, val)
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 208, in decref
self._call_incref_decref(builder, typ, typ, value, "NRT_decref")
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 179, in _call_incref_decref
funcname, getters + (getter,))
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 188, in _call_incref_decref
raise NotImplementedError("%s: %s" % (root_type, str(e)))
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
list(list(int64)): unsupported nested memory-managed object
File "2DArrayTest.py", line 7
[1] During: lowering "$56 = build_list(items=[])" at 2DArrayTest.py (7)
I have seen elsewhere that Numba doesn't handle 2D arrays and lists of lists well because of how it manages memory. Is there a way to make it work on the remote server, since it works on Spyder? What is it that Spyder does differently to make it work?
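
For reference, a hedged sketch of a version-independent rewrite (assuming the two machines simply run different Numba versions, which the "unsupported nested memory-managed object" error from the older release suggests): a typed NumPy array avoids the nested-list limitation entirely in nopython mode:

import numpy as np
from numba import jit

@jit(nopython=True)
def test():
    num1 = 10
    num2 = 5
    # A 2-D ndarray instead of a list of lists; Numba can lower this in
    # nopython mode without any nested memory-managed objects.
    array = np.ones((num2, num1), dtype=np.int64)
    total = 0
    for i in range(num1):
        for j in range(num2):
            total += array[j, i]
    print(total)

test()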

How to calculate values from functions without returning them or setting them global (for numba.cuda)?

I am trying to run this simple code on a CUDA GPU. The module I am using for this is numba.cuda:
import numba
from numba import cuda

@numba.cuda.jit
def function_4(j, k):
    l = j + k
    return l

l = function_4(1, 2)
print(l)
Output:
Traceback (most recent call last):
File "/home/amu/Desktop/RL_framework/help_functions/test2.py", line 9, in <module>
l = function_4(1, 2)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/cuda/compiler.py", line 758, in __call__
kernel = self.specialize(*args)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/cuda/compiler.py", line 769, in specialize
kernel = self.compile(argtypes)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/cuda/compiler.py", line 785, in compile
**self.targetoptions)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/cuda/compiler.py", line 57, in compile_kernel
cres = compile_cuda(pyfunc, types.void, args, debug=debug, inline=inline)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/cuda/compiler.py", line 46, in compile_cuda
locals={})
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py", line 568, in compile_extra
return pipeline.compile_extra(func)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py", line 339, in compile_extra
return self._compile_bytecode()
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py", line 401, in _compile_bytecode
return self._compile_core()
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py", line 381, in _compile_core
raise e
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py", line 372, in _compile_core
pm.run(self.state)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py", line 341, in run
raise patched_exception
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py", line 332, in run
self._runPass(idx, pass_inst, state)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py", line 291, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py", line 264, in check
mangled = func(compiler_state)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/typed_passes.py", line 98, in run_pass
raise_errors=self._raise_errors)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/typed_passes.py", line 70, in type_inference_stage
infer.propagate(raise_errors=raise_errors)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/typeinfer.py", line 986, in propagate
raise errors[0]
numba.core.errors.TypingError: Failed in nopython mode pipeline (step: nopython frontend)
No conversion from int64 to none for '$12return_value.4', defined at None
File "test2.py", line 7:
def function_4(j, k):
<source elided>
l = j + k
return l
^
[1] During: typing of assignment at /home/amu/Desktop/RL_framework/help_functions/test2.py (7)
File "test2.py", line 7:
def function_4(j, k):
<source elided>
l = j + k
return l
^
numba.cuda does not support the return statement in kernels. So how do I use functions to calculate values? The global statement does not seem to be supported either:
import numba
from numba import cuda

@numba.cuda.jit
def function_4(j, k):
    global l
    l = j + k

function_4(1, 2)
print(l)
Output:
Traceback (most recent call last):
File "/home/amu/Desktop/RL_framework/help_functions/test.py", line 9, in <module>
function_4(1, 2)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/cuda/compiler.py", line 758, in __call__
kernel = self.specialize(*args)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/cuda/compiler.py", line 769, in specialize
kernel = self.compile(argtypes)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/cuda/compiler.py", line 785, in compile
**self.targetoptions)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/cuda/compiler.py", line 57, in compile_kernel
cres = compile_cuda(pyfunc, types.void, args, debug=debug, inline=inline)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/cuda/compiler.py", line 46, in compile_cuda
locals={})
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py", line 568, in compile_extra
return pipeline.compile_extra(func)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py", line 339, in compile_extra
return self._compile_bytecode()
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py", line 401, in _compile_bytecode
return self._compile_core()
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py", line 381, in _compile_core
raise e
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler.py", line 372, in _compile_core
pm.run(self.state)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py", line 341, in run
raise patched_exception
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py", line 332, in run
self._runPass(idx, pass_inst, state)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_lock.py", line 32, in _acquire_compile_lock
return func(*args, **kwargs)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py", line 291, in _runPass
mutated |= check(pss.run_pass, internal_state)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/compiler_machinery.py", line 264, in check
mangled = func(compiler_state)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/untyped_passes.py", line 86, in run_pass
func_ir = interp.interpret(bc)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/interpreter.py", line 116, in interpret
flow.run()
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/byteflow.py", line 107, in run
runner.dispatch(state)
File "/home/amu/anaconda3/lib/python3.7/site-packages/numba/core/byteflow.py", line 269, in dispatch
raise UnsupportedError(msg, loc=self.get_debug_loc(inst.lineno))
numba.core.errors.UnsupportedError: Failed in nopython mode pipeline (step: analyzing bytecode)
Use of unsupported opcode (STORE_GLOBAL) found
File "test.py", line 7:
def function_4(j, k):
<source elided>
global l
l = j + k
^
Your code would need to look something like this:
import numpy as np
import numba
from numba import cuda

@cuda.jit
def function_4(i, j, k):
    i[0] = j[0] + k[0]

j = np.array([1], dtype=np.int32)
k = np.array([2], dtype=np.int32)
i = np.zeros_like(j)
function_4[1, 1](i, j, k)
print(i[0])
[Note code written on a phone in an airport departure lounge, never tested, use at own risk]
Basically, everything has to be passed as arrays with explicit dtypes. If your intention is to write kernels, you would be better served starting in CUDA's native C++ dialect, which is well documented, and then coming back to Numba, which isn't. Then everything will be self-evident.
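
A hedged addition to that answer (not from the original, but standard Numba behaviour; it needs a CUDA-capable machine to run): device functions, as opposed to kernels, may return values, so the arithmetic can still live in a returning helper:

import numpy as np
from numba import cuda

# Device functions run on the GPU and may return values;
# only the kernel itself must return None.
@cuda.jit(device=True)
def add(j, k):
    return j + k

@cuda.jit
def kernel(out, j, k):
    out[0] = add(j[0], k[0])

j = np.array([1], dtype=np.int32)
k = np.array([2], dtype=np.int32)
out = np.zeros_like(j)
kernel[1, 1](out, j, k)
print(out[0])  # 3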

Autoencoder in Chainer issue

I am trying to train an autoencoder with Chainer in Python and wrote the code below, but it does not work. Why?
import numpy as np
from chainer import Chain, dataset, iterators, optimizers, training
import chainer.links as L

class Autoencoder(Chain):
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(3, 2)
            self.l2 = L.Linear(2, 3)

    def __call__(self, x):
        h1 = self.l1(x)
        h2 = self.l2(h1)
        return h2

class Dataset(dataset.DatasetMixin):
    def __init__(self, number_of_data, show_initial=False):
        noise_level = 1
        self.data = np.zeros((number_of_data, 3), dtype=np.float32)
        OA_vector = np.array([3, 2, 1])
        OB_vector = np.array([2, -1, 1])
        t = np.random.uniform(-0.5, 0.5, number_of_data)
        s = np.random.uniform(-0.5, 0.5, number_of_data)
        for i in range(0, number_of_data):
            noise = np.random.uniform(-noise_level, noise_level, 3)
            self.data[i] = t[i]*OA_vector + s[i]*OB_vector + noise

    def __len__(self):
        return self.data.shape[0]

    def get_example(self, idx):
        return self.data[idx]

if __name__ == "__main__":
    n_epoch = 5
    batch_size = 100
    number_of_data = 1000  # number of data points
    train_data = Dataset(number_of_data, False)
    model = Autoencoder()
    optimizer = optimizers.SGD(lr=0.05).setup(model)
    train_iter = iterators.SerialIterator(train_data, batch_size)
    updater = training.StandardUpdater(train_iter, optimizer, device=0)
    trainer = training.Trainer(updater, (n_epoch, "epoch"), out="result")
    trainer.run()
I am using Chainer, and the Dataset makes 3-dimensional vectors. The number of vectors is "number_of_data".
Should I do this without using the trainer?
I don't understand where the problem is.
EDIT
When we run the above code with device=0, we get the error below.
Exception in main training loop: Unsupported type <class 'NoneType'>
Traceback (most recent call last):
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
File "cupy/core/core.pyx", line 1310, in cupy.core.core.ndarray.__mul__
File "cupy/core/elementwise.pxi", line 753, in cupy.core.core.ufunc.__call__
File "cupy/core/elementwise.pxi", line 68, in cupy.core.core._preprocess_args
Will finalize trainer extensions and updater before reraising the exception.
Traceback (most recent call last):
File "AC.py", line 70, in <module>
trainer.run()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 322, in run
six.reraise(*sys.exc_info())
File "/home/****/.local/lib/python3.5/site-packages/six.py", line 693, in reraise
raise value
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
File "cupy/core/core.pyx", line 1310, in cupy.core.core.ndarray.__mul__
File "cupy/core/elementwise.pxi", line 753, in cupy.core.core.ufunc.__call__
File "cupy/core/elementwise.pxi", line 68, in cupy.core.core._preprocess_args
TypeError: Unsupported type <class 'NoneType'>
When we run the above code with device=-1, we get the error below.
Exception in main training loop: unsupported operand type(s) for *: 'bool' and 'NoneType'
Traceback (most recent call last):
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
Will finalize trainer extensions and updater before reraising the exception.
Traceback (most recent call last):
File "AC.py", line 70, in <module>
trainer.run()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 322, in run
six.reraise(*sys.exc_info())
File "/home/****/.local/lib/python3.5/site-packages/six.py", line 693, in reraise
raise value
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
TypeError: unsupported operand type(s) for *: 'bool' and 'NoneType'
I think the model needs to return the loss in its __call__ method: by default, StandardUpdater passes each batch to the optimizer's target (the model itself) and calls backward() on whatever it returns, so __call__ must return a scalar loss Variable, not the reconstruction. A sample modification is as follows:
from chainer import Chain
import chainer.functions as F
import chainer.links as L

class Autoencoder(Chain):
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(3, 2)
            self.l2 = L.Linear(2, 3)

    def forward(self, x):
        h1 = self.l1(x)
        h2 = self.l2(h1)
        return h2

    def __call__(self, x):
        h = self.forward(x)
        # Instead of h, __call__ should return the loss.
        loss = F.mean_squared_error(h, x)
        return loss
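
A quick hedged smoke test of the fixed model on CPU (my addition, not from the original answer; it assumes the Autoencoder definition above and its imports):

import numpy as np

# __call__ should now return a scalar loss Variable that backward() accepts.
model = Autoencoder()
x = np.random.rand(4, 3).astype(np.float32)
loss = model(x)
print(loss.shape)  # () -> scalar
loss.backward()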

Feeding data through an embedding wrapper in TensorFlow

I'm working on a text summarization network and need to implement an encoder to use with tf.nn.seq2seq.embedding_attention_decoder. As part of that I need to encode varying batches of sequences into representative vectors, but the innermost encoding doesn't go through.
Here's a simplified snippet giving the same error:
import tensorflow as tf

single_cell = tf.nn.rnn_cell.GRUCell(1024)
sentence_cell = tf.nn.rnn_cell.EmbeddingWrapper(single_cell,
                                                embedding_classes=40000)
batch = [tf.placeholder(tf.int32, [1, 1]) for _ in range(250)]
(_, state) = tf.nn.rnn(sentence_cell, batch, dtype=tf.int32)
This fails with the following stack trace:
Traceback (most recent call last):
File "/home/ubuntu/workspace/example.py", line 6, in <module>
(_ , state) = tf.nn.rnn(sentence_cell, batch, dtype= tf.int32)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 126, in rnn
(output, state) = call_cell()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 119, in <lambda>
call_cell = lambda: cell(input_, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 616, in __call__
return self._cell(embedded, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 150, in __call__
2 * self._num_units, True, 1.0))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 706, in linear
res = math_ops.matmul(array_ops.concat(1, args), matrix)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 314, in concat
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 70, in _concat
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 396, in apply_op
raise TypeError("%s that don't all match." % prefix)
TypeError: Tensors in list passed to 'values' of 'Concat' Op have types [float32, int32] that don't all match.
When debugging, the input size of the sentence_cell is 1, and the elements in batch all have dimension [1,1], which is in fact [batch_size, sentence_cell.input_size].
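A hedged reading of that first traceback (my gloss, assuming the legacy TF 0.x rnn_cell API behaves as the stack frames suggest): the dtype argument of tf.nn.rnn sets the dtype of the zero initial state, not of the inputs. The EmbeddingWrapper consumes int32 ids and emits float32 embeddings, so an int32 state cannot be concatenated with the float32 embedded input inside the GRU; the placeholders should stay int32 while the state dtype is float32:

import tensorflow as tf

single_cell = tf.nn.rnn_cell.GRUCell(1024)
sentence_cell = tf.nn.rnn_cell.EmbeddingWrapper(single_cell,
                                                embedding_classes=40000)
# The placeholders keep holding int32 token ids; only the state is float.
batch = [tf.placeholder(tf.int32, [1, 1]) for _ in range(250)]
(_, state) = tf.nn.rnn(sentence_cell, batch, dtype=tf.float32)  # not tf.int32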
Switching to dtype=tf.float32 in the call to tf.nn.rnn() makes the snippet work, but gives me the following stack trace in my code:
[nltk_data] Downloading package punkt to /home/alex/nltk_data...
[nltk_data] Package punkt is already up-to-date!
Preparing news data in .
Creating 3 layers of 1024 units.
> /home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py(84)encode_sentence()
-> (_ ,state) = tf.nn.rnn(sentence_cell, sent, sequence_length = length, dtype= tf.float32)
(Pdb) c
Traceback (most recent call last):
File "translate.py", line 268, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/default/_app.py", line 30, in run
sys.exit(main(sys.argv))
File "translate.py", line 265, in main
train()
File "translate.py", line 161, in train
model = create_model(sess, False)
File "translate.py", line 136, in create_model
forward_only=forward_only)
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 141, in __init__
softmax_loss_function=None)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/seq2seq.py", line 926, in model_with_buckets
decoder_inputs[:bucket[1]])
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 140, in <lambda>
lambda x, y: seq3seq_f(x, y, False),
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 98, in seq3seq_f
art_vecs = tfmap(encode_article, tf.pack(encoder_inputs))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1900, in map
_, r_a = While(lambda i, a: math_ops.less(i, n), compute, [i, acc_ta])
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1557, in While
result = context.BuildLoop(cond, body, loop_vars)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1474, in BuildLoop
body_result = body(*vars_for_body_with_tensor_arrays)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1897, in compute
a = a.write(i, fn(elems_ta.read(i)))
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 92, in encode_article
return tfmap(encode_sentence, article)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1900, in map
_, r_a = While(lambda i, a: math_ops.less(i, n), compute, [i, acc_ta])
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1557, in While
result = context.BuildLoop(cond, body, loop_vars)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1474, in BuildLoop
body_result = body(*vars_for_body_with_tensor_arrays)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1897, in compute
a = a.write(i, fn(elems_ta.read(i)))
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 84, in encode_sentence
(_ ,state) = tf.nn.rnn(sentence_cell, sent, sequence_length = length, dtype= tf.float32)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 124, in rnn
zero_output, state, call_cell)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 212, in _rnn_step
time < max_sequence_length, call_cell, empty_update)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1183, in cond
res_t = context_t.BuildCondBranch(fn1)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1106, in BuildCondBranch
r = fn()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 119, in <lambda>
call_cell = lambda: cell(input_, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 615, in __call__
embedding, array_ops.reshape(inputs, [-1]))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/embedding_ops.py", line 86, in embedding_lookup
validate_indices=validate_indices)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 423, in gather
validate_indices=validate_indices, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 486, in apply_op
_Attr(op_def, input_arg.type_attr))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 59, in _SatisfiesTypeConstraint
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
TypeError: DataType float32 for attr 'Tindices' not in list of allowed values: int32, int64
What am I missing?
