Theano: MissingInputError (Python)

This is a simplification of the original code that I'm trying to fix. I invented a problem that reflects what the original code does and produces the same error, written in minimal form:
import numpy as np
import theano
import theano.tensor as T

class Test(object):
    def __init__(self):
        self.rate = 0.01

    def start(self, x, y):
        W_val = 40.89
        W = theano.shared(value=W_val, borrow=True)
        z = T.mean(x * W / y)
        gz = T.grad(z, W)
        updates = [(W, W - self.rate * gz)]
        fz = theano.function([], z, updates=updates)
        for i in range(100):
            out = fz()
        self.out = out
        return out
x_set = np.random.rand(10)
y_set = np.random.randint(low=0, high=5, size=10, dtype=int)
batch_size = 2

x = T.dvector('x')
y = T.ivector('y')
index = T.lscalar()

test = Test()
cost = test.start(x, y)

train = theano.function(
    inputs=[index],
    outputs=cost,
    givens={
        x: x_set[index * batch_size: (index + 1) * batch_size],
        y: y_set[index * batch_size: (index + 1) * batch_size]
    }
)

for i in range(5):
    result = train(i)
    print(result)
This is the traceback:
Traceback (most recent call last):
File "functions.py", line 33, in <module>
cost=test.start(x,y)
File "functions.py", line 18, in start
fz=theano.function([], z, updates=updates)
File "C:\Program Files\Anaconda3\lib\site-packages\theano\compile\function.py", line 320, in function
output_keys=output_keys)
File "C:\Program Files\Anaconda3\lib\site-packages\theano\compile\pfunc.py", line 479, in pfunc
output_keys=output_keys)
File "C:\Program Files\Anaconda3\lib\site-packages\theano\compile\function_module.py", line 1776, in orig_function
output_keys=output_keys).create(
File "C:\Program Files\Anaconda3\lib\site-packages\theano\compile\function_module.py", line 1428, in __init__
accept_inplace)
File "C:\Program Files\Anaconda3\lib\site-packages\theano\compile\function_module.py", line 177, in std_fgraph
update_mapping=update_mapping)
File "C:\Program Files\Anaconda3\lib\site-packages\theano\gof\fg.py", line 171, in __init__
self.__import_r__(output, reason="init")
File "C:\Program Files\Anaconda3\lib\site-packages\theano\gof\fg.py", line 360, in __import_r__
self.__import__(variable.owner, reason=reason)
File "C:\Program Files\Anaconda3\lib\site-packages\theano\gof\fg.py", line 465, in __import__
detailed_err_msg)
theano.gof.fg.MissingInputError: A variable that is an input to the graph was neither provided as an input to the function nor given a value. A chain of variables leading from this input to an output is [x, Elemwise{mul,no_inplace}.0, Elemwise{true_div,no_inplace}.0, Sum{acc_dtype=float64}.0, mean]. This chain may not be unique
Backtrace when the variable is created:
File "functions.py", line 28, in <module>
x = T.dvector('x')
I don't know where else to bang my head...
Thanks
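
For reference, here is a minimal sketch of the direction I suspect the fix takes (my assumption: since the graph of z contains x and y as free variables, the inner theano.function must declare them as inputs, or substitute them via givens):

import numpy as np
import theano
import theano.tensor as T

# Sketch only, not the original code: x and y are declared as inputs of the
# compiled function, so the graph no longer has unprovided variables.
x = T.dvector('x')
y = T.dvector('y')
W = theano.shared(40.89, name='W')

z = T.mean(x * W / y)
gz = T.grad(z, W)
fz = theano.function([x, y], z, updates=[(W, W - 0.01 * gz)])

x_val = np.random.rand(10)
y_val = np.random.rand(10) + 1.0  # keep y away from zero
print(fz(x_val, y_val))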

Related

Backtrader giving IndexError: array assignment index out of range

I am trying to run the following strategy:
def max_n(array, n):
    return np.argpartition(array, -n)[-n:]

class CrossSectionalMR(bt.Strategy):
    params = (
        ('num_positions', 100),
    )

    def __init__(self, temp):
        self.inds = {}
        for d in self.datas:
            self.inds[d] = {}
            self.inds[d]["pct"] = bt.indicators.PercentChange(d.close, period=1)

    def prenext(self):
        self.next()

    def next(self):
        available = list(filter(lambda d: len(d), self.datas))  # only look at data that existed yesterday
        rets = np.zeros(len(available))
        for i, d in enumerate(available):
            rets[i] = self.inds[d]['pct'][0]
        market_ret = np.mean(rets)
        weights = -(rets - market_ret)
        max_weights_index = max_n(np.abs(weights), self.params.num_positions)
        max_weights = weights[max_weights_index]
        weights = weights / np.sum(np.abs(max_weights))
        for i, d in enumerate(available):
            if i in max_weights_index:
                self.order_target_percent(d, target=weights[i])
            else:
                self.order_target_percent(d, 0)
The full error is:
Traceback (most recent call last):
File "/home/poblivsig/Software/pycharm-2020.3.1/plugins/python/helpers/pydev/pydevd.py", line 1477, in _exec
pydev_imports.execfile(file, globals, locals) # execute the script
File "/home/poblivsig/Software/pycharm-2020.3.1/plugins/python/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/home/poblivsig/Dropbox/meanrev/main.py", line 190, in <module>
dd, cagr, sharpe = backtest(datas, CrossSectionalMR, plot=True, num_positions=100)
File "/home/poblivsig/Dropbox/meanrev/main.py", line 181, in backtest
results = cerebro.run()
File "/home/poblivsig/Dropbox/meanrev/venv/lib/python3.8/site-packages/backtrader/cerebro.py", line 1127, in run
runstrat = self.runstrategies(iterstrat)
File "/home/poblivsig/Dropbox/meanrev/venv/lib/python3.8/site-packages/backtrader/cerebro.py", line 1293, in runstrategies
self._runonce(runstrats)
File "/home/poblivsig/Dropbox/meanrev/venv/lib/python3.8/site-packages/backtrader/cerebro.py", line 1652, in _runonce
strat._once()
File "/home/poblivsig/Dropbox/meanrev/venv/lib/python3.8/site-packages/backtrader/lineiterator.py", line 297, in _once
indicator._once()
File "/home/poblivsig/Dropbox/meanrev/venv/lib/python3.8/site-packages/backtrader/lineiterator.py", line 297, in _once
indicator._once()
File "/home/poblivsig/Dropbox/meanrev/venv/lib/python3.8/site-packages/backtrader/linebuffer.py", line 630, in _once
self.oncestart(self._minperiod - 1, self._minperiod)
File "/home/poblivsig/Dropbox/meanrev/venv/lib/python3.8/site-packages/backtrader/lineroot.py", line 165, in oncestart
self.once(start, end)
File "/home/poblivsig/Dropbox/meanrev/venv/lib/python3.8/site-packages/backtrader/linebuffer.py", line 672, in once
dst[i] = src[i + ago]
IndexError: array assignment index out of range
Any help would be greatly appreciated.
I grab the data from Yahoo and store it in CSV files, which are then loaded and added to Cerebro. Sometimes the code cannot fetch the full list of SPY constituents, but I don't think that is the problem here.
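
As an aside, here is a quick illustration of what the max_n helper at the top does (np.argpartition places the indices of the n largest values in the last n slots, in no particular order):

import numpy as np

arr = np.array([0.3, -1.2, 2.5, 0.7, 1.1])
# indices of the two largest values (2.5 and 1.1), unordered: e.g. [4 2]
print(np.argpartition(arr, -2)[-2:])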

Python: MemoryError when applying ARIMA.fit with high MA order q=367

Hi all,
I am using PyCharm 2018.1.1. I have tried to build an ARIMA model in Python; the model was identified with the parameters (p=0, d=0, q=367). Here is the code:
def arima_Model_Static_PlotErrorAC_PAC(series):
    train, expctd = series, series
    arima_orders = (0, 0, 367)
    model = ARIMA(series, order=arima_orders)
    results_MA = model.fit(disp=-1, start_params=[.1 for i in range(1 + arima_orders[2])])
    yhatList = results_MA.fittedvalues
    residuals = [expctd[i] - yhatList[i] for i in range(len(expctd))]
    mse = mean_squared_error(expctd, yhatList)
    rmse = sqrt(mse)
    print(results_MA.summary())
    print(rmse)
The model is called as follows:

series = DataSetDiff  # DataSetDiff is a series with a length of 3652 values
outputResidualError = arima_Model_Static_PlotErrorAC_PAC(series)
With this high q order, the following error is raised:
C:\109_personel\112_pyCharmArima\venv\Scripts\python.exe C:/109_personel/112_pyCharmArima/Presentation_Vers2_ModelOneFunct_3_5.py
Traceback (most recent call last):
File "C:/109_personel/112_pyCharmArima/Presentation_Vers2_ModelOneFunct_3_5.py", line 243, in arima_Model_Static_PlotErrorAC_PAC
results_MA = model.fit(disp=-1, start_params=[.1 for i in range(1 + arima_orders[2])], solver='bfgs')
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tsa\arima_model.py", line 959, in fit
callback=callback, **kwargs)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\base\model.py", line 466, in fit
full_output=full_output)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\base\optimizer.py", line 191, in _fit
hess=hessian)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\base\optimizer.py", line 327, in _fit_bfgs
disp=disp, retall=retall, callback=callback)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\scipy\optimize\optimize.py", line 916, in fmin_bfgs
res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\scipy\optimize\optimize.py", line 970, in _minimize_bfgs
gfk = myfprime(x0)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\scipy\optimize\optimize.py", line 300, in function_wrapper
return function(*(wrapper_args + args))
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\base\model.py", line 451, in score
return -self.score(params, *args) / nobs
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tsa\arima_model.py", line 583, in score
return approx_fprime_cs(params, self.loglike, args=(False,))
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tools\numdiff.py", line 202, in approx_fprime_cs
for i, ih in enumerate(increments)]
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tools\numdiff.py", line 202, in
for i, ih in enumerate(increments)]
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tsa\arima_model.py", line 780, in loglike
return self.loglike_kalman(params, set_sigma2)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tsa\arima_model.py", line 790, in loglike_kalman
return KalmanFilter.loglike(params, self, set_sigma2)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tsa\kalmanf\kalmanfilter.py", line 654, in loglike
R_mat, T_mat)
File "kalman_loglike.pyx", line 359, in statsmodels.tsa.kalmanf.kalman_loglike.kalman_loglike_complex
File "kalman_loglike.pyx", line 228, in statsmodels.tsa.kalmanf.kalman_loglike.kalman_filter_complex
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\numpy\core\numeric.py", line 2200, in identity
return eye(n, dtype=dtype)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\numpy\lib\twodim_base.py", line 186, in eye
m = zeros((N, M), dtype=dtype, order=order)
MemoryError
Process finished with exit code 1
The model runs and forecasts fine up to an MA order of q=150, i.e. (0, 0, 150); the MemoryError is raised once q=367 is selected.
Can anyone help me solve this error? I have googled it many times and have not found a suitable solution.
Thank you for any help.
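
A back-of-the-envelope sketch of where the memory may go (my own rough reasoning: in the state-space form of an ARMA(p, q) model the Kalman state dimension is r = max(p, q+1), and the traceback shows complex-step differentiation, which re-runs the filter once per parameter):

# All numbers below are rough assumptions, not measurements.
p, q, nobs = 0, 367, 3652
r = max(p, q + 1)                    # Kalman state dimension: 368
mb_per_matrix = r * r * 16 / 1e6     # one r x r complex128 matrix: ~2.2 MB
n_params = q + 2                     # MA coefficients + constant + sigma2 (approx.)
print(mb_per_matrix, n_params)
# Compare q=150: r=151, ~0.36 MB per matrix. The gradient performs a full
# Kalman pass per parameter, so r x r allocations multiply quickly at q=367.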

Autoencoder in Chainer issue

I am trying to train an autoencoder with Chainer in Python and wrote the code below, but it does not work. Why?
import numpy as np
import chainer
from chainer import Chain, dataset, iterators, optimizers, training
import chainer.links as L

class Autoencoder(Chain):
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(3, 2)
            self.l2 = L.Linear(2, 3)

    def __call__(self, x):
        h1 = self.l1(x)
        h2 = self.l2(h1)
        return h2

class Dataset(dataset.DatasetMixin):
    def __init__(self, number_of_data, show_initial=False):
        noise_level = 1
        self.data = np.zeros((number_of_data, 3), dtype=np.float32)
        OA_vector = np.array([3, 2, 1])
        OB_vector = np.array([2, -1, 1])
        t = np.random.uniform(-0.5, 0.5, number_of_data)
        s = np.random.uniform(-0.5, 0.5, number_of_data)
        for i in range(0, number_of_data):
            noise = np.random.uniform(-noise_level, noise_level, 3)
            self.data[i] = t[i] * OA_vector + s[i] * OB_vector + noise

    def __len__(self):
        return self.data.shape[0]

    def get_example(self, idx):
        return self.data[idx]

if __name__ == "__main__":
    n_epoch = 5
    batch_size = 100
    number_of_data = 1000  # number of data points
    train_data = Dataset(number_of_data, False)
    model = Autoencoder()
    optimizer = optimizers.SGD(lr=0.05).setup(model)
    train_iter = iterators.SerialIterator(train_data, batch_size)
    updater = training.StandardUpdater(train_iter, optimizer, device=0)
    trainer = training.Trainer(updater, (n_epoch, "epoch"), out="result")
    trainer.run()
I am using Chainer. The Dataset class produces 3-dimensional vectors; the number of vectors is number_of_data.
Should I do this without using a trainer?
I don't understand where the problem is.
EDIT
When I run the above code with device=0, I get the error below.
Exception in main training loop: Unsupported type <class 'NoneType'>
Traceback (most recent call last):
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
File "cupy/core/core.pyx", line 1310, in cupy.core.core.ndarray.__mul__
File "cupy/core/elementwise.pxi", line 753, in cupy.core.core.ufunc.__call__
File "cupy/core/elementwise.pxi", line 68, in cupy.core.core._preprocess_args
Will finalize trainer extensions and updater before reraising the exception.
Traceback (most recent call last):
File "AC.py", line 70, in <module>
trainer.run()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 322, in run
six.reraise(*sys.exc_info())
File "/home/****/.local/lib/python3.5/site-packages/six.py", line 693, in reraise
raise value
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
File "cupy/core/core.pyx", line 1310, in cupy.core.core.ndarray.__mul__
File "cupy/core/elementwise.pxi", line 753, in cupy.core.core.ufunc.__call__
File "cupy/core/elementwise.pxi", line 68, in cupy.core.core._preprocess_args
TypeError: Unsupported type <class 'NoneType'>
When I run the above code with device=-1, I get the error below.
Exception in main training loop: unsupported operand type(s) for *: 'bool' and 'NoneType'
Traceback (most recent call last):
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
Will finalize trainer extensions and updater before reraising the exception.
Traceback (most recent call last):
File "AC.py", line 70, in <module>
trainer.run()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 322, in run
six.reraise(*sys.exc_info())
File "/home/****/.local/lib/python3.5/site-packages/six.py", line 693, in reraise
raise value
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/trainer.py", line 308, in run
update()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
self.update_core()
File "/home/****/.local/lib/python3.5/site-packages/chainer/training/updaters/standard_updater.py", line 164, in update_core
optimizer.update(loss_func, in_arrays)
File "/home/****/.local/lib/python3.5/site-packages/chainer/optimizer.py", line 655, in update
loss.backward(loss_scale=self._loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 966, in backward
self._backward_main(retain_grad, loss_scale)
File "/home/****/.local/lib/python3.5/site-packages/chainer/variable.py", line 1095, in _backward_main
target_input_indexes, out_grad, in_grad)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 548, in backward_accumulate
gxs = self.backward(target_input_indexes, grad_outputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 73, in backward
return ReLUGrad2(y).apply((gy,))
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 258, in apply
outputs = self.forward(in_data)
File "/home/****/.local/lib/python3.5/site-packages/chainer/function_node.py", line 368, in forward
return self.forward_cpu(inputs)
File "/home/****/.local/lib/python3.5/site-packages/chainer/functions/activation/relu.py", line 97, in forward_cpu
y = (self.b > 0) * inputs[0]
TypeError: unsupported operand type(s) for *: 'bool' and 'NoneType'
I think the model needs to return the loss from its __call__ method. A sample modification is as follows:
import chainer.functions as F

class Autoencoder(Chain):
    def __init__(self):
        super().__init__()
        with self.init_scope():
            self.l1 = L.Linear(3, 2)
            self.l2 = L.Linear(2, 3)

    def forward(self, x):
        h1 = self.l1(x)
        h2 = self.l2(h1)
        return h2

    def __call__(self, x):
        h = self.forward(x)
        # Instead of h, __call__ should return the loss.
        loss = F.mean_squared_error(h, x)
        return loss
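
This matches what the traceback shows: optimizer.update(loss_func, in_arrays) uses the model itself as the loss function when no loss_func is supplied, so __call__ must return a scalar loss variable. An alternative sketch that keeps the model returning its reconstruction and supplies the loss at updater construction time instead (assuming StandardUpdater's loss_func argument; untested):

import chainer.functions as F

# Hypothetical alternative: model(x) still returns the reconstruction,
# and the squared-error loss is computed by the updater's loss_func.
updater = training.StandardUpdater(
    train_iter, optimizer,
    loss_func=lambda x: F.mean_squared_error(model(x), x),
    device=-1)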

Feeding data through an embedding wrapper in TensorFlow

I'm working on a text summarization network and need to implement an encoder to use with tf.nn.seq2seq.embedding_attention_decoder. As part of that I need to encode varying batches of sequences into representation vectors, but the innermost encoding doesn't go through.
Here's a simplified snippet giving the same error:
import tensorflow as tf

single_cell = tf.nn.rnn_cell.GRUCell(1024)
sentence_cell = tf.nn.rnn_cell.EmbeddingWrapper(single_cell,
                                                embedding_classes=40000)
batch = [tf.placeholder(tf.int32, [1, 1]) for _ in range(250)]
(_, state) = tf.nn.rnn(sentence_cell, batch, dtype=tf.int32)
This fails with the following stack trace:
Traceback (most recent call last):
File "/home/ubuntu/workspace/example.py", line 6, in <module>
(_ , state) = tf.nn.rnn(sentence_cell, batch, dtype= tf.int32)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 126, in rnn
(output, state) = call_cell()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 119, in <lambda>
call_cell = lambda: cell(input_, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 616, in __call__
return self._cell(embedded, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 150, in __call__
2 * self._num_units, True, 1.0))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 706, in linear
res = math_ops.matmul(array_ops.concat(1, args), matrix)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 314, in concat
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 70, in _concat
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 396, in apply_op
raise TypeError("%s that don't all match." % prefix)
TypeError: Tensors in list passed to 'values' of 'Concat' Op have types [float32, int32] that don't all match.
When debugging, the input size of the sentence_cell is 1 and the elements in batch all have dimension [1,1], which is in fact [batch_size, sentence_cell.input_size].
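My current understanding (an assumption on my part, not something I have confirmed in the docs) is that dtype here describes the RNN's internal state and outputs rather than its inputs, which would explain why the following variant of the snippet compiles:

# Sketch under that assumption: the token-id placeholders stay int32,
# while dtype=tf.float32 types the state/outputs that tf.nn.rnn creates.
batch = [tf.placeholder(tf.int32, [1, 1]) for _ in range(250)]
(_, state) = tf.nn.rnn(sentence_cell, batch, dtype=tf.float32)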
Indeed, switching to dtype=tf.float32 in the call to tf.nn.rnn() makes the snippet work, but it gives me the following stack trace in my full code:
[nltk_data] Downloading package punkt to /home/alex/nltk_data...
[nltk_data] Package punkt is already up-to-date!
Preparing news data in .
Creating 3 layers of 1024 units.
> /home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py(84)encode_sentence()
-> (_ ,state) = tf.nn.rnn(sentence_cell, sent, sequence_length = length, dtype= tf.float32)
(Pdb) c
Traceback (most recent call last):
File "translate.py", line 268, in <module>
tf.app.run()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/platform/default/_app.py", line 30, in run
sys.exit(main(sys.argv))
File "translate.py", line 265, in main
train()
File "translate.py", line 161, in train
model = create_model(sess, False)
File "translate.py", line 136, in create_model
forward_only=forward_only)
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 141, in __init__
softmax_loss_function=None)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/seq2seq.py", line 926, in model_with_buckets
decoder_inputs[:bucket[1]])
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 140, in <lambda>
lambda x, y: seq3seq_f(x, y, False),
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 98, in seq3seq_f
art_vecs = tfmap(encode_article, tf.pack(encoder_inputs))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1900, in map
_, r_a = While(lambda i, a: math_ops.less(i, n), compute, [i, acc_ta])
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1557, in While
result = context.BuildLoop(cond, body, loop_vars)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1474, in BuildLoop
body_result = body(*vars_for_body_with_tensor_arrays)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1897, in compute
a = a.write(i, fn(elems_ta.read(i)))
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 92, in encode_article
return tfmap(encode_sentence, article)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1900, in map
_, r_a = While(lambda i, a: math_ops.less(i, n), compute, [i, acc_ta])
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1557, in While
result = context.BuildLoop(cond, body, loop_vars)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1474, in BuildLoop
body_result = body(*vars_for_body_with_tensor_arrays)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1897, in compute
a = a.write(i, fn(elems_ta.read(i)))
File "/home/alex/Programmering/kandidatarbete/arbete/code/seq3seq/seq3seq_model.py", line 84, in encode_sentence
(_ ,state) = tf.nn.rnn(sentence_cell, sent, sequence_length = length, dtype= tf.float32)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 124, in rnn
zero_output, state, call_cell)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 212, in _rnn_step
time < max_sequence_length, call_cell, empty_update)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1183, in cond
res_t = context_t.BuildCondBranch(fn1)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1106, in BuildCondBranch
r = fn()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 119, in <lambda>
call_cell = lambda: cell(input_, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 615, in __call__
embedding, array_ops.reshape(inputs, [-1]))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/embedding_ops.py", line 86, in embedding_lookup
validate_indices=validate_indices)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 423, in gather
validate_indices=validate_indices, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 486, in apply_op
_Attr(op_def, input_arg.type_attr))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/op_def_library.py", line 59, in _SatisfiesTypeConstraint
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
TypeError: DataType float32 for attr 'Tindices' not in list of allowed values: int32, int64
What am I missing?

IPython parallel load-balanced view failing randomly

Here's my code:
import time

from IPython.parallel import Client
from sklearn.datasets import load_digits

def mytask(data, labels, id):
    # ...
    pass

engines = Client()
bview = engines.load_balanced_view()
bview.block = False

digits = load_digits()
X, y = digits.data, digits.target

job = bview.apply(mytask, X, y, 1)
while not job.ready():  # line 242
    time.sleep(2)
print job.result
Occasionally with the same input my code fails with this:
Traceback (most recent call last):
File "task.py", line 242, in <module>
while not job.ready():
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/asyncresult.py", line 111, in ready
self.wait(0)
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/asyncresult.py", line 121, in wait
self._ready = self._client.wait(self.msg_ids, timeout)
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/client.py", line 844, in wait
self.spin()
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/client.py", line 799, in spin
self._flush_results(self._task_socket)
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/client.py", line 692, in _flush_results
handler(msg)
File "/usr/lib/python2.7/dist-packages/IPython/parallel/client/client.py", line 657, in _handle_apply_reply
self.results[msg_id] = util.unserialize_object(msg['buffers'])[0]
File "/usr/lib/python2.7/dist-packages/IPython/parallel/util.py", line 262, in unserialize_object
return uncanSequence(map(unserialize, sobj)), bufs
File "/usr/lib/python2.7/dist-packages/IPython/utils/newserialized.py", line 177, in unserialize
return UnSerializeIt(serialized).getObject()
File "/usr/lib/python2.7/dist-packages/IPython/utils/newserialized.py", line 161, in getObject
result = numpy.frombuffer(buf, dtype = self.serialized.metadata['dtype'])
ValueError: offset must be non-negative and smaller than buffer lenth (0)
This seems to be unconnected to my code. I'm not sure what's going wrong.
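
Since the failure is intermittent and happens while the client deserializes the reply (inside job.ready()) rather than in the task itself, one workaround sketch is to retry the polling loop (purely an assumption on my part, not a confirmed fix):

def wait_for(job, poll=2, retries=3):
    # Retry: the ValueError surfaces while unpacking the result buffers,
    # so a later poll may succeed; give up after a few attempts.
    for attempt in range(retries):
        try:
            while not job.ready():
                time.sleep(poll)
            return job.result
        except ValueError:
            time.sleep(poll)
    raise RuntimeError("could not retrieve job result")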
