How to initialize a kernel with a tensor - python

I have created a custom layer in Keras which simply performs a dot product between the input and a kernel. For the kernel I wanted to use the mean of the batch as the initialization, i.e. take the mean of the batch and produce a kernel whose initial value is that mean. To do so I created a custom kernel initializer as follows:
class Tensor_Init(Initializer):
    """Initializer that generates tensors initialized to a given tensor.

    # Arguments
        Tensor: the generator tensor.
    """
    def __init__(self, Tensor=None):
        self.Tensor = Tensor

    def __call__(self, shape, dtype=None):
        return tf.Variable(self.Tensor)

    def get_config(self):
        return {'Tensor': self.Tensor}
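For reference, a Keras Initializer is normally expected to return a tensor from __call__ rather than construct a new tf.Variable; add_weight wraps the returned value in a variable itself. A minimal sketch of that pattern (assuming standalone Keras on a TF 1.x backend, as used here; it does not by itself resolve the placeholder error further down, since the batch mean still depends on the model input):

import tensorflow as tf
from keras.initializers import Initializer

class TensorInit(Initializer):
    """Initializer that fills a weight with the value of a given tensor."""
    def __init__(self, tensor=None):
        self.tensor = tensor

    def __call__(self, shape, dtype=None):
        # Return the tensor itself; add_weight creates the tf.Variable.
        return self.tensor if dtype is None else tf.cast(self.tensor, dtype)

    def get_config(self):
        return {'tensor': self.tensor}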
This is the call method of the custom Keras layer. I simply compute the mean of the batch and use it with the above initializer class to produce the kernel:
def call(self, inputs):
    data_format = conv_utils.convert_data_format(self.data_format, self.rank + 2)
    inputs = tf.extract_image_patches(
        inputs,
        ksizes=(1,) + self.kernel_size + (1,),
        strides=(1,) + self.strides + (1,),
        rates=(1,) + self.dilation_rate + (1,),
        padding=self.padding.upper(),
    )
    inputs = K.reshape(inputs, [-1, inputs.get_shape().as_list()[1], inputs.get_shape().as_list()[2],
                                self.kernel_size[0] * self.kernel_size[1], self.output_dim])
    self.kernel = self.add_weight(name='kernel', shape=(),
                                  initializer=Tensor_Init(Tensor=tf.reduce_mean(inputs, 0)),
                                  trainable=True)
    outputs = (tf.einsum('NHWKC,HWKC->NHWC', inputs, self.kernel) + self.c) ** self.p
    if self.data_format == 'channels_first':
        outputs = K.permute_dimensions(outputs, (0, 3, 1, 2))
    return outputs
The model is created and compiled normally, but when I start training I get this error:
InvalidArgumentError: You must feed a value for placeholder tensor 'conv2d_1_input' with dtype float and shape [?,48,48,3]
[[node conv2d_1_input (defined at C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\keras\backend\tensorflow_backend.py:736) ]]
Original stack trace for 'conv2d_1_input':
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\runpy.py", line 85, in _run_code
exec(code, run_globals)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\traitlets\config\application.py", line 658, in launch_instance
app.start()
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\ipykernel\kernelapp.py", line 563, in start
self.io_loop.start()
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tornado\platform\asyncio.py", line 148, in start
self.asyncio_loop.run_forever()
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\asyncio\base_events.py", line 438, in run_forever
self._run_once()
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\asyncio\base_events.py", line 1451, in _run_once
handle._run()
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\asyncio\events.py", line 145, in _run
self._callback(*self._args)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tornado\ioloop.py", line 690, in <lambda>
lambda f: self._run_callback(functools.partial(callback, future))
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tornado\ioloop.py", line 743, in _run_callback
ret = callback()
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tornado\gen.py", line 787, in inner
self.run()
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tornado\gen.py", line 748, in run
yielded = self.gen.send(value)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\ipykernel\kernelbase.py", line 378, in dispatch_queue
yield self.process_one()
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tornado\gen.py", line 225, in wrapper
runner = Runner(result, future, yielded)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tornado\gen.py", line 714, in __init__
self.run()
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tornado\gen.py", line 748, in run
yielded = self.gen.send(value)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\ipykernel\kernelbase.py", line 365, in process_one
yield gen.maybe_future(dispatch(*args))
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tornado\gen.py", line 209, in wrapper
yielded = next(result)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\ipykernel\kernelbase.py", line 272, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tornado\gen.py", line 209, in wrapper
yielded = next(result)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\ipykernel\kernelbase.py", line 542, in execute_request
user_expressions, allow_stdin,
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tornado\gen.py", line 209, in wrapper
yielded = next(result)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\ipykernel\ipkernel.py", line 294, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\ipykernel\zmqshell.py", line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\IPython\core\interactiveshell.py", line 2855, in run_cell
raw_cell, store_history, silent, shell_futures)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\IPython\core\interactiveshell.py", line 2881, in _run_cell
return runner(coro)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\IPython\core\async_helpers.py", line 68, in _pseudo_sync_runner
coro.send(None)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\IPython\core\interactiveshell.py", line 3058, in run_cell_async
interactivity=interactivity, compiler=compiler, result=result)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\IPython\core\interactiveshell.py", line 3249, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\IPython\core\interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-2-35eda01d200a>", line 75, in <module>
model = create_vgg16()
File "<ipython-input-2-35eda01d200a>", line 12, in create_vgg16
model.add(Conv2D(64, (5, 5), input_shape=(48,48,3), padding='same'))
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\sequential.py", line 162, in add
name=layer.name + '_input')
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\input_layer.py", line 178, in Input
input_tensor=tensor)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\keras\legacy\interfaces.py", line 91, in wrapper
return func(*args, **kwargs)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\keras\engine\input_layer.py", line 87, in __init__
name=self.name)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\keras\backend\tensorflow_backend.py", line 736, in placeholder
shape=shape, ndim=ndim, dtype=dtype, sparse=sparse, name=name)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\keras\backend.py", line 998, in placeholder
x = array_ops.placeholder(dtype, shape=shape, name=name)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\ops\array_ops.py", line 2143, in placeholder
return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 7401, in placeholder
"Placeholder", dtype=dtype, shape=shape, name=name)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 788, in _apply_op_helper
op_def=op_def)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\util\deprecation.py", line 507, in new_func
return func(*args, **kwargs)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\framework\ops.py", line 3616, in create_op
op_def=op_def)
File "C:\ProgramData\Anaconda3\envs\tensorflow\lib\site-packages\tensorflow\python\framework\ops.py", line 2005, in __init__
self._traceback = tf_stack.extract_stack()

I was able to pass the mean of the batch to the kernel by simply creating a zero-initialized kernel and then assigning the mean value to it, without even creating a custom initializer. I modified the custom layer as follows:
def call(self, inputs):
    data_format = conv_utils.convert_data_format(self.data_format, self.rank + 2)
    inputs = tf.extract_image_patches(
        inputs,
        ksizes=(1,) + self.kernel_size + (1,),
        strides=(1,) + self.strides + (1,),
        rates=(1,) + self.dilation_rate + (1,),
        padding=self.padding.upper(),
    )
    inputs = K.reshape(inputs, [-1, inputs.get_shape().as_list()[1], inputs.get_shape().as_list()[2],
                                self.kernel_size[0] * self.kernel_size[1], self.output_dim])
    weights = tf.reduce_mean(inputs, 0)
    self.kernel = self.add_weight(name='kernel',
                                  shape=(weights.get_shape().as_list()[0],
                                         weights.get_shape().as_list()[1],
                                         weights.get_shape().as_list()[2],
                                         weights.get_shape().as_list()[3]),
                                  initializer='zeros',
                                  trainable=True)
    tf.compat.v1.assign(self.kernel, weights)
    outputs = (tf.einsum('NHWKC,HWKC->NHWC', inputs, self.kernel) + self.c) ** self.p
    if self.data_format == 'channels_first':
        outputs = K.permute_dimensions(outputs, (0, 3, 1, 2))
    return outputs
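For reference, the same assign-a-batch-mean idea in a minimal TF 2.x toy layer (the layer name, kernel shape, and element-wise use of the kernel are illustrative assumptions, not the layer above):

import tensorflow as tf

class MeanKernelLayer(tf.keras.layers.Layer):
    def build(self, input_shape):
        # One weight per feature, initialized to zeros.
        self.kernel = self.add_weight(
            name="kernel",
            shape=input_shape[1:],
            initializer="zeros",
            trainable=True,
        )

    def call(self, inputs):
        # Overwrite the kernel with the current batch mean, then use it.
        self.kernel.assign(tf.reduce_mean(inputs, axis=0))
        return inputs * self.kernel

layer = MeanKernelLayer()
x = tf.random.normal([4, 3])
print(layer(x).shape)  # (4, 3)

Note that an assign inside call runs on every forward pass, which matters if the goal is only to set an initial value.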

Related

Create customize Loss function for my deep learning model

I am currently working on a deep learning project in the signal processing domain. The loss function currently used is the mean squared error (MSE). However, it turns out that this loss function does not directly reflect my evaluation metric. Thus, I decided to use an alternative loss function, Zero-mean Normalized Cross-Correlation (ZNCC).
Note: I use TensorFlow 2.0 for this project
Here is my defined loss function.
def ZNCC(true, predict):
    true = true.numpy()
    predict = predict.numpy()
    x_bar = np.average(true)
    y_bar = np.average(predict)
    u = true - x_bar
    v = predict - y_bar
    top = np.dot(u, v)
    bottom = np.linalg.norm(u) * np.linalg.norm(v)
    zncc = top / bottom
    if zncc < 0:
        zncc = 0
    return tf.convert_to_tensor(1 - zncc)
Here is the compilation statement
model = MyModel()
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.002,beta_1=0.99,beta_2=0.989,epsilon=1e-07), loss=ZNCC, run_eagerly=True)
And here is my model.fit() statement
history = model.fit(training_generator,
                    validation_data=validation_generator,
                    epochs=150,
                    callbacks=[tensorboard_callback, model_checkpoint_callback])
After I run the above cell, I run into this error.
Epoch 1/150
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-142-c047e0d40ac5> in <module>
3 validation_data=validation_generator,
4 epochs=150,
----> 5 callbacks=[tensorboard_callback, model_checkpoint_callback])
/tmp/__autograph_generated_file63laq54x.py in tf__ZNCC(true, predict)
14 u = (ag__.ld(true) - ag__.ld(x_bar))
15 v = (ag__.ld(predict) - ag__.ld(y_bar))
---> 16 top = ag__.converted_call(ag__.ld(np).dot, (ag__.ld(u), ag__.ld(v)), None, fscope)
17 bottom = (ag__.converted_call(ag__.ld(np).linalg.norm, (ag__.ld(u),), None, fscope) * ag__.converted_call(ag__.ld(np).linalg.norm, (ag__.ld(v),), None, fscope))
18 zncc = (ag__.ld(top) / ag__.ld(bottom))
<__array_function__ internals> in dot(*args, **kwargs)
ValueError: in user code:
File "<ipython-input-136-e4e00382e336>", line 9, in ZNCC *
top = np.dot(u,v)
File "<__array_function__ internals>", line 6, in dot **
ValueError: shapes (128,3490) and (128,3490) not aligned: 3490 (dim 1) != 128 (dim 0)
Could anyone provide the reason for this error and the solution to fix it?
Much appreciated.
EDIT
I have tried to reimplement the loss function like so:
def ZNCC(true, predict):
    x_bar = tf.math.reduce_mean(true)
    y_bar = tf.math.reduce_mean(predict)
    u = true - x_bar
    v = predict - y_bar
    print(x_bar)
    print(y_bar)
    print(u)
    print(v)
    u = tf.dtypes.cast(u, tf.float64)
    v = tf.dtypes.cast(v, tf.float64)
    top = tf.tensordot(u, v, 1)
    bottom = tf.norm(u) * tf.norm(v)
    zncc = top / bottom
    print(top)
    print(bottom)
    print(zncc)
    zncc = tf.cond(tf.less(zncc, 0), lambda: tf.dtypes.cast(0, tf.float64), lambda: zncc)
    return 1 - zncc
Then I also tried to evaluate the function like so:
x = tf.constant([1, 2, 3, 4, 5])
y = tf.constant([2,2,2,2,1])
ZNCC(x,y)
It returns the result as expected:
tf.Tensor(3, shape=(), dtype=int32)
tf.Tensor(1, shape=(), dtype=int32)
tf.Tensor([-2 -1 0 1 2], shape=(5,), dtype=int32)
tf.Tensor([1 1 1 1 0], shape=(5,), dtype=int32)
tf.Tensor(-2.0, shape=(), dtype=float64)
tf.Tensor(6.324555320336759, shape=(), dtype=float64)
tf.Tensor(-0.31622776601683794, shape=(), dtype=float64)
<tf.Tensor: shape=(), dtype=float64, numpy=1.0>
However, when I try to run it as the loss function, it gives the following error:
InvalidArgumentError: Graph execution error:
Detected at node 'ZNCC/Tensordot/MatMul' defined at (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.7/dist-packages/traitlets/config/application.py", line 846, in launch_instance
app.start()
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelapp.py", line 612, in start
self.io_loop.start()
File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 132, in start
self.asyncio_loop.run_forever()
File "/usr/lib/python3.7/asyncio/base_events.py", line 541, in run_forever
self._run_once()
File "/usr/lib/python3.7/asyncio/base_events.py", line 1786, in _run_once
handle._run()
File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "/usr/local/lib/python3.7/dist-packages/tornado/ioloop.py", line 758, in _run_callback
ret = callback()
File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/tornado/gen.py", line 1233, in inner
self.run()
File "/usr/local/lib/python3.7/dist-packages/tornado/gen.py", line 1147, in run
yielded = self.gen.send(value)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 365, in process_one
yield gen.maybe_future(dispatch(*args))
File "/usr/local/lib/python3.7/dist-packages/tornado/gen.py", line 326, in wrapper
yielded = next(result)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 268, in dispatch_shell
yield gen.maybe_future(handler(stream, idents, msg))
File "/usr/local/lib/python3.7/dist-packages/tornado/gen.py", line 326, in wrapper
yielded = next(result)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 545, in execute_request
user_expressions, allow_stdin,
File "/usr/local/lib/python3.7/dist-packages/tornado/gen.py", line 326, in wrapper
yielded = next(result)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/ipkernel.py", line 306, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/zmqshell.py", line 536, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2855, in run_cell
raw_cell, store_history, silent, shell_futures)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2881, in _run_cell
return runner(coro)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/async_helpers.py", line 68, in _pseudo_sync_runner
coro.send(None)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 3058, in run_cell_async
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 3249, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 3326, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-42-c047e0d40ac5>", line 5, in <module>
callbacks=[tensorboard_callback, model_checkpoint_callback])
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1409, in fit
tmp_logs = self.train_function(iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1051, in train_function
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1040, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1030, in run_step
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 890, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 949, in compute_loss
y, y_pred, sample_weight, regularization_losses=self.losses)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 139, in __call__
losses = call_fn(y_true, y_pred)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 243, in call
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "<ipython-input-39-245f12eafcef>", line 9, in ZNCC
top = tf.tensordot(u,v,1)
Node: 'ZNCC/Tensordot/MatMul'
Matrix size-incompatible: In[0]: [128,3490], In[1]: [128,3490]
[[{{node ZNCC/Tensordot/MatMul}}]] [Op:__inference_train_function_3878]
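For context on the shape message above: tf.tensordot(u, v, 1), like np.dot on 2-D arrays, contracts the last axis of u against the first axis of v, so two (128, 3490) batch tensors give the 3490-vs-128 mismatch. A minimal sketch of a ZNCC loss that instead reduces element-wise over the whole batch (assumptions: TensorFlow 2.x, float32 casting, and a small epsilon in the denominator; one possible formulation, not necessarily the intended per-sample one):

import tensorflow as tf

def zncc_loss(y_true, y_pred):
    y_true = tf.cast(y_true, tf.float32)
    y_pred = tf.cast(y_pred, tf.float32)
    u = y_true - tf.reduce_mean(y_true)
    v = y_pred - tf.reduce_mean(y_pred)
    # Element-wise product summed over every element -- no matrix product.
    top = tf.reduce_sum(u * v)
    bottom = tf.norm(u) * tf.norm(v) + 1e-12   # epsilon guards against 0/0
    zncc = tf.maximum(top / bottom, 0.0)
    return 1.0 - zncc

# Quick check with the toy vectors from the question.
x = tf.constant([1, 2, 3, 4, 5])
y = tf.constant([2, 2, 2, 2, 1])
print(zncc_loss(x, y))   # tf.Tensor(1.0, shape=(), dtype=float32)

A per-sample variant would replace the global reductions with reductions over axis=1 followed by a mean over the batch.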

How can I save a tensor object to a numpy array?

I have implemented an autoencoder on my custom image data for sign language recognition. Now I want to save the tensor object of the output layer to a numpy array. I tried Session.run(tensor) and tensor.eval(). Here is my code.
# define model
x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 6])
k = 190
l = 180
m = 150
n = 130
o = 100
num_of_epoch = 10
w1 = tf.Variable(tf.truncated_normal([784, k], stddev=0.1))
b1 = tf.Variable(tf.zeros([k]))
w2 = tf.Variable(tf.truncated_normal([k, l], stddev=0.1))
b2 = tf.Variable(tf.zeros([l]))
w3 = tf.Variable(tf.truncated_normal([l, m], stddev=0.1))
b3 = tf.Variable(tf.zeros([m]))
w4 = tf.Variable(tf.truncated_normal([m, n], stddev=0.1))
b4 = tf.Variable(tf.zeros([n]))
w5 = tf.Variable(tf.truncated_normal([n, o], stddev=0.1))
b5 = tf.Variable(tf.zeros([o]))
w6 = tf.Variable(tf.truncated_normal([o, 6], stddev=0.1))
b6 = tf.Variable(tf.zeros([6]))
y1 = tf.nn.relu(tf.matmul(x, w1) + b1)
y2 = tf.nn.relu(tf.matmul(y1, w2) + b2)
y3 = tf.nn.relu(tf.matmul(y2, w3) + b3)
y4 = tf.nn.relu(tf.matmul(y3, w4) + b4)
y5 = tf.nn.relu(tf.matmul(y4, w5) + b5)
y = tf.nn.softmax(tf.matmul(y5, w6) + b6)
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.03).minimize(cross_entropy)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    sess.run(init)
    for i in range(num_of_epoch):
        train_data = {x: x_train, y_: y_train}
        sess.run(train_step, feed_dict=train_data)
    currect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(currect_prediction, tf.float32))
    sess.run(accuracy, feed_dict={x: x_train, y_: y_train})
    currect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(currect_prediction, tf.float32))
    sess.run(accuracy, feed_dict={x: x_test, y_: y_test})
    y_p = tf.argmax(y, 1).eval()  # this line shows me the error
    print(y_p)
I am getting the error below. How can I fix this error and save the tensor data to a numpy array?
Traceback (most recent call last):
File "<ipython-input-45-5e38490a3e8e>", line 1, in <module>
runfile('C:/Users/RIFAT/PycharmProjects/tensorflow_autoencoder
/autoencoderreconstruction.py',
wdir='C:/Users/RIFAT/PycharmProjects/tensorflow_autoencoder')
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\spyder\utils
\site\sitecustomize.py", line 880, in runfile
execfile(filename, namespace)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\spyder\utils
\site\sitecustomize.py", line 102, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/RIFAT/PycharmProjects/tensorflow_autoencoder
/autoencoderreconstruction.py", line 112, in <module>
y_p = tf.argmax(y, 1).eval()
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tensorflow\python
\framework\ops.py", line 606, in eval
return _eval_using_default_session(self, feed_dict, self.graph, session)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tensorflow\python
\framework\ops.py", line 3928, in _eval_using_default_session
return session.run(tensors, feed_dict)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tensorflow\python\client
\session.py", line 789, in run
run_metadata_ptr)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tensorflow\python\client
\session.py", line 997, in _run
feed_dict_string, options, run_metadata)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tensorflow\python\client
\session.py", line 1132, in _do_run
target_list, options, run_metadata)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tensorflow\python\client
\session.py", line 1152, in _do_call
raise type(e)(node_def, op, message)
InvalidArgumentError: Shape [-1,784] has negative dimensions
[[Node: Placeholder_62 = Placeholder[dtype=DT_FLOAT, shape=[?,784],
_device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op 'Placeholder_62', defined at:
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\spyder\utils\ipython
\start_kernel.py", line 231, in <module>
main()
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\spyder\utils\ipython
\start_kernel.py", line 227, in main
kernel.start()
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\ipykernel\kernelapp.py",
line 477, in start
ioloop.IOLoop.instance().start()
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\zmq\eventloop
\ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tornado\ioloop.py", line
888, in start
handler_func(fd_obj, events)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tornad
\stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\zmq\eventloop
\zmqstream.py", line 440, in _handle_events
self._handle_recv()
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\zmq\eventloop
\zmqstream.py", line 472, in _handle_recv
self._run_callback(callback, msg)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\zmq\eventloop
\zmqstream.py", line 414, in _run_callback
callback(*args, **kwargs)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tornado
\stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\ipykernel
\kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\ipykernel
\kernelbase.py", line 235, in dispatch_shell
handler(stream, idents, msg)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\ipykernel
\kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\ipykernel\ipkernel.py",
line 196, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\ipykernel\zmqshell.py",
line 533, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\IPython
\core\interactiveshell.py", line 2717, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\IPython
\core\interactiveshell.py", line 2827, in run_ast_nodes
if self.run_code(code, result):
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\IPython
\core\interactiveshell.py", line 2881, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-45-5e38490a3e8e>", line 1, in <module>
runfile('C:/Users/RIFAT/PycharmProjects/tensorflow_autoencoder
/autoencoderreconstruction.py', wdir='C:/Users/RIFAT/PycharmProjects
/tensorflow_autoencoder')
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\spyder\utils
\site\sitecustomize.py", line 880, in runfile
execfile(filename, namespace)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\spyder\utils
\site\sitecustomize.py", line 102, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/RIFAT/PycharmProjects/tensorflow_autoencoder
/autoencoderreconstruction.py", line 62, in <module>
x= tf.placeholder(tf.float32,[None,784])
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tensorflow\python
\ops\array_ops.py", line 1530, in placeholder
return gen_array_ops._placeholder(dtype=dtype, shape=shape, name=name)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tensorflow\python
\ops\gen_array_ops.py", line 1954, in _placeholder
name=name)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tensorflow\python
\framework\op_def_library.py", line 767, in apply_op
op_def=op_def)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tensorflow\python
\framework\ops.py", line 2506, in create_op
original_op=self._default_original_op, op_def=op_def)
File "C:\Users\RIFAT\Anaconda3\lib\site-packages\tensorflow\python
\framework\ops.py", line 1269, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): Shape [-1,784] has
negative dimensions
[[Node: Placeholder_62 = Placeholder[dtype=DT_FLOAT, shape=[?,784],
_device="/job:localhost/replica:0/task:0/cpu:0"]()]]
That's because y is a tensor in the graph and not a variable. When you run .eval() on a variable, it gives you the current value held by that variable in that session. But if you run .eval() on a tensor instead, like tf.argmax(y, 1).eval() in your case, then TensorFlow runs the graph up to that node to get its value. Since in your case it doesn't get the values of the placeholders x and y_ while running the graph, it gives the error. One way to resolve this error is to pass the values of the placeholders in your eval call like this:
tf.argmax(y, 1).eval(feed_dict= {x:x_test,y_:y_test})
But a more preferred way is to give your session as context to the eval call, in which case it will return the value of the tensor. For example:
tf.argmax(y, 1).eval(session = sess)
Your question is not 100% clear. But the error you are seeing is caused by the fact that you try to run the graph without the feed dict in place. To see the output of the prediction (that is with the argmax(y, 1) present) you'd simply run:
y_p = sess.run(tf.argmax(y, 1), feed_dict=train_data)
print(y_p)
But that will give you the actual predicted values (on the train data, since that is what is fed; to get them on the test data, simply feed in the test data). To get the probabilities you'd pull y without the argmax:
y_p = sess.run(y, feed_dict=train_data)
print(y_p)
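As a small follow-up on the "save to a numpy array" part, note that sess.run already returns a numpy.ndarray, so it can be written straight to disk. A sketch reusing the graph, session, and test data from the question (not standalone code; the 'predictions.npy' filename is illustrative):

import numpy as np

with tf.Session() as sess:
    sess.run(init)
    # ... training loop from the question ...
    y_p = sess.run(tf.argmax(y, 1), feed_dict={x: x_test, y_: y_test})
    print(type(y_p))                 # <class 'numpy.ndarray'>
    np.save('predictions.npy', y_p)  # illustrative output filename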

Argument error with feed_dict

I am confused about feed_dict. I can feed my dataset as X, but I cannot feed the Y labels. All dtypes are tf.float64; I printed them out, and yes, they are float64. My X dataset has shape (90, 32, 32, 3) and my Y is (90, 2), so I defined placeholders to be fed with the same shapes.
But when I run my code, this error comes out:
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_34' with dtype double
[[Node: Placeholder_34 = Placeholder[dtype=DT_DOUBLE, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
I have no idea why this happened, since I am really sure that both X and Y are float64. X is fine, so why does only Y fail? Please help, I would really appreciate it :)
Here's the code:
size_image = 32
allX = np.zeros((100, size_image, size_image, 3), dtype='float64')
ally = np.zeros(100, dtype='float64')
count = 0
# cat images and labels
for i in range(49):
    img = io.imread("cat/cat" + str(i + 1) + ".jpg")
    new_img = imresize(img, (size_image, size_image, 3))
    allX[count] = np.array(new_img)
    ally[count] = 0
    count += 1
# dog images and labels
for i in range(51):
    img = io.imread("dog/dog" + str(i + 1) + ".jpg")
    new_img = imresize(img, (size_image, size_image, 3))
    allX[count] = np.array(new_img)
    ally[count] = 1
    count += 1
# split the dataset so it is shuffled
X, X_test, Y, Y_test = train_test_split(allX, ally, test_size=0.1, random_state=42)
# encode the Ys
Y = to_categorical(Y, 2)
Y_test = to_categorical(Y_test, 2)
x = tf.placeholder(tf.float64, [None, None, None, 3])
y_ = tf.placeholder(tf.float64, [None, 2])
sess = tf.Session()
sess.run(init)
t = x * 2
print(sess.run(t, feed_dict={x: X}))
m = 1 * y
print(sess.run(m, feed_dict={y_: Y}))
And here is the traceback:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-95-6f97fe1da9c0> in <module>()
6 print(y.get_shape())
7 m = 1*y
----> 8 print(sess.run(m, feed_dict={y_:Y}))
9 print(y_.get_shape().as_list())
10 #x = tf.stack(X)
InvalidArgumentError: You must feed a value for placeholder tensor 'Placeholder_34' with dtype double
[[Node: Placeholder_34 = Placeholder[dtype=DT_DOUBLE, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
Caused by op u'Placeholder_34', defined at:
File "/usr/lib/python2.7/runpy.py", line 174, in _run_module_as_main
"__main__", fname, loader, pkg_name)
File "/usr/lib/python2.7/runpy.py", line 72, in _run_code
exec code in run_globals
File "/usr/local/lib/python2.7/dist-packages/ipykernel/__main__.py", line 3, in <module>
app.launch_new_instance()
File "/usr/local/lib/python2.7/dist-packages/traitlets/config/application.py", line 658, in launch_instance
app.start()
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelapp.py", line 474, in start
ioloop.IOLoop.instance().start()
File "/usr/local/lib/python2.7/dist-packages/zmq/eventloop/ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "/usr/local/lib/python2.7/dist-packages/tornado/ioloop.py", line 887, in start
handler_func(fd_obj, events)
File "/usr/local/lib/python2.7/dist-packages/tornado/stack_context.py", line 275, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/zmq/eventloop/zmqstream.py", line 440, in _handle_events
self._handle_recv()
File "/usr/local/lib/python2.7/dist-packages/zmq/eventloop/zmqstream.py", line 472, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python2.7/dist-packages/zmq/eventloop/zmqstream.py", line 414, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/tornado/stack_context.py", line 275, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelbase.py", line 276, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelbase.py", line 228, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/kernelbase.py", line 390, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/ipkernel.py", line 196, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python2.7/dist-packages/ipykernel/zmqshell.py", line 501, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/IPython/core/interactiveshell.py", line 2717, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python2.7/dist-packages/IPython/core/interactiveshell.py", line 2821, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python2.7/dist-packages/IPython/core/interactiveshell.py", line 2881, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-94-297273a8e189>", line 1, in <module>
x = tf.placeholder(tf.float64, [None,None, None,3])
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/array_ops.py", line 1520, in placeholder
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/gen_array_ops.py", line 2149, in _placeholder
name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/op_def_library.py", line 763, in apply_op
op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 2395, in create_op
original_op=self._default_original_op, op_def=op_def)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 1264, in __init__
self._traceback = _extract_stack()
InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'Placeholder_34' with dtype double
[[Node: Placeholder_34 = Placeholder[dtype=DT_DOUBLE, shape=[], _device="/job:localhost/replica:0/task:0/cpu:0"]()]]
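Two things stand out when reading this question (an observation and a sketch, not a verified fix): the failing node is 'Placeholder_34' with shape [], i.e. a stale scalar placeholder left in the default graph by an earlier notebook cell, and the line m = 1*y multiplies y rather than the y_ placeholder that is actually fed. A minimal sketch of the intended check on a fresh graph (assuming TensorFlow 1.x, with zero-filled toy arrays standing in for the real images):

import numpy as np
import tensorflow as tf

tf.reset_default_graph()        # drop any stale placeholders from earlier cells
x = tf.placeholder(tf.float64, [None, None, None, 3])
y_ = tf.placeholder(tf.float64, [None, 2])

X = np.zeros((90, 32, 32, 3), dtype='float64')   # toy stand-ins for the image data
Y = np.zeros((90, 2), dtype='float64')

t = x * 2
m = 1 * y_                      # note: y_, not y

with tf.Session() as sess:
    print(sess.run(t, feed_dict={x: X}).shape)   # (90, 32, 32, 3)
    print(sess.run(m, feed_dict={y_: Y}).shape)  # (90, 2)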

TypeError when calling dynamic_rnn on LSTMCell

I'm kind of new to working with TensorFlow and my problem may be easy to solve, at least I hope so.
I'm trying to work with LSTMCell to predict the next label in a sequence.
Here is the code I'm using:
import tensorflow as tf
max_sequence_length = 1000
vector_length = 1
number_of_classes = 1000
batch_size = 50
num_hidden = 24
# Define graph
data = tf.placeholder(tf.int64, [None, max_sequence_length, vector_length])
# 0 must be a free class so that the mask can work
target = tf.placeholder(tf.int64, [None, max_sequence_length, number_of_classes + 1])
labels = tf.argmax(target, 2)
cell = tf.nn.rnn_cell.LSTMCell(num_hidden, state_is_tuple=True)
Then I try to get the real length of each sequence in the batch
no_of_batches = tf.shape(data)[0]
sequence_lengths = tf.zeros([batch_size])
for i in xrange(max_sequence_length):
    data_at_t = tf.squeeze(tf.slice(data, [0, i, 0], [-1, 1, -1]))
    t = tf.scalar_mul(i, tf.ones([batch_size]))
    boolean = tf.not_equal(data_at_t, tf.zeros([no_of_batches, batch_size], dtype=tf.int64))
    sequence_lengths = tf.select(boolean, t, sequence_lengths)
And finally I try to call tf.nn.dynamic_rnn:
outputs, state = tf.nn.dynamic_rnn(
    cell=cell,
    inputs=data,
    sequence_length=max_sequence_length,
    dtype=tf.float64
)
Then, I get a TypeError:
Traceback (most recent call last):
File "<stdin>", line 5, in <module>
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 830, in dynamic_rnn
dtype=dtype)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 997, in _dynamic_rnn_loop
swap_memory=swap_memory)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1973, in while_loop
result = context.BuildLoop(cond, body, loop_vars)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1860, in BuildLoop
pred, body, original_loop_vars, loop_vars)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/control_flow_ops.py", line 1810, in _BuildLoop
body_result = body(*packed_vars_for_body)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 980, in _time_step
skip_conditionals=True)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 394, in _rnn_step
new_output, new_state = call_cell()
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn.py", line 968, in <lambda>
call_cell = lambda: cell(input_t, state)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 489, in __call__
dtype, self._num_unit_shards)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 323, in _get_concat_variable
sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/rnn_cell.py", line 353, in _get_sharded_variable
dtype=dtype))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 830, in get_variable
custom_getter=custom_getter)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 673, in get_variable
custom_getter=custom_getter)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 217, in get_variable
validate_shape=validate_shape)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 202, in _true_getter
caching_device=caching_device, validate_shape=validate_shape)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 536, in _get_single_variable
validate_shape=validate_shape)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variables.py", line 211, in __init__
dtype=dtype)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variables.py", line 281, in _init_from_args
self._initial_value = ops.convert_to_tensor(initial_value(),
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/variable_scope.py", line 526, in <lambda>
init_val = lambda: initializer(shape.as_list(), dtype=dtype)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/init_ops.py", line 210, in _initializer
dtype, seed=seed)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/ops/random_ops.py", line 235, in random_uniform
minval = ops.convert_to_tensor(minval, dtype=dtype, name="min")
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/ops.py", line 621, in convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/constant_op.py", line 180, in _constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/constant_op.py", line 163, in constant
tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape))
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/tensor_util.py", line 353, in make_tensor_proto
_AssertCompatible(values, dtype)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/tensor_util.py", line 290, in _AssertCompatible
(dtype.name, repr(mismatch), type(mismatch).__name__))
TypeError: Expected int64, got -0.34641016151377546 of type 'float' instead.
I don't understand where this float comes from, as all other values in the script are integers. How can I solve this problem?
1) The RNN cell state is tf.float64. You set this explicitly within the tf.nn.dynamic_rnn call (dtype). The TensorFlow engine then initializes the states with the RNN's default random_uniform initializer, which is why you have the -0.34 float value there.
I'm not sure what you wanted to achieve. Please refer to https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#dynamic_rnn
2) sequence_length = max_sequence_length must be an int32/int64 vector of size [batch_size] instead of the scalar 1000.
3) You may want to initialize the LSTMCell state too:
cell = tf.nn.rnn_cell.LSTMCell(num_hidden, state_is_tuple=True,
                               initializer=tf.constant_initializer(value=0, dtype=tf.int32))
I had a similar problem with my code; I fixed it by replacing the int32 in the placeholder with float. Check if that works.
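Putting points 1) and 2) together, here is a minimal runnable sketch (assuming the TF 1.x-era API used in the question; the float32 dtype and the placeholder-based per-example lengths are illustrative choices):

import tensorflow as tf

max_sequence_length, vector_length, num_hidden = 1000, 1, 24

# Float inputs and one sequence length per batch element.
data = tf.placeholder(tf.float32, [None, max_sequence_length, vector_length])
sequence_lengths = tf.placeholder(tf.int32, [None])

cell = tf.nn.rnn_cell.LSTMCell(num_hidden, state_is_tuple=True)
outputs, state = tf.nn.dynamic_rnn(
    cell=cell,
    inputs=data,
    sequence_length=sequence_lengths,
    dtype=tf.float32,
)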

Odd Dimension() object in shape of input?

I am experiencing issues with TensorFlow. I've narrowed them down to the format of the input shape passed to the build() method of a custom Attention layer in tf.keras: I am getting a list of Dimension() objects instead of the actual input shape. Please help.
Attention Layer
x_attn = []
for i in range(self.attn_range):  # 4
    x_attn.append(Attention()(x))
print("DEBUG:00001")
x = L.Concatenate(-1)(x_attn)
Custom Attention Layer build() method:
def build(self, input_shape):
    print("DEBUG:00005")
    params_shape = list(input_shape[1:])
    print("DEBUG:00007", params_shape)
    self.query_weights = self.add_weight(
        name='q_weights',
        shape=params_shape,
        initializer=self.q_weights_init
    )
    print("DEBUG:00006")
    self.key_weights = self.add_weight(
        name='key_weights',
        shape=params_shape,
        initializer=self.key_weights_init
    )
    self.val_weights = self.add_weight(
        name='val_weights',
        shape=params_shape,
        initializer=self.value_weights_init
    )
The output of the params_shape debug print is:
DEBUG:00007 [Dimension(10), Dimension(5), Dimension(128)]
Edit:
Full Error Message as Requested:
Traceback (most recent call last):
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/tensor_util.py", line 558, in make_tensor_proto
str_values = [compat.as_bytes(x) for x in proto_values]
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/tensor_util.py", line 558, in <listcomp>
str_values = [compat.as_bytes(x) for x in proto_values]
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/util/compat.py", line 65, in as_bytes
(bytes_or_text,))
TypeError: Expected binary or unicode string, got Dimension(10)
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "Empowerment\.py", line 327, in <module>
env_actor = ComputerEnv()
File "Empowerment\.py", line 225, in __init__
self.dqn = ICDQNAgent(self.state_size + (3,), self.state_size[0], 4)
File "Empowerment\.py", line 78, in __init__
self.model, self.autoencoder, self.critic = self.build_model()
File "Empowerment\.py", line 99, in build_model
x_attn.append(Attention()(x))
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 591, in __call__
self._maybe_build(inputs)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 1881, in _maybe_build
self.build(input_shapes)
File "/home/ai/Desktop/ai_proj/layers.py", line 69, in build
initializer=self.q_weights_init
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py", line 384, in add_weight
aggregation=aggregation)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/training/tracking/base.py", line 663, in _add_variable_with_custom_getter
**kwargs_for_getter)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 155, in make_variable
shape=variable_shape if variable_shape.rank else None)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/variables.py", line 259, in __call__
return cls._variable_v1_call(*args, **kwargs)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/variables.py", line 220, in _variable_v1_call
shape=shape)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/variables.py", line 198, in <lambda>
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/variable_scope.py", line 2495, in default_variable_creator
shape=shape)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/variables.py", line 263, in __call__
return super(VariableMetaclass, cls).__call__(*args, **kwargs)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/resource_variable_ops.py", line 460, in __init__
shape=shape)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/resource_variable_ops.py", line 604, in _init_from_args
initial_value() if init_from_fn else initial_value,
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer_utils.py", line 135, in <lambda>
init_val = lambda: initializer(shape, dtype=dtype)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/init_ops.py", line 533, in __call__
shape, -limit, limit, dtype, seed=self.seed)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/random_ops.py", line 239, in random_uniform
shape = _ShapeTensor(shape)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/ops/random_ops.py", line 44, in _ShapeTensor
return ops.convert_to_tensor(shape, dtype=dtype, name="shape")
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 1087, in convert_to_tensor
return convert_to_tensor_v2(value, dtype, preferred_dtype, name)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 1145, in convert_to_tensor_v2
as_ref=False)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/ops.py", line 1224, in internal_convert_to_tensor
ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/constant_op.py", line 305, in _constant_tensor_conversion_function
return constant(v, dtype=dtype, name=name)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/constant_op.py", line 246, in constant
allow_broadcast=True)
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/constant_op.py", line 284, in _constant_impl
allow_broadcast=allow_broadcast))
File "/home/ai/anaconda3/lib/python3.7/site-packages/tensorflow/python/framework/tensor_util.py", line 562, in make_tensor_proto
"supported type." % (type(values), values))
TypeError: Failed to convert object of type <class 'list'> to Tensor. Contents: [Dimension(10), Dimension(5), Dimension(128)]. Consider casting elements to a supported type.
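For what it's worth, a minimal sketch of a build() that sidesteps the Dimension() issue by converting the shape to plain Python ints before calling add_weight (assumptions: tf.keras, a toy single-weight layer, and glorot_uniform standing in for the question's custom initializers):

import tensorflow as tf

class ToyAttention(tf.keras.layers.Layer):
    def build(self, input_shape):
        # as_list() turns Dimension objects (TF 1.x) or ints (TF 2.x) into plain ints.
        params_shape = tf.TensorShape(input_shape).as_list()[1:]
        self.query_weights = self.add_weight(
            name='q_weights', shape=params_shape, initializer='glorot_uniform')
        super(ToyAttention, self).build(input_shape)

    def call(self, inputs):
        # Element-wise use of the weight, just to make the layer runnable.
        return inputs * self.query_weights

x = tf.keras.Input(shape=(10, 5, 128))
y = ToyAttention()(x)
print(y.shape)  # (None, 10, 5, 128)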
