Problem running .compile() in my Jupyter notebook - python

I have a problem using .compile() after building a simple Sequential model with Keras.
The error I get:
TypeError: 'str' object is not callable
By the way, the code runs fine on Colab. The problem is that it doesn't work on my system, where I'm using an Anaconda Jupyter notebook.
The code:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential(
    [
        layers.Dense(2, activation="relu", name="layer1"),
        layers.Dense(3, activation="relu", name="layer2"),
        layers.Dense(4, name="layer3"),
    ])

model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in zeros(shape, dtype, name)
2725 # Go through tensor shapes to get int64-if-needed semantics
-> 2726 shape = constant_op._tensor_shape_tensor_conversion_function(
2727 tensor_shape.TensorShape(shape))
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in _tensor_shape_tensor_conversion_function(s, dtype, name, as_ref)
356 name = "shape_as_tensor"
--> 357 return constant(s_list, dtype=dtype, name=name)
358
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
261 """
--> 262 return _constant_impl(value, dtype, shape, name, verify_shape=False,
263 allow_broadcast=True)
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
270 if ctx.executing_eagerly():
--> 271 t = convert_to_eager_tensor(value, ctx, dtype)
272 if shape is None:
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
96
---> 97 return ops.EagerTensor(value, ctx.device_name(), dtype)
98
TypeError: 'str' object is not callable
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-107-3bc1e703d8ff> in <module>
----> 1 model.compile(loss=loss, optimizer=optim, metrics=metrics)
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, **kwargs)
327
328 self.optimizer = self._get_optimizer(optimizer)
--> 329 self.compiled_loss = compile_utils.LossesContainer(
330 loss, loss_weights, output_names=self.output_names)
331 self.compiled_metrics = compile_utils.MetricsContainer(
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/keras/engine/compile_utils.py in __init__(self, losses, loss_weights, output_names)
118 self._loss_weights = loss_weights
119 self._per_output_metrics = None # Per-output losses become metrics.
--> 120 self._loss_metric = metrics_mod.Mean(name='loss') # Total loss.
121 self._built = False
122
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/keras/metrics.py in __init__(self, name, dtype)
468 dtype: (Optional) data type of the metric result.
469 """
--> 470 super(Mean, self).__init__(
471 reduction=metrics_utils.Reduction.WEIGHTED_MEAN, name=name, dtype=dtype)
472
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/keras/metrics.py in __init__(self, reduction, name, dtype)
305 self.reduction = reduction
306 with ops.init_scope():
--> 307 self.total = self.add_weight(
308 'total', initializer=init_ops.zeros_initializer)
309 if reduction in [metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/keras/metrics.py in add_weight(self, name, shape, aggregation, synchronization, initializer, dtype)
274 synchronization = tf_variables.VariableSynchronization.ON_WRITE
275
--> 276 return super(Metric, self).add_weight(
277 name=name,
278 shape=shape,
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer.py in add_weight(self, name, shape, dtype, initializer, regularizer, trainable, constraint, partitioner, use_resource, synchronization, aggregation, **kwargs)
558 caching_device = None
559
--> 560 variable = self._add_variable_with_custom_getter(
561 name=name,
562 shape=shape,
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/training/tracking/base.py in _add_variable_with_custom_getter(self, name, shape, dtype, initializer, getter, overwrite, **kwargs_for_getter)
736 initializer = checkpoint_initializer
737 shape = None
--> 738 new_variable = getter(
739 name=name,
740 shape=shape,
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer_utils.py in make_variable(name, shape, dtype, initializer, trainable, caching_device, validate_shape, constraint, use_resource, collections, synchronization, aggregation, partitioner)
127 # can remove the V1.
128 variable_shape = tensor_shape.TensorShape(shape)
--> 129 return tf_variables.VariableV1(
130 initial_value=init_val,
131 name=name,
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/variables.py in __call__(cls, *args, **kwargs)
257 def __call__(cls, *args, **kwargs):
258 if cls is VariableV1:
--> 259 return cls._variable_v1_call(*args, **kwargs)
260 elif cls is Variable:
261 return cls._variable_v2_call(*args, **kwargs)
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/variables.py in _variable_v1_call(cls, initial_value, trainable, collections, validate_shape, caching_device, name, variable_def, dtype, expected_shape, import_scope, constraint, use_resource, synchronization, aggregation, shape)
203 if aggregation is None:
204 aggregation = VariableAggregation.NONE
--> 205 return previous_getter(
206 initial_value=initial_value,
207 trainable=trainable,
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/variables.py in getter(**kwargs)
64
65 def getter(**kwargs):
---> 66 return captured_getter(captured_previous, **kwargs)
67
68 return getter
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/distribute/distribute_lib.py in creator(next_creator, **kwargs)
2560 def creator(next_creator, **kwargs):
2561 _require_strategy_scope_strategy(strategy)
-> 2562 return next_creator(**kwargs)
2563
2564 self._var_creator_scope = variable_scope.variable_creator_scope(creator)
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/variables.py in <lambda>(**kwargs)
196 shape=None):
197 """Call on Variable class. Useful to force the signature."""
--> 198 previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
199 for _, getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access
200 previous_getter = _make_getter(getter, previous_getter)
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/variable_scope.py in default_variable_creator(next_creator, **kwargs)
2582 if use_resource:
2583 distribute_strategy = kwargs.get("distribute_strategy", None)
-> 2584 return resource_variable_ops.ResourceVariable(
2585 initial_value=initial_value,
2586 trainable=trainable,
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/variables.py in __call__(cls, *args, **kwargs)
261 return cls._variable_v2_call(*args, **kwargs)
262 else:
--> 263 return super(VariableMetaclass, cls).__call__(*args, **kwargs)
264
265
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/resource_variable_ops.py in __init__(self, initial_value, trainable, collections, validate_shape, caching_device, name, dtype, variable_def, import_scope, constraint, distribute_strategy, synchronization, aggregation, shape)
1421 self._init_from_proto(variable_def, import_scope=import_scope)
1422 else:
-> 1423 self._init_from_args(
1424 initial_value=initial_value,
1425 trainable=trainable,
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/resource_variable_ops.py in _init_from_args(self, initial_value, trainable, collections, caching_device, name, dtype, constraint, synchronization, aggregation, distribute_strategy, shape)
1565 with ops.name_scope("Initializer"), device_context_manager(None):
1566 initial_value = ops.convert_to_tensor(
-> 1567 initial_value() if init_from_fn else initial_value,
1568 name="initial_value", dtype=dtype)
1569 if shape is not None:
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/keras/engine/base_layer_utils.py in <lambda>()
119 (type(init_ops.Initializer), type(init_ops_v2.Initializer))):
120 initializer = initializer()
--> 121 init_val = lambda: initializer(shape, dtype=dtype)
122 variable_dtype = dtype.base_dtype
123 if use_resource is None:
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/init_ops.py in __call__(self, shape, dtype, partition_info)
112 if dtype is None:
113 dtype = self.dtype
--> 114 return array_ops.zeros(shape, dtype)
115
116 def get_config(self):
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in wrapped(*args, **kwargs)
2675
2676 def wrapped(*args, **kwargs):
-> 2677 tensor = fun(*args, **kwargs)
2678 tensor._is_zeros_tensor = True
2679 return tensor
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/ops/array_ops.py in zeros(shape, dtype, name)
2728 except (TypeError, ValueError):
2729 # Happens when shape is a list with tensor elements
-> 2730 shape = ops.convert_to_tensor(shape, dtype=dtypes.int32)
2731 if not shape._shape_tuple():
2732 shape = reshape(shape, [-1]) # Ensure it's a vector
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1339
1340 if ret is None:
-> 1341 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1342
1343 if ret is NotImplemented:
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
320 as_ref=False):
321 _ = as_ref
--> 322 return constant(v, dtype=dtype, name=name)
323
324
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name)
260 ValueError: if called on a symbolic tensor.
261 """
--> 262 return _constant_impl(value, dtype, shape, name, verify_shape=False,
263 allow_broadcast=True)
264
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
269 ctx = context.context()
270 if ctx.executing_eagerly():
--> 271 t = convert_to_eager_tensor(value, ctx, dtype)
272 if shape is None:
273 return t
~/anaconda3/envs/tf-gpu/lib/python3.8/site-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
95 ctx.ensure_initialized()
96
---> 97 return ops.EagerTensor(value, ctx.device_name(), dtype)
98
99
TypeError: 'str' object is not callable
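Worth noting: in the failing frame above, ctx.device_name is called as a function (ops.EagerTensor(value, ctx.device_name(), dtype)), whereas in other TensorFlow builds it is accessed as a plain attribute (compare the similar frame in the next question's trace). That pattern usually points to a broken or mixed TensorFlow installation in the environment rather than to the model code, which would also explain why the identical code runs on Colab. A hedged diagnostic sketch, assuming the conda setup visible in the trace (tf-gpu is the environment name shown there):

# Run inside the failing kernel to see what is actually loaded.
import tensorflow as tf
print(tf.__version__)  # version loaded by this kernel; compare against Colab
print(tf.__file__)     # which installation the kernel is importing from

# If the output looks inconsistent, a clean reinstall is the usual fix
# (shell commands, not notebook code):
#   conda create -n tf-clean python=3.8
#   conda activate tf-clean
#   pip install tensorflow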

Related

InternalError: Cannot dlopen all CUDA libraries

I am trying to run the Python code of this Kaggle Jupyter Notebook and encounter the following error:
---------------------------------------------------------------------------
InternalError Traceback (most recent call last)
<ipython-input-40-be0fb0b18f3a> in <module>
1 #Defining Neural Network
----> 2 model = Sequential()
3 #Non-trainable embeddidng layer
4 model.add(Embedding(max_features, output_dim=embed_size, weights=[embedding_matrix], input_length=maxlen, trainable=False))
5 #LSTM
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
528 self._self_setattr_tracking = False # pylint: disable=protected-access
529 try:
--> 530 result = method(self, *args, **kwargs)
531 finally:
532 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\keras\engine\sequential.py in __init__(self, layers, name)
105 """
106 # Skip the init in FunctionalModel since model doesn't have input/output yet
--> 107 super(functional.Functional, self).__init__( # pylint: disable=bad-super-call
108 name=name, autocast=False)
109 base_layer.keras_api_gauge.get_cell('Sequential').set(True)
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
528 self._self_setattr_tracking = False # pylint: disable=protected-access
529 try:
--> 530 result = method(self, *args, **kwargs)
531 finally:
532 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\keras\engine\training.py in __init__(self, *args, **kwargs)
287 self._steps_per_execution = None
288
--> 289 self._init_batch_counters()
290 self._base_model_initialized = True
291
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\training\tracking\base.py in _method_wrapper(self, *args, **kwargs)
528 self._self_setattr_tracking = False # pylint: disable=protected-access
529 try:
--> 530 result = method(self, *args, **kwargs)
531 finally:
532 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\keras\engine\training.py in _init_batch_counters(self)
295 # `evaluate`, and `predict`.
296 agg = tf.VariableAggregation.ONLY_FIRST_REPLICA
--> 297 self._train_counter = tf.Variable(0, dtype='int64', aggregation=agg)
298 self._test_counter = tf.Variable(0, dtype='int64', aggregation=agg)
299 self._predict_counter = tf.Variable(
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\ops\variables.py in __call__(cls, *args, **kwargs)
266 return cls._variable_v1_call(*args, **kwargs)
267 elif cls is Variable:
--> 268 return cls._variable_v2_call(*args, **kwargs)
269 else:
270 return super(VariableMetaclass, cls).__call__(*args, **kwargs)
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\ops\variables.py in _variable_v2_call(cls, initial_value, trainable, validate_shape, caching_device, name, variable_def, dtype, import_scope, constraint, synchronization, aggregation, shape)
248 if aggregation is None:
249 aggregation = VariableAggregation.NONE
--> 250 return previous_getter(
251 initial_value=initial_value,
252 trainable=trainable,
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\ops\variables.py in <lambda>(**kws)
241 shape=None):
242 """Call on Variable class. Useful to force the signature."""
--> 243 previous_getter = lambda **kws: default_variable_creator_v2(None, **kws)
244 for _, getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access
245 previous_getter = _make_getter(getter, previous_getter)
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\ops\variable_scope.py in default_variable_creator_v2(next_creator, **kwargs)
2660 shape = kwargs.get("shape", None)
2661
-> 2662 return resource_variable_ops.ResourceVariable(
2663 initial_value=initial_value,
2664 trainable=trainable,
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\ops\variables.py in __call__(cls, *args, **kwargs)
268 return cls._variable_v2_call(*args, **kwargs)
269 else:
--> 270 return super(VariableMetaclass, cls).__call__(*args, **kwargs)
271
272
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\ops\resource_variable_ops.py in __init__(self, initial_value, trainable, collections, validate_shape, caching_device, name, dtype, variable_def, import_scope, constraint, distribute_strategy, synchronization, aggregation, shape)
1600 self._init_from_proto(variable_def, import_scope=import_scope)
1601 else:
-> 1602 self._init_from_args(
1603 initial_value=initial_value,
1604 trainable=trainable,
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\ops\resource_variable_ops.py in _init_from_args(self, initial_value, trainable, collections, caching_device, name, dtype, constraint, synchronization, aggregation, distribute_strategy, shape)
1743 self._update_uid = initial_value.checkpoint_position.restore_uid
1744 initial_value = initial_value.wrapped_value
-> 1745 initial_value = ops.convert_to_tensor(initial_value,
1746 name="initial_value",
1747 dtype=dtype)
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\profiler\trace.py in wrapped(*args, **kwargs)
161 with Trace(trace_name, **trace_kwargs):
162 return func(*args, **kwargs)
--> 163 return func(*args, **kwargs)
164
165 return wrapped
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\framework\ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1564
1565 if ret is None:
-> 1566 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1567
1568 if ret is NotImplemented:
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\framework\tensor_conversion_registry.py in _default_conversion_function(***failed resolving arguments***)
50 def _default_conversion_function(value, dtype, name, as_ref):
51 del as_ref # Unused.
---> 52 return constant_op.constant(value, dtype, name=name)
53
54
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\framework\constant_op.py in constant(value, dtype, shape, name)
269 ValueError: if called on a symbolic tensor.
270 """
--> 271 return _constant_impl(value, dtype, shape, name, verify_shape=False,
272 allow_broadcast=True)
273
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\framework\constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
281 with trace.Trace("tf.constant"):
282 return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
--> 283 return _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
284
285 g = ops.get_default_graph()
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\framework\constant_op.py in _constant_eager_impl(ctx, value, dtype, shape, verify_shape)
306 def _constant_eager_impl(ctx, value, dtype, shape, verify_shape):
307 """Creates a constant on the current device."""
--> 308 t = convert_to_eager_tensor(value, ctx, dtype)
309 if shape is None:
310 return t
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\framework\constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
103 except AttributeError:
104 dtype = dtypes.as_dtype(dtype).as_datatype_enum
--> 105 ctx.ensure_initialized()
106 return ops.EagerTensor(value, ctx.device_name, dtype)
107
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\eager\context.py in ensure_initialized(self)
534 opts = pywrap_tfe.TFE_NewContextOptions()
535 try:
--> 536 config_str = self.config.SerializeToString()
537 pywrap_tfe.TFE_ContextOptionsSetConfig(opts, config_str)
538 if self._device_policy is not None:
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\eager\context.py in config(self)
962 """Return the ConfigProto with all runtime deltas applied."""
963 # Ensure physical devices have been discovered and config has been imported
--> 964 self._initialize_physical_devices()
965
966 config = config_pb2.ConfigProto()
c:\users\kim\appdata\local\programs\python\python38\lib\site-packages\tensorflow\python\eager\context.py in _initialize_physical_devices(self, reinitialize)
1291 return
1292
-> 1293 devs = pywrap_tfe.TF_ListPhysicalDevices()
1294 self._physical_devices = [
1295 PhysicalDevice(name=d.decode(), device_type=d.decode().split(":")[1])
InternalError: Cannot dlopen all CUDA libraries.
How can I resolve it?
Okay, so I tried a few things, and after installing tensorflow-gpu it worked. Maybe this can help someone else with the same problem:
pip install tensorflow-gpu
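If it helps, a quick way to confirm that the GPU build is actually usable after the install (assuming a TF 2.x-style API, as in the trace above):

import tensorflow as tf

# Should list at least one PhysicalDevice if the CUDA libraries loaded.
print(tf.config.list_physical_devices('GPU'))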

Tensorflow 2 Probability: ValueError: Failed to convert a NumPy array to a Tensor (Unsupported numpy type: NPY_INT)

This is a mysterious error -- to me -- that keeps popping up.
For a reproducible example, you can find the Jupyter Notebook here: https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter5_LossFunctions/Ch5_LossFunctions_TFP.ipynb -- Chapter 5 (Loss Functions).
Conveniently, in this example, the data are artificial and constructed on the fly.
The part of the code that creates the problem is the following (I am running tensorflow 2):
# Code for creating artificial "dummy" data
# This is a common strategy for testing our models
# before applying it to real-world data
num_data = 100
X_data = (0.025 * tfd.Normal(loc=0., scale=1.).sample(sample_shape=num_data))
Y_data = (0.5 * X_data + 0.01 * tfd.Normal(loc=0., scale=1.).sample(sample_shape=num_data))

tf_var_data = tf.nn.moments(X_data, axes=0)[1]
covar = tfp.stats.covariance(X_data, Y_data, sample_axis=0, event_axis=None)
ls_coef = covar / tf_var_data

[
    X_data_, Y_data_, ls_coef_,
] = [
    X_data.numpy(), Y_data.numpy(), ls_coef.numpy(),
]

ls_intercept_ = Y_data_.mean() - ls_coef_ * X_data_.mean()
obs_stdev = tf.sqrt(
    tf.reduce_mean(tf.math.squared_difference(Y_data_, tf.reduce_mean(Y_data_, axis=0)),
                   axis=0))
# Let's define the log probability of the bayesian regression function
def finance_posterior_log_prob(X_data_, Y_data_, alpha, beta, sigma):
    """
    Our posterior log probability, as a function of states

    Args:
        alpha_: scalar, taken from state of the HMC
        beta_: scalar, taken from state of the HMC
        sigma_: scalar, the standard deviation of , taken from state of the HMC

    Returns:
        Scalar sum of log probabilities

    Closure over: Y_data, X_data
    """
    rv_std = tfd.Uniform(name="std", low=0., high=100.)
    rv_beta = tfd.Normal(name="beta", loc=0., scale=100.)
    rv_alpha = tfd.Normal(name="alpha", loc=0., scale=100.)

    mean = alpha + beta * X_data_
    rv_observed = tfd.Normal(name="obs", loc=mean, scale=sigma)

    return (
        rv_alpha.log_prob(alpha)
        + rv_beta.log_prob(beta)
        + rv_std.log_prob(sigma)
        + tf.reduce_sum(rv_observed.log_prob(Y_data_))
    )
number_of_steps = 30000
burnin = 5000

# Set the chain's start state.
initial_chain_state = [
    tf.cast(1., dtype=tf.float32) * tf.ones([], name='init_alpha', dtype=tf.float32),
    tf.cast(0.01, dtype=tf.float32) * tf.ones([], name='init_beta', dtype=tf.float32),
    tf.cast(obs_stdev, dtype=tf.float32) * tf.ones([], name='init_sigma', dtype=tf.float32)
]

# Since HMC operates over unconstrained space, we need to transform the
# samples so they live in real-space.
# Beta and sigma are 100x and 10x of alpha, approximately, so apply Affine scalar bijector
# to multiply the unconstrained beta and sigma by 100x and 10x to get back to
# the problem space
unconstraining_bijectors = [
    tfp.bijectors.Identity(),   # alpha
    tfp.bijectors.Shift(100.),  # beta
    tfp.bijectors.Scale(10.),   # sigma
]

# Define a closure over our joint_log_prob.
unnormalized_posterior_log_prob = lambda *args: finance_posterior_log_prob(X_data_, Y_data_, *args)

step_size = 0.5

# Defining the HMC
kernel = tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=unnormalized_posterior_log_prob,
        num_leapfrog_steps=2,
        step_size=step_size,
        state_gradients_are_stopped=True),
    bijector=unconstraining_bijectors)

kernel = tfp.mcmc.SimpleStepSizeAdaptation(
    inner_kernel=kernel, num_adaptation_steps=int(burnin * 0.8))

# Sampling from the chain.
[
    alpha,
    beta,
    sigma
], kernel_results = tfp.mcmc.sample_chain(
    num_results=number_of_steps,
    num_burnin_steps=burnin,
    current_state=initial_chain_state,
    kernel=kernel,
    name='HMC_sampling'
)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-63-b2e46a99062a> in <module>
21 current_state=initial_chain_state,
22 kernel=kernel,
---> 23 name='HMC_sampling'
24 )
25
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\sample.py in sample_chain(num_results, current_state, previous_kernel_results, kernel, num_burnin_steps, num_steps_between_results, trace_fn, return_final_kernel_results, parallel_iterations, name)
357 trace_fn(*state_and_results)),
358 # pylint: enable=g-long-lambda
--> 359 parallel_iterations=parallel_iterations)
360
361 if return_final_kernel_results:
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in trace_scan(loop_fn, initial_state, elems, trace_fn, parallel_iterations, name)
393 body=_body,
394 loop_vars=(0, initial_state, trace_arrays),
--> 395 parallel_iterations=parallel_iterations)
396
397 stacked_trace = tf.nest.map_structure(lambda x: x.stack(), trace_arrays)
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
572 func.__module__, arg_name, arg_value, 'in a future version'
573 if date is None else ('after %s' % date), instructions)
--> 574 return func(*args, **kwargs)
575
576 doc = _add_deprecated_arg_value_notice_to_docstring(
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop_v2(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, maximum_iterations, name)
2489 name=name,
2490 maximum_iterations=maximum_iterations,
-> 2491 return_same_structure=True)
2492
2493
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, name, maximum_iterations, return_same_structure)
2725 list(loop_vars))
2726 while cond(*loop_vars):
-> 2727 loop_vars = body(*loop_vars)
2728 if try_to_pack and not isinstance(loop_vars, (list, _basetuple)):
2729 packed = True
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in _body(i, state, trace_arrays)
382
383 def _body(i, state, trace_arrays):
--> 384 state = loop_fn(state, elems_array.read(i))
385 trace_arrays = tf.nest.pack_sequence_as(trace_arrays, [
386 a.write(i, v) for a, v in zip(
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\sample.py in _trace_scan_fn(state_and_results, num_steps)
341 body_fn=kernel.one_step,
342 initial_loop_vars=list(state_and_results),
--> 343 parallel_iterations=parallel_iterations)
344 return next_state, current_kernel_results
345
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in smart_for_loop(loop_num_iter, body_fn, initial_loop_vars, parallel_iterations, name)
315 body=lambda i, *args: [i + 1] + list(body_fn(*args)),
316 loop_vars=[np.int32(0)] + initial_loop_vars,
--> 317 parallel_iterations=parallel_iterations
318 )[1:]
319 result = initial_loop_vars
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\util\deprecation.py in new_func(*args, **kwargs)
572 func.__module__, arg_name, arg_value, 'in a future version'
573 if date is None else ('after %s' % date), instructions)
--> 574 return func(*args, **kwargs)
575
576 doc = _add_deprecated_arg_value_notice_to_docstring(
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop_v2(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, maximum_iterations, name)
2489 name=name,
2490 maximum_iterations=maximum_iterations,
-> 2491 return_same_structure=True)
2492
2493
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\control_flow_ops.py in while_loop(cond, body, loop_vars, shape_invariants, parallel_iterations, back_prop, swap_memory, name, maximum_iterations, return_same_structure)
2725 list(loop_vars))
2726 while cond(*loop_vars):
-> 2727 loop_vars = body(*loop_vars)
2728 if try_to_pack and not isinstance(loop_vars, (list, _basetuple)):
2729 packed = True
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\internal\util.py in <lambda>(i, *args)
313 return tf.while_loop(
314 cond=lambda i, *args: i < loop_num_iter,
--> 315 body=lambda i, *args: [i + 1] + list(body_fn(*args)),
316 loop_vars=[np.int32(0)] + initial_loop_vars,
317 parallel_iterations=parallel_iterations
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\mcmc\simple_step_size_adaptation.py in one_step(self, current_state, previous_kernel_results)
378 reduced_log_accept_prob = reduce_logmeanexp(
379 log_accept_prob,
--> 380 axis=prefer_static.range(num_reduce_dims))
381 # reduced_log_accept_prob must broadcast into step_size_part on the
382 # left, so we do an additional reduction over dimensions where their
~\Anaconda3\envs\tf2\lib\site-packages\tensorflow_probability\python\math\generic.py in reduce_logmeanexp(input_tensor, axis, keepdims, name)
109 lse = tf.reduce_logsumexp(input_tensor, axis=axis, keepdims=keepdims)
110 n = prefer_static.size(input_tensor) // prefer_static.size(lse)
--> 111 log_n = tf.math.log(tf.cast(n, lse.dtype))
112 return lse - log_n
113
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\util\dispatch.py in wrapper(*args, **kwargs)
178 """Call target, and fall back on dispatchers if there is a TypeError."""
179 try:
--> 180 return target(*args, **kwargs)
181 except (TypeError, ValueError):
182 # Note: convert_to_eager_tensor currently raises a ValueError, not a
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\ops\math_ops.py in cast(x, dtype, name)
746 # allows some conversions that cast() can't do, e.g. casting numbers to
747 # strings.
--> 748 x = ops.convert_to_tensor(x, name="x")
749 if x.dtype.base_dtype != base_type:
750 x = gen_math_ops.cast(x, base_type, name=name)
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\ops.py in convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, dtype_hint, ctx, accepted_result_types)
1348
1349 if ret is None:
-> 1350 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1351
1352 if ret is NotImplemented:
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\tensor_conversion_registry.py in _default_conversion_function(***failed resolving arguments***)
50 def _default_conversion_function(value, dtype, name, as_ref):
51 del as_ref # Unused.
---> 52 return constant_op.constant(value, dtype, name=name)
53
54
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\constant_op.py in constant(value, dtype, shape, name)
256 """
257 return _constant_impl(value, dtype, shape, name, verify_shape=False,
--> 258 allow_broadcast=True)
259
260
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\constant_op.py in _constant_impl(value, dtype, shape, name, verify_shape, allow_broadcast)
264 ctx = context.context()
265 if ctx.executing_eagerly():
--> 266 t = convert_to_eager_tensor(value, ctx, dtype)
267 if shape is None:
268 return t
~\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\framework\constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
94 dtype = dtypes.as_dtype(dtype).as_datatype_enum
95 ctx.ensure_initialized()
---> 96 return ops.EagerTensor(value, ctx.device_name, dtype)
97
98
ValueError: Failed to convert a NumPy array to a Tensor (Unsupported numpy type: NPY_INT).
The problem seems to come from
kernel = tfp.mcmc.SimpleStepSizeAdaptation(
    inner_kernel=kernel, num_adaptation_steps=int(burnin * 0.8))
In another similar example, I got the same error. If you skip this line, it works.
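Based on that observation, a minimal workaround sketch (reusing the names defined in the question) is to hand the transformed HMC kernel to sample_chain directly and drop the SimpleStepSizeAdaptation wrapper; you lose step-size adaptation, so step_size may need manual tuning:

kernel = tfp.mcmc.TransformedTransitionKernel(
    inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=unnormalized_posterior_log_prob,
        num_leapfrog_steps=2,
        step_size=step_size,
        state_gradients_are_stopped=True),
    bijector=unconstraining_bijectors)

# No SimpleStepSizeAdaptation wrapper -- sample from the transformed kernel directly.
[alpha, beta, sigma], kernel_results = tfp.mcmc.sample_chain(
    num_results=number_of_steps,
    num_burnin_steps=burnin,
    current_state=initial_chain_state,
    kernel=kernel,
    name='HMC_sampling')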

What could cause a type error during the build process of a Keras Layer?

I was creating a custom layer based off the NALU paper, but when testing my code in Google Colab (tensorflow version 1.10.0) I got a type error. This did not happen in my local Jupyter notebook (tensorflow cpu version 1.8.0).
The type error appears to occur when adding a weight in the build function of Layer: when adding the weight, a type error is raised because the code tries to divide a Dimension by a float. I thought this might be an error with how Keras handles arguments, so I dropped the initializer. Even without the initializer, it is not possible to add a weight to the layer. I got the structure of my code from tensorflow's keras tutorial, and I am wondering if there is anything I need to do to update my code to work with the most recent version of tensorflow.
TypeError Traceback (most recent call last)
<ipython-input-7-55fb94a8f3b1> in <module>()
82 y_test = x_test[:, 0] * x_test[:, 1]
83
---> 84 model = nalu_model()
85
86 model.compile(optimizer='RMSProp',
<ipython-input-7-55fb94a8f3b1> in nalu_model()
48 def nalu_model():
49 inp = tf.keras.layers.Input(shape=(2,))
---> 50 out = NALU(1)(inp)
51
52 model = tf.keras.models.Model(inputs=inp, outputs=out)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in __call__(self, inputs, *args, **kwargs)
726 if all(hasattr(x, 'shape') for x in input_list):
727 input_shapes = nest.map_structure(lambda x: x.shape, inputs)
--> 728 self.build(input_shapes)
729 self.built = True
730
<ipython-input-7-55fb94a8f3b1> in build(self, input_shape)
9 shape = tf.TensorShape((input_shape[1], self.num_outputs))
10 get = tf.keras.initializers.get
---> 11 self.W_ = self.add_variable("W_", shape=shape, initializer=get('glorot_uniform'))
12 self.M_ = self.add_variable("M_", shape=shape, initializer=get('glorot_uniform'))
13 self.GAM = self.add_variable("GAM", shape=shape, initializer=get('glorot_uniform')) # Gate add & multiply
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in add_variable(self, *args, **kwargs)
459 def add_variable(self, *args, **kwargs):
460 """Alias for `add_weight`."""
--> 461 return self.add_weight(*args, **kwargs)
462
463 def add_weight(self,
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in add_weight(self, name, shape, dtype, initializer, regularizer, trainable, constraint, partitioner, use_resource, synchronization, aggregation, getter)
563 use_resource=use_resource,
564 synchronization=synchronization,
--> 565 aggregation=aggregation)
566
567 if regularizer is not None:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/training/checkpointable/base.py in _add_variable_with_custom_getter(self, name, shape, dtype, initializer, getter, overwrite, **kwargs_for_getter)
533 new_variable = getter(
534 name=name, shape=shape, dtype=dtype, initializer=initializer,
--> 535 **kwargs_for_getter)
536
537 # If we set an initializer and the variable processed it, tracking will not
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in make_variable(name, shape, dtype, initializer, partition_info, trainable, caching_device, validate_shape, constraint, use_resource, synchronization, aggregation, partitioner)
1916 use_resource=use_resource,
1917 synchronization=synchronization,
-> 1918 aggregation=aggregation)
1919 return v
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variable_scope.py in variable(initial_value, trainable, collections, validate_shape, caching_device, name, dtype, constraint, use_resource, synchronization, aggregation)
2441 use_resource=use_resource,
2442 synchronization=synchronization,
-> 2443 aggregation=aggregation)
2444
2445
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variable_scope.py in <lambda>(**kwargs)
2423 synchronization=VariableSynchronization.AUTO,
2424 aggregation=VariableAggregation.NONE):
-> 2425 previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
2426 for getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access
2427 previous_getter = _make_getter(getter, previous_getter)
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variable_scope.py in default_variable_creator(next_creator, **kwargs)
2393 collections=collections, validate_shape=validate_shape,
2394 caching_device=caching_device, name=name, dtype=dtype,
-> 2395 constraint=constraint)
2396 elif not use_resource and context.executing_eagerly():
2397 raise RuntimeError(
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py in __init__(self, initial_value, trainable, collections, validate_shape, caching_device, name, dtype, variable_def, import_scope, constraint)
310 name=name,
311 dtype=dtype,
--> 312 constraint=constraint)
313
314 # pylint: disable=unused-argument
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/resource_variable_ops.py in _init_from_args(self, initial_value, trainable, collections, validate_shape, caching_device, name, dtype, constraint)
415 with ops.name_scope("Initializer"), ops.device(None):
416 initial_value = ops.convert_to_tensor(
--> 417 initial_value(), name="initial_value", dtype=dtype)
418 self._handle = _eager_safe_variable_handle(
419 shape=initial_value.get_shape(),
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/base_layer.py in <lambda>()
1901 initializer = initializer(dtype=dtype)
1902 init_val = lambda: initializer( # pylint: disable=g-long-lambda
-> 1903 shape, dtype=dtype, partition_info=partition_info)
1904 variable_dtype = dtype.base_dtype
1905 if use_resource is None:
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/init_ops.py in __call__(self, shape, dtype, partition_info)
474 scale /= max(1., fan_out)
475 else:
--> 476 scale /= max(1., (fan_in + fan_out) / 2.)
477 if self.distribution == "normal" or self.distribution == "truncated_normal":
478 # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
TypeError: unsupported operand type(s) for /: 'Dimension' and 'float'
Code:
https://gist.github.com/Carliceiv/0b68fe0d9f2a9fc9a1b901e8a722f6cd
The stacktrace indicates that somewhere in the code a Dimension value is given as an argument where an integer/float is usually expected.
I think this might be caused by this line:
shape = tf.TensorShape((input_shape[1], self.num_outputs))
Please try changing it to
shape = (input_shape[1], self.num_outputs)
Edit: As pointed out in the comments, this does not work if input_shape[1] is a Dimension object and not an int. To solve this, use this modification:
shape = tf.TensorShape((input_shape[1], self.num_outputs)).as_list()
This will first create a new TensorShape, which can deal with the Dimension data type, and then convert it to a list of integers.
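For illustration, a minimal sketch of how the fix might look inside build(), written against a recent tf.keras API (NALUSketch here is a stand-in for the question's NALU layer, not its full implementation):

import tensorflow as tf

class NALUSketch(tf.keras.layers.Layer):
    def __init__(self, num_outputs, **kwargs):
        super().__init__(**kwargs)
        self.num_outputs = num_outputs

    def build(self, input_shape):
        # Going through TensorShape copes with Dimension entries, and
        # .as_list() turns the result into plain Python ints.
        shape = tf.TensorShape((input_shape[1], self.num_outputs)).as_list()
        self.W_ = self.add_weight(
            "W_", shape=shape,
            initializer=tf.keras.initializers.get('glorot_uniform'))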

How to solve TypeError: Expected int32, got <tf.Variable 'lstm_1_W_i:0'> in Keras?

I am training a vanilla RNN in Keras, using LSTM cells.
This is the shape of my training data: basically 600 samples, where each sample is a sequence of length 2500 with 100 features.
>>> training_data.shape
(600, 2500, 100)
Here is the start of my model definition in Keras.
rnn_inputs = Input(shape=(2500, 100), dtype='int32')
simple_rnn = LSTM(3, return_sequences=True, activation='relu')(rnn_inputs)
The first line, 'rnn_inputs', runs fine, but the second line, 'simple_rnn', raises this error:
TypeError: Expected int32, got <tf.Variable 'lstm_1_W_i:0' shape=(100, 3) dtype=float32_ref> of type 'Variable' instead.
I am using Keras 1.0.7 and tensorflow 1.5.0. I thought it might have something to do with the versions of these libraries, so I updated them and tried again, but it still doesn't work.
Trace
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-36-fe4504deb980> in <module>()
2 rnn_inputs = Input(shape=(2500, 100), dtype='float64')
3 print(rnn_inputs.dtype)
----> 4 simple_rnn = LSTM(3, return_sequences=True,activation='relu')(rnn_inputs)
5 #output = TimeDistributed(Dense(1, activation='sigmoid'))(simple_rnn)
6 #model = Model(inputs=rnn_inputs, outputs=output)
~/miniconda2/envs/py35/lib/python3.5/site-packages/keras/engine/topology.py in __call__(self, x, mask)
482 '`layer.build(batch_input_shape)`')
483 if len(input_shapes) == 1:
--> 484 self.build(input_shapes[0])
485 else:
486 self.build(input_shapes)
~/miniconda2/envs/py35/lib/python3.5/site-packages/keras/layers/recurrent.py in build(self, input_shape)
729 self.W_o, self.U_o, self.b_o]
730
--> 731 self.W = K.concatenate([self.W_i, self.W_f, self.W_c, self.W_o])
732 self.U = K.concatenate([self.U_i, self.U_f, self.U_c, self.U_o])
733 self.b = K.concatenate([self.b_i, self.b_f, self.b_c, self.b_o])
~/miniconda2/envs/py35/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in concatenate(tensors, axis)
667 else:
668 axis = 0
--> 669 return tf.concat(axis, tensors)
670
671
~/miniconda2/envs/py35/lib/python3.5/site-packages/tensorflow/python/ops/array_ops.py in concat(values, axis, name)
1125 ops.convert_to_tensor(
1126 axis, name="concat_dim",
-> 1127 dtype=dtypes.int32).get_shape().assert_is_compatible_with(
1128 tensor_shape.scalar())
1129 return identity(values[0], name=scope)
~/miniconda2/envs/py35/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, preferred_dtype)
930 name=name,
931 preferred_dtype=preferred_dtype,
--> 932 as_ref=False)
933
934
~/miniconda2/envs/py35/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, ctx)
1020
1021 if ret is None:
-> 1022 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
1023
1024 if ret is NotImplemented:
~/miniconda2/envs/py35/lib/python3.5/site-packages/tensorflow/python/framework/constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
231 as_ref=False):
232 _ = as_ref
--> 233 return constant(v, dtype=dtype, name=name)
234
235
~/miniconda2/envs/py35/lib/python3.5/site-packages/tensorflow/python/framework/constant_op.py in constant(value, dtype, shape, name, verify_shape)
210 tensor_value.tensor.CopyFrom(
211 tensor_util.make_tensor_proto(
--> 212 value, dtype=dtype, shape=shape, verify_shape=verify_shape))
213 dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
214 const_tensor = g.create_op(
~/miniconda2/envs/py35/lib/python3.5/site-packages/tensorflow/python/framework/tensor_util.py in make_tensor_proto(values, dtype, shape, verify_shape)
411 nparray = np.empty(shape, dtype=np_dt)
412 else:
--> 413 _AssertCompatible(values, dtype)
414 nparray = np.array(values, dtype=np_dt)
415 # check to them.
~/miniconda2/envs/py35/lib/python3.5/site-packages/tensorflow/python/framework/tensor_util.py in _AssertCompatible(values, dtype)
326 else:
327 raise TypeError("Expected %s, got %s of type '%s' instead." %
--> 328 (dtype.name, repr(mismatch), type(mismatch).__name__))
329
330
TypeError: Expected int32, got <tf.Variable 'lstm_7_W_i:0' shape=(100, 3) dtype=float32_ref> of type 'Variable' instead.
Any clue on what the issue is?
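One detail visible in the trace, offered as a hedged observation rather than a confirmed answer: the failing frame in keras/backend/tensorflow_backend.py calls tf.concat(axis, tensors), which is the pre-1.0 TensorFlow argument order, while TensorFlow 1.5 expects tf.concat(values, axis). Keras 1.0.7 predates that API change, so this version combination looks incompatible on its own. A tiny sketch of the signature difference:

import tensorflow as tf

a = tf.constant([[1., 2.]])
b = tf.constant([[3., 4.]])
c = tf.concat([a, b], axis=0)  # TensorFlow >= 1.0 order: values first, then axis
# tf.concat(0, [a, b])         # pre-1.0 order, which Keras 1.0.7 still emits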

TypeError: Expected string, got list containing Tensors of type '_Message' instead

Python 3.6 on Windows 7
(Jupyter Notebook)
I'm trying to load a label CSV file, but it's not working properly.
The CSV file has already been created (1 column, 570 rows).
I've googled to debug this problem, but I can't find a case exactly like this one.
Ultimately, I want to train on JPG files with this label file.
In model.py:
imagename_queue = tf.train.string_input_producer(images, shuffle=False)
labelname_queue = tf.train.string_input_producer([csv_label], shuffle=False)
# print(type(labelname_queue))
image_reader = tf.WholeFileReader()
key_img, raw_img = image_reader.read(imagename_queue)
csv_reader = tf.TextLineReader()
key_txt, raw_txt = csv_reader.read(labelname_queue)
jpg_image = tf.image.decode_jpeg(raw_img)
csv_label = tf.decode_csv(raw_txt, record_defaults=[[0]])
jpg_image = tf.reduce_mean(jpg_image, axis=2)
jpg_image = tf.reshape(jpg_image, [W_img, H_img, 1])
I got this error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-23-6c24000e9977> in <module>()
1 imagename_queue = tf.train.string_input_producer(images, shuffle=False)
----> 2 labelname_queue = tf.train.string_input_producer([csv_label], shuffle=False)
3
4 # print(type(labelname_queue))
5
C:\Anaconda3\lib\site-packages\tensorflow\python\training\input.py in string_input_producer(string_tensor, num_epochs, shuffle, seed, capacity, shared_name, name, cancel_op)
218
219 with ops.name_scope(name, "input_producer", [string_tensor]) as name:
--> 220 string_tensor = ops.convert_to_tensor(string_tensor, dtype=dtypes.string)
221 with ops.control_dependencies([
222 control_flow_ops.Assert(
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in convert_to_tensor(value, dtype, name, preferred_dtype)
609 name=name,
610 preferred_dtype=preferred_dtype,
--> 611 as_ref=False)
612
613
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype)
674
675 if ret is None:
--> 676 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
677
678 if ret is NotImplemented:
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py in _constant_tensor_conversion_function(v, dtype, name, as_ref)
119 as_ref=False):
120 _ = as_ref
--> 121 return constant(v, dtype=dtype, name=name)
122
123
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\constant_op.py in constant(value, dtype, shape, name, verify_shape)
100 tensor_value = attr_value_pb2.AttrValue()
101 tensor_value.tensor.CopyFrom(
--> 102 tensor_util.make_tensor_proto(value, dtype=dtype, shape=shape, verify_shape=verify_shape))
103 dtype_value = attr_value_pb2.AttrValue(type=tensor_value.tensor.dtype)
104 const_tensor = g.create_op(
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\tensor_util.py in make_tensor_proto(values, dtype, shape, verify_shape)
374 nparray = np.empty(shape, dtype=np_dt)
375 else:
--> 376 _AssertCompatible(values, dtype)
377 nparray = np.array(values, dtype=np_dt)
378 # check to them.
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\tensor_util.py in _AssertCompatible(values, dtype)
300 else:
301 raise TypeError("Expected %s, got %s of type '%s' instead." %
--> 302 (dtype.name, repr(mismatch), type(mismatch).__name__))
303
304
TypeError: Expected string, got list containing Tensors of type '_Message' instead.
How can I solve this issue?
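A hedged guess based on the snippet: csv_label is reassigned to the output of tf.decode_csv further down, so if the cell runs more than once (easy to do in a notebook), string_input_producer receives a list containing a Tensor instead of a filename string, which matches the error. Keeping the filename in its own variable avoids this; 'labels.csv' below is a placeholder path:

csv_label_file = 'labels.csv'  # placeholder: path to the label CSV
labelname_queue = tf.train.string_input_producer([csv_label_file], shuffle=False)
csv_reader = tf.TextLineReader()
key_txt, raw_txt = csv_reader.read(labelname_queue)
# The decoded tensor gets a distinct name, so re-running the cell cannot
# feed a Tensor back into string_input_producer.
csv_labels = tf.decode_csv(raw_txt, record_defaults=[[0]])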
