I am getting multiple errors while running my code and can't work out whether they come from the dataset or from an architecture issue.
import numpy as np
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.layers import Dense, concatenate, Activation, Dropout, Input, LSTM, Flatten, Embedding
from tensorflow.keras import regularizers, initializers, optimizers, Model
from tensorflow.keras.callbacks import ModelCheckpoint
tokenizeressay = Tokenizer(oov_token="<OOV>")
# generate word indexes
tokenizeressay.fit_on_texts(X_train.essay)
# generate sequences
tokenisedessayTrain = tokenizeressay.texts_to_sequences(X_train.essay)
tokenisedessayTest = tokenizeressay.texts_to_sequences(X_test.essay)
tokenisedessayCV = tokenizeressay.texts_to_sequences(X_cv.essay)
tokenisedTrainessay = pad_sequences(tokenisedessayTrain,maxlen=350)
tokenisedTestessay = pad_sequences(tokenisedessayTest,maxlen=350)
tokenizedCvessay = pad_sequences(tokenisedessayCV,maxlen=350)
import pickle
with open("C:\\Users\\Administrator\\Downloads\\glove_vectors", 'rb') as f:
    glove = pickle.load(f)
size_glove = 300 # glove vectors are 300 dims
size_vocab = len(list(tokenizeressay.word_counts.keys()))
word_Weights = np.zeros((size_vocab+1, size_glove))
for word, i in tokenizeressay.word_index.items():
    embedding_vector = glove.get(word)
    # words not found in the embedding index stay all-zeros
    if embedding_vector is not None:
        word_Weights[i-1] = embedding_vector
print("Max Length on Essay")
print(X_train.essay.apply(lambda x : len(x.split(' '))).max())
print("Word Item Length:")
print(len(tokenizeressay.word_index.items()))
print("Max Length we are taken will be 350")
Max Length on Essay
339
Word Item Length:
49330
Max length we will take is 350
# LSTM: take the LSTM output and flatten it ----> according to the assignment
# I have chosen 128 units
essayInput = Input(shape=(350,),dtype='int32',name='essayInput')
embeddedEssay = Embedding(input_dim=(len(tokenizeressay.word_index.items())),output_dim=300,name='embeddedEssay',weights=[word_Weights],trainable=False)(essayInput)
essayLSTM = LSTM(units=128, return_sequences=True)(embeddedEssay)
essayOut = Flatten()(essayLSTM)
from tensorflow.keras.layers import concatenate
concat_layer = concatenate(inputs=[essayOut],name="concat_layer")
from tensorflow.keras.layers import Dropout,BatchNormalization
AfterConcatLayer = Dense(256,activation='relu',kernel_regularizer=regularizers.l2(0.001),kernel_initializer=initializers.he_normal())(concat_layer)
AfterConcatLayer = Dropout(0.5)(AfterConcatLayer)
AfterConcatLayer = Dense(128,activation='relu',kernel_regularizer=regularizers.l2(0.001),kernel_initializer=initializers.he_normal())(AfterConcatLayer)
AfterConcatLayer = Dropout(0.5)(AfterConcatLayer)
AfterConcatLayer = BatchNormalization()(AfterConcatLayer)
AfterConcatLayer = Dense(64,activation='relu',kernel_regularizer=regularizers.l2(0.001),kernel_initializer=initializers.he_normal())(AfterConcatLayer)
AfterConcatLayer = Dropout(0.5)(AfterConcatLayer)
SoftmaxOutput = Dense(2, activation = 'softmax')(AfterConcatLayer)
model1 = Model([essayInput], SoftmaxOutput)
model1.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(lr=0.0006,decay = 1e-4),metrics=[auc])
print(model1.summary())
checkpoint_path ="C:\\Windows_Old\\Learnings\\MachineLearning\\CheckPoints.hdf5"
cp_callback = ModelCheckpoint(filepath=checkpoint_path,save_best_only=True, save_weights_only=True, verbose=1,monitor='val_auc')
#Delete THIS CODE
import os
from keras.utils import to_categorical
if os.path.isfile(checkpoint_path):
    model1.load_weights(checkpoint_path)
#essayInput,SchoolStateInput,TeacherPrefixInput,CleanCategoriesInput,CleanSubCategoriesInput,ProjectGradeInput,PriceProjectNumberInput
model1.fit([tokenisedTrainessay],to_categorical(y_train), epochs=50,verbose=2,batch_size=512,validation_split=0.3,callbacks = [cp_callback])
I am using the DonorsChoose dataset. You can download it from this Link.
The error I am getting is:
InvalidArgumentError Traceback (most recent call last)
Input In [23], in <cell line: 10>()
7 model1.load_weights(checkpoint_path)
8 #essayInput,SchoolStateInput,TeacherPrefixInput,CleanCategoriesInput,CleanSubCategoriesInput,ProjectGradeInput,PriceProjectNumberInput
---> 10 model1.fit([tokenisedTrainessay],to_categorical(y_train), epochs=50,verbose=2,batch_size=512,validation_split=0.3,callbacks = [cp_callback])
File C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
File C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\eager\execute.py:54, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
52 try:
53 ctx.ensure_initialized()
---> 54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
The full error is:
InvalidArgumentError: Graph execution error:
Detected at node 'model/embeddedEssay/embedding_lookup' defined at (most recent call last):
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\ProgramData\Anaconda3\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "C:\ProgramData\Anaconda3\lib\site-packages\traitlets\config\application.py", line 846, in launch_instance
app.start()
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 677, in start
self.io_loop.start()
File "C:\ProgramData\Anaconda3\lib\site-packages\tornado\platform\asyncio.py", line 199, in start
self.asyncio_loop.run_forever()
File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 601, in run_forever
self._run_once()
File "C:\ProgramData\Anaconda3\lib\asyncio\base_events.py", line 1905, in _run_once
handle._run()
File "C:\ProgramData\Anaconda3\lib\asyncio\events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 471, in dispatch_queue
await self.process_one()
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 460, in process_one
await dispatch(*args)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 367, in dispatch_shell
await result
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 662, in execute_request
reply_content = await reply_content
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 360, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\ProgramData\Anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 532, in run_cell
return super().run_cell(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2863, in run_cell
result = self._run_cell(
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2909, in _run_cell
return runner(coro)
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\async_helpers.py", line 129, in _pseudo_sync_runner
coro.send(None)
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3106, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3309, in run_ast_nodes
if await self.run_code(code, result, async_=asy):
File "C:\ProgramData\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3369, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "C:\Users\Administrator\AppData\Local\Temp\2\ipykernel_4140\2952872782.py", line 10, in <cell line: 10>
model1.fit([tokenisedTrainessay],to_categorical(y_train), epochs=50,verbose=2,batch_size=512,validation_split=0.3,callbacks = [cp_callback])
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1606, in fit
val_logs = self.evaluate(
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1947, in evaluate
tmp_logs = self.test_function(iterator)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1727, in test_function
return step_function(self, iterator)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1713, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1701, in run_step
outputs = model.test_step(data)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 1665, in test_step
y_pred = self(x, training=False)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\training.py", line 557, in __call__
return super().__call__(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\base_layer.py", line 1097, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 96, in error_handler
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\functional.py", line 510, in call
return self._run_internal_graph(inputs, training=training, mask=mask)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\functional.py", line 667, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\base_layer.py", line 1097, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 96, in error_handler
return fn(*args, **kwargs)
File "C:\ProgramData\Anaconda3\lib\site-packages\keras\layers\core\embedding.py", line 208, in call
out = tf.nn.embedding_lookup(self.embeddings, inputs)
Node: 'model/embeddedEssay/embedding_lookup'
indices[413,323] = 49330 is not in [0, 49330)
[[{{node model/embeddedEssay/embedding_lookup}}]] [Op:__inference_test_function_5617]
I have been stuck on this for the last three days.
I tried increasing the dimension by 1, but that failed as well.
Code snippet attached:
essayInput = Input(shape=(350,),dtype='int32',name='essayInput')
embeddedEssay = Embedding(input_dim=(len(tokenizeressay.word_index.items())+1),output_dim=300,name='embeddedEssay',weights=[word_Weights],trainable=False)(essayInput)
essayLSTM = LSTM(units=128, return_sequences=True)(embeddedEssay)
essayOut = Flatten()(essayLSTM)
The error I get when doing this:
ValueError Traceback (most recent call last)
Input In [25], in <cell line: 5>()
1 #LSTM and get the LSTM output and Flatten that output. ----> Accoridng to the assignment
2 # I have choose 128 Units
4 essayInput = Input(shape=(350,),dtype='int32',name='essayInput')
----> 5 embeddedEssay = Embedding(input_dim=(len(tokenizeressay.word_index.items())+1),output_dim=300,name='embeddedEssay',weights=[word_Weights],trainable=False)(essayInput)
6 essayLSTM = LSTM(units=128, return_sequences=True)(embeddedEssay)
7 essayOut = Flatten()(essayLSTM)
File C:\ProgramData\Anaconda3\lib\site-packages\keras\utils\traceback_utils.py:70, in filter_traceback.<locals>.error_handler(*args, **kwargs)
67 filtered_tb = _process_traceback_frames(e.__traceback__)
68 # To get the full stack trace, call:
69 # `tf.debugging.disable_traceback_filtering()`
---> 70 raise e.with_traceback(filtered_tb) from None
71 finally:
72 del filtered_tb
File C:\ProgramData\Anaconda3\lib\site-packages\keras\engine\base_layer.py:1772, in Layer.set_weights(self, weights)
1770 ref_shape = param.shape
1771 if not ref_shape.is_compatible_with(weight_shape):
-> 1772 raise ValueError(
1773 f"Layer {self.name} weight shape {ref_shape} "
1774 "is not compatible with provided weight "
1775 f"shape {weight_shape}."
1776 )
1777 weight_value_tuples.append((param, weight))
1778 weight_index += 1
ValueError: Layer embeddedEssay weight shape (49331, 300) is not compatible with provided weight shape (49330, 300).
As stated in the error,
ValueError: Layer embeddedEssay weight shape (49331, 300) is not compatible with provided weight shape (49330, 300).
The error has occurred due to a shape mismatch, hence try increasing the input dimension of the Embedding layer as shown below.
embeddedEssay = Embedding(input_dim=(len(tokenizeressay.word_index.items()))+1,output_dim=300,name='embeddedEssay',weights=[word_Weights],trainable=False)(essayInput)
Also increase the vocabulary size as follows, so the weight matrix matches the layer's expected shape.
size_vocab = len(list(tokenizeressay.word_counts.keys()))+1
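For reference, here is a minimal sketch of a consistent pairing of vocabulary size, weight matrix, and Embedding layer (assuming the standard Keras Tokenizer behaviour, where word indices start at 1 and index 0 is reserved for padding):

size_glove = 300
size_vocab = len(tokenizeressay.word_index) + 1    # +1 for the padding index 0
word_Weights = np.zeros((size_vocab, size_glove))
for word, i in tokenizeressay.word_index.items():
    vec = glove.get(word)
    if vec is not None:
        word_Weights[i] = vec                      # row i, not i-1, so indices line up
embeddedEssay = Embedding(input_dim=size_vocab, output_dim=size_glove,
                          weights=[word_Weights], trainable=False,
                          name='embeddedEssay')(essayInput)

With this setup the largest index the tokenizer can produce (len(word_index)) is always a valid row of the weight matrix.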
Kindly refer to this gist for the complete code and this example for more information on the error. Thank you!
I'm trying to fine-tune ResNet50 in half precision mode, without success. It seems there are parts of the model that are not compatible with float16. Here is my code:
from keras import backend as K
from keras.models import Sequential
from keras.applications import ResNet50

dtype = 'float16'
K.set_floatx(dtype)
K.set_epsilon(1e-4)
model = Sequential()
model.add(ResNet50(weights='imagenet', include_top=False, pooling='avg'))
and I get this error:
Traceback (most recent call last):
File "train_resnet.py", line 40, in <module>
model.add(ResNet50(weights='imagenet', include_top=False, pooling='avg'))
File "/usr/local/lib/python3.6/dist-packages/keras/applications/__init__.py", line 28, in wrapper
return base_fun(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/keras/applications/resnet50.py", line 11, in ResNet50
return resnet50.ResNet50(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/keras_applications/resnet50.py", line 231, in ResNet50
x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
File "/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py", line 457, in __call__
output = self.call(inputs, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/keras/layers/normalization.py", line 185, in call
epsilon=self.epsilon)
File "/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py", line 1864, in normalize_batch_in_training
epsilon=epsilon)
File "/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py", line 1839, in _fused_normalize_batch_in_training
data_format=tf_data_format)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/nn_impl.py", line 1329, in fused_batch_norm
name=name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_nn_ops.py", line 4488, in fused_batch_norm_v2
name=name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py", line 626, in _apply_op_helper
param_name=input_name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py", line 60, in _SatisfiesTypeConstraint
", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
TypeError: Value passed to parameter 'scale' has DataType float16 not in list of allowed values: float32
This was a reported bug and upgrading to Keras==2.2.5 solved the issue.
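On newer stacks, the usual way to get the same effect is a mixed-precision policy instead of changing the global floatx; a minimal sketch, assuming TF 2.4+ with tf.keras:

import tensorflow as tf
from tensorflow.keras import mixed_precision

# Compute in float16 but keep variables (including BatchNorm parameters) in float32.
mixed_precision.set_global_policy('mixed_float16')

model = tf.keras.Sequential([
    tf.keras.applications.ResNet50(weights='imagenet', include_top=False, pooling='avg'),
])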
Let me start from the beginning. I'm implementing a partial convolution layer for image inpainting in TensorFlow 1.14, based on the unofficial Keras implementation (I have already tested it and it works on my dataset).
This architecture uses a pretrained (ImageNet) VGG16 to compute some loss terms. Sadly, a VGG implemented in plain TensorFlow didn't work (I've tried with this one), unlike the one in keras.applications. Therefore, I used this class to incorporate the keras.applications VGG16 into my TensorFlow 1.14 code.
Everything was working fine, but then I incorporated Mixed Precision Training (documentation) into my code, and the VGG16 part gave the following error:
Instructions for updating:
Call initializer instance with the dtype argument instead of passing it to the constructor
ERROR:tensorflow:==================================
Object was never used (type <class 'tensorflow.python.framework.ops.Tensor'>):
<tf.Tensor 'VGG16/model/IsVariableInitialized_3:0' shape=() dtype=bool>
If you want to mark it as used call its "mark_used()" method.
It was originally created here:
File "main.py", line 131, in <module>
psi_gt, psi_out, psi_comp, I_comp, layers = model.build_vgg(data_gt, unet_pconv,
data_mask) File "/workspace/model.py", line 52, in build_vgg
vgg = vgg16.VGG16(image_shape=gt.shape, input_tensor=gt) File "/workspace/vgg.py", line
17, in __init__
self._build_graph(input_tensor) File "/workspace/vgg.py", line 35, in _build_graph
self.vgg16 = tf.keras.applications.VGG16(weights='imagenet', include_top=False,
input_tensor=img) File "/usr/local/lib/python3.6/dist-
packages/tensorflow/python/keras/applications/__init__.py", line 70, in wrapper
return base_fun(*args, **kwargs) File "/usr/local/lib/python3.6/dist-
packages/tensorflow/python/keras/applications/vgg16.py", line 32, in VGG16
return vgg16.VGG16(*args, **kwargs) File "/usr/local/lib/python3.6/dist-
packages/keras_applications/vgg16.py", line 210, in VGG16
model.load_weights(weights_path) File "/usr/local/lib/python3.6/dist-
packages/tensorflow/python/keras/engine/training.py", line 162, in load_weights
return super(Model, self).load_weights(filepath, by_name) File
"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py", line
1424, in load_weights
saving.load_weights_from_hdf5_group(f, self.layers) File "/usr/local/lib/python3.6/dist-
packages/tensorflow/python/keras/saving/hdf5_format.py", line 759, in
load_weights_from_hdf5_group
K.batch_set_value(weight_value_tuples) File "/usr/local/lib/python3.6/dist-
packages/tensorflow/python/keras/backend.py", line 3071, in batch_set_value
get_session().run(assign_ops, feed_dict=feed_dict) File "/usr/local/lib/python3.6/dist-
packages/tensorflow/python/keras/backend.py", line 462, in get_session
_initialize_variables(session) File "/usr/local/lib/python3.6/dist-
packages/tensorflow/python/keras/backend.py", line 879, in _initialize_variables
[variables_module.is_variable_initialized(v) for v in candidate_vars]) File
"/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py", line 879, in
<listcomp>
[variables_module.is_variable_initialized(v) for v in candidate_vars]) File
"/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/tf_should_use.py", line 193,
in wrapped
return _add_should_use_warning(fn(*args, **kwargs))
==================================
(The same "Object was never used" warning, with an identical stack trace, is then repeated for the tensors 'VGG16/model/IsVariableInitialized_2:0', 'VGG16/model/IsVariableInitialized_1:0' and 'VGG16/model/IsVariableInitialized:0'.)
Traceback (most recent call last):
File "main.py", line 131, in <module>
psi_gt, psi_out, psi_comp, I_comp, layers = model.build_vgg(data_gt, unet_pconv, data_mask)
File "/workspace/model.py", line 52, in build_vgg
vgg = vgg16.VGG16(image_shape=gt.shape, input_tensor=gt)
File "/workspace/vgg.py", line 17, in __init__
self._build_graph(input_tensor)
File "/workspace/vgg.py", line 35, in _build_graph
self.vgg16 = tf.keras.applications.VGG16(weights='imagenet', include_top=False,
input_tensor=img)
File "/usr/local/lib/python3.6/dist-
packages/tensorflow/python/keras/applications/__init__.py", line 70, in wrapper
return base_fun(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/applications/vgg16.py", line 32, in VGG16
return vgg16.VGG16(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/keras_applications/vgg16.py", line 210, in VGG16
model.load_weights(weights_path)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py", line 162, in load_weights
return super(Model, self).load_weights(filepath, by_name)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/network.py", line 1424, in load_weights
saving.load_weights_from_hdf5_group(f, self.layers)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/saving/hdf5_format.py", line 759, in load_weights_from_hdf5_group
K.batch_set_value(weight_value_tuples)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py", line 3071, in batch_set_value
get_session().run(assign_ops, feed_dict=feed_dict)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py", line 462, in get_session
_initialize_variables(session)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py", line 879, in _initialize_variables
[variables_module.is_variable_initialized(v) for v in candidate_vars])
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/backend.py", line 879, in <listcomp>
[variables_module.is_variable_initialized(v) for v in candidate_vars])
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/util/tf_should_use.py", line 193, in wrapped
return _add_should_use_warning(fn(*args, **kwargs))
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/variables.py", line 3083, in is_variable_initialized
return state_ops.is_variable_initialized(variable)
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/state_ops.py", line 133, in is_variable_initialized
return ref.is_initialized(name=name)
AttributeError: 'Tensor' object has no attribute 'is_initialized'
For mixed precision training I'm using an NVIDIA Docker image.
The VGG16 is used here to obtain the feature maps of 3 images:
def build_vgg(gt, y_pred, mask):
    vgg_layer = ['block1_pool', 'block2_pool', 'block3_pool']
    vgg = vgg16.VGG16(image_shape=gt.shape, input_tensor=gt)
    psi_gt = {}
    psi_gt[vgg_layer[0]] = tf.identity(vgg[vgg_layer[0]], name='gt_vgg0')
    psi_gt[vgg_layer[1]] = tf.identity(vgg[vgg_layer[1]], name='gt_vgg1')
    psi_gt[vgg_layer[2]] = tf.identity(vgg[vgg_layer[2]], name='gt_vgg2')
    vgg = vgg16.VGG16(image_shape=y_pred.shape, input_tensor=y_pred)
    psi_out = {}
    psi_out[vgg_layer[0]] = tf.identity(vgg[vgg_layer[0]], name='out_vgg0')
    psi_out[vgg_layer[1]] = tf.identity(vgg[vgg_layer[1]], name='out_vgg1')
    psi_out[vgg_layer[2]] = tf.identity(vgg[vgg_layer[2]], name='out_vgg2')
    I_comp = (mask * gt) + ((1-mask) * y_pred)
    vgg = vgg16.VGG16(image_shape=I_comp.shape, input_tensor=I_comp)
    psi_comp = {}
    psi_comp[vgg_layer[0]] = tf.identity(vgg[vgg_layer[0]], name='comp_vgg0')
    psi_comp[vgg_layer[1]] = tf.identity(vgg[vgg_layer[1]], name='comp_vgg1')
    psi_comp[vgg_layer[2]] = tf.identity(vgg[vgg_layer[2]], name='comp_vgg2')
    return psi_gt, psi_out, psi_comp, I_comp, vgg_layer
The previous function is used in the main script:
import tensorflow as tf
import PConv
import model
import layers
import math
import os
import data
import utils
import numpy as np
import datetime
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Mixed precision training variable storage
def float32_variable_storage_getter(getter, name, shape=None, dtype=None,
                                    initializer=None, regularizer=None,
                                    trainable=True, *args, **kwargs):
    storage_dtype = tf.float32 if trainable else dtype
    variable = getter(name, shape, dtype=storage_dtype,
                      initializer=initializer, regularizer=regularizer,
                      trainable=trainable, *args, **kwargs)
    if trainable and dtype != tf.float32:
        variable = tf.cast(variable, dtype)
    return variable
# ==============================================================================
# SETTINGS
# ==============================================================================
path_ =''
batch_size = 16
best_val = math.inf
best_val_epoch = 0
patience = 0
stop = 300
epochs = 2000
steps_train = 25
steps_val = 8
template = '{}, Epoch {}, train_loss: {:.4f} - val_loss: {:.4f}'
path = path_ + 'tmp/'
if not os.path.isdir(path):
    os.mkdir(path)
# ==============================================================================
# DATA
# ==============================================================================
X_train, m_train, y_train = data.get_filenames()
X_val, m_val, y_val = data.get_filenames(train=False)
# ==============================================================================
# DATASET
# ==============================================================================
train_dataset = tf.data.Dataset.from_tensor_slices((X_train, m_train, y_train))#(images, mask, gt))
train_dataset = train_dataset.map(data.load, num_parallel_calls=tf.data.experimental.AUTOTUNE)
train_dataset = train_dataset.batch(batch_size)
train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
val_dataset = tf.data.Dataset.from_tensor_slices((X_val, m_val, y_val))#(images, mask, gt))
val_dataset = val_dataset.map(data.load, num_parallel_calls=tf.data.experimental.AUTOTUNE)
val_dataset = val_dataset.batch(batch_size)
val_dataset = val_dataset.prefetch(buffer_size=1)
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                           train_dataset.output_shapes)
data_im, data_mask, data_gt = iterator.get_next()
# create the initialization operations
train_init_op = iterator.make_initializer(train_dataset)
val_init_op = iterator.make_initializer(val_dataset)
# ==============================================================================
# MODEL
# ==============================================================================
data_im = tf.cast(data_im, tf.float16)
data_mask = tf.cast(data_mask, tf.float16)
with tf.variable_scope('fp32_vars', custom_getter=float32_variable_storage_getter):
    unet_pconv = model.pconv_unet(data_im, data_mask)
unet_pconv = tf.cast(unet_pconv, tf.float32)
data_mask = tf.cast(data_mask, tf.float32)
psi_gt, psi_out, psi_comp, I_comp, layers = model.build_vgg(data_gt, unet_pconv, data_mask)
I_comp = tf.cast(I_comp, tf.float32)
# # ==============================================================================
# # LOSS
# # ==============================================================================
loss = utils.get_total_loss(unet_pconv, data_gt, data_mask, psi_gt, psi_out, psi_comp, I_comp, layers)
lr = 0.0002
optimizer = utils.optimize(loss, lr)
saver = tf.train.Saver()
# # ==============================================================================
# # TRAINING
# # ==============================================================================
output_summary = tf.summary.image(name='output', tensor=unet_pconv)
merged = tf.summary.merge_all()
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('graphs', sess.graph)
    train_loss_, val_loss_ = [], []
    for epoch in range(epochs):
        pred_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        tl, vl = [], []
        # Initialize iterator with training data
        sess.run(train_init_op)
        try:
            for step in range(steps_train):
                _, train_loss, summ = sess.run([optimizer, loss, merged])
                writer.add_summary(summ, epoch)
                tl.append(train_loss)
            mean_train = utils.list_mean(tl)
            train_loss_.append(mean_train)
        except tf.errors.OutOfRangeError:
            pass
        if (epoch+1) % 1 == 0:
            sess.run(val_init_op)
            try:
                for step in range(steps_val):
                    val_loss = sess.run([loss])
                    vl.append(val_loss)
                mean_val = utils.list_mean(vl)
                val_loss_.append(mean_val)
            except tf.errors.OutOfRangeError:
                pass
            print(template.format(pred_time, epoch, mean_train, mean_val))
            # early stopping
            if mean_val < best_val:
                print('Saving on epoch {0}'.format(epoch))
                best_val = mean_val
                patience = 0
                best_val_epoch = epoch
                saver.save(sess, path + 'best_model')
            else:
                patience += 1
                if patience == stop:
                    print('Early stopping at epoch: {}'.format(best_val_epoch))
                    break
# # ==============================================================================
# # SAVE CURVES
# # ==============================================================================
np.save(path_+'loss.npy', train_loss_)
np.save(path_+'val_loss.npy', val_loss_)
The optimization is done as follows:
def optimize(loss, learning_rate=1e-4):
    U_vars = [var for var in tf.trainable_variables() if 'UNET' in var.name]
    opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
    opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt, loss_scale=128.0)
    train_opt = opt.minimize(loss, var_list=U_vars)
    return train_opt
I've been trying to fix this for a while and still don't understand why it stops working when I enable mixed precision training. Feel free to ask for more details.
If you can give a hand, that would be great! Thank you in advance.
I've tried many approaches, and my final conclusion is that the pretrained Keras models are not compatible here. I changed it to a TensorFlow VGG16 model; it runs slower, but at least it works.
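For illustration, a rough sketch of that direction, under the assumption that the pretrained weights have been exported to a dict of NumPy arrays (the file name and key naming below are hypothetical): the feature extractor is built from plain TF ops in float32, so no Keras weight-loading machinery runs inside the mixed-precision graph.

import numpy as np
import tensorflow as tf

# Hypothetical weight file: {'block1_conv1/kernel': ..., 'block1_conv1/bias': ..., ...}
vgg_weights = np.load('vgg16_weights.npz')

def conv_block(x, names):
    # Stack of float32 conv+relu layers followed by 2x2 max pooling.
    for name in names:
        k = tf.constant(vgg_weights[name + '/kernel'], dtype=tf.float32)
        b = tf.constant(vgg_weights[name + '/bias'], dtype=tf.float32)
        x = tf.nn.relu(tf.nn.conv2d(x, k, strides=[1, 1, 1, 1], padding='SAME') + b)
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

def vgg_features(img):
    # Cast to float32 so the loss network stays out of the fp16 rewrite path.
    x = tf.cast(img, tf.float32)
    feats = {}
    x = conv_block(x, ['block1_conv1', 'block1_conv2']); feats['block1_pool'] = x
    x = conv_block(x, ['block2_conv1', 'block2_conv2']); feats['block2_pool'] = x
    x = conv_block(x, ['block3_conv1', 'block3_conv2', 'block3_conv3']); feats['block3_pool'] = x
    return feats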
I want to load a checkpoint from TensorFlow 1 that consists of .index, .meta and .data-00000-of-00001 files into TensorFlow 2.0.0 and convert it to a Keras model, so that I can use it natively in eager mode without needing tf.Session. Here is the code I ran:
import tensorflow as tf
import numpy as np
from tensorflow.python.keras.backend import set_session
from tensorflow.python.training.saver import _import_meta_graph_with_return_elements
def save_ckpt(ckpt_path='test'):
    '''save TensorFlow-1 Checkpoint'''
    with tf.Graph().as_default() as g:
        in_op = tf.constant(np.random.rand(1,2,2,2), name='input', dtype=tf.float32)
        out_op = tf.keras.layers.Conv2D(3, (3,3), padding='same', name='MY_LAYER')(in_op)
        saver = tf.compat.v1.train.Saver()
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.variables_initializer(tf.compat.v1.global_variables()))
            saver.save(sess, ckpt_path)
def load_ckpt():
    '''Keras model from meta & ckpt'''
    in_op = tf.keras.Input([2,2,2])
    _m = tf.keras.models.Model(inputs=in_op, outputs=in_op)
    with _m.input.graph.as_default() as g:
        saver, out_op = _import_meta_graph_with_return_elements(
            'test.meta',
            input_map={'input': _m.output},
            return_elements=[
                # 'input:0',
                'MY_LAYER/Conv2D:0'
            ])
        with tf.compat.v1.Session() as sess:
            saver.restore(sess, 'test')
            set_session(sess)
    out_mdl = tf.keras.models.Model(inputs=_m.input, outputs=out_op[0])
    return out_mdl
# main
save_ckpt() # save name based checkpoint
meta_model = load_ckpt() # restore in keras model
oo = meta_model(np.random.rand(1,2,2,2)) # run the model
print(oo)
but I get this error:
Traceback (most recent call last):
File "question2.py", line 38, in <module>
meta_model = load_ckpt() # restore in keras model
File "question2.py", line 32, in load_ckpt
out_mdl = tf.keras.models.Model(inputs=_m.input, outputs=out_op[0])
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/training.py", line 146, in __init__
super(Model, self).__init__(*args, **kwargs)
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/network.py", line 167, in __init__
self._init_graph_network(*args, **kwargs)
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
result = method(self, *args, **kwargs)
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/network.py", line 270, in _init_graph_network
base_layer_utils.create_keras_history(self._nested_outputs)
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py", line 184, in create_keras_history
_, created_layers = _create_keras_history_helper(tensors, set(), [])
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/engine/base_layer_utils.py", line 229, in _create_keras_history_helper
constants[i] = backend.function([], op_input)([])
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/keras/backend.py", line 3740, in __call__
outputs = self._graph_fn(*converted_inputs)
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 1081, in __call__
return self._call_impl(args, kwargs)
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 1121, in _call_impl
return self._call_flat(args, self.captured_inputs, cancellation_manager)
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 1224, in _call_flat
ctx, args, cancellation_manager=cancellation_manager)
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/eager/function.py", line 511, in call
ctx=ctx)
File "/home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/eager/execute.py", line 67, in quick_execute
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.FailedPreconditionError: Error while reading resource variable MY_LAYER/kernel from Container: localhost. This could mean that the variable was uninitialized. Not found: Container localhost does not exist. (Could not find resource: localhost/MY_LAYER/kernel)
[[node MY_LAYER/Conv2D/ReadVariableOp (defined at /home/dionyssos/tf2/lib/python3.6/site-packages/tensorflow_core/python/framework/ops.py:1751) ]] [Op:__inference_keras_scratch_graph_72]
Function call stack:
keras_scratch_graph
What I tried so far
Replacing MY_LAYER/Conv2D:0 with input:0 in _import_meta_graph_with_return_elements() makes the code run with no problem.
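For what it's worth, a minimal sketch of an alternative route (not the approach above, just an assumption-laden workaround): read the variable values straight out of the checkpoint with tf.train.load_checkpoint and copy them into a natively built tf.keras model, which avoids importing the meta graph and tf.Session entirely.

import numpy as np
import tensorflow as tf

reader = tf.train.load_checkpoint('test')   # same prefix passed to saver.save
# reader.get_variable_to_shape_map() lists the stored names, e.g. 'MY_LAYER/kernel'

in_op = tf.keras.Input([2, 2, 2])
out_op = tf.keras.layers.Conv2D(3, (3, 3), padding='same', name='MY_LAYER')(in_op)
model = tf.keras.models.Model(inputs=in_op, outputs=out_op)

# Conv2D stores its weights as [kernel, bias].
model.get_layer('MY_LAYER').set_weights([
    reader.get_tensor('MY_LAYER/kernel'),
    reader.get_tensor('MY_LAYER/bias'),
])
print(model(np.random.rand(1, 2, 2, 2).astype(np.float32)))  # eager, no tf.Session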
I'm writing a simple application using Flask. First I tried this:
def get_model():
    global model
    model = load_model('mobilenet.h5')
    print("** Model loaded!")
But I got an error:
Traceback (most recent call last):
File "C:\Users\Agnieszka\Anaconda4\Scripts\flask-script.py", line 10, in <module>
sys.exit(main())
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\flask\cli.py", line 894, in main
cli.main(args=args, prog_name=name)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\flask\cli.py", line 557, in main
return super(FlaskGroup, self).main(*args, **kwargs)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\click\core.py", line 697, in main
rv = self.invoke(ctx)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\click\core.py", line 1066, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\click\core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\click\core.py", line 535, in invoke
return callback(*args, **kwargs)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\click\decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args[1:], **kwargs)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\click\core.py", line 535, in invoke
return callback(*args, **kwargs)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\flask\cli.py", line 767, in run_command
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\flask\cli.py", line 293, in __init__
self._load_unlocked()
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\flask\cli.py", line 317, in _load_unlocked
self._app = rv = self.loader()
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\flask\cli.py", line 372, in load_app
app = locate_app(self, import_name, name)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\flask\cli.py", line 235, in locate_app
__import__(module_name)
File "C:\Users\Agnieszka\Desktop\noc-naukowcow\flask_apps\predict.py", line 32, in <module>
get_model()
File "C:\Users\Agnieszka\Desktop\noc-naukowcow\flask_apps\predict.py", line 19, in get_model
model = tf.keras.models.load_model('mobilenet.h5')
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\tensorflow\python\keras\engine\saving.py", line 249, in load_model
optimizer_config, custom_objects=custom_objects)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\tensorflow\python\keras\optimizers.py", line 838, in deserialize
printable_module_name='optimizer')
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\tensorflow\python\keras\utils\generic_utils.py", line 194, in deserialize_keras_object
return cls.from_config(cls_config)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\tensorflow\python\keras\optimizers.py", line 159, in from_config
return cls(**config)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\tensorflow\python\keras\optimizers.py", line 471, in __init__
super(Adam, self).__init__(**kwargs)
File "C:\Users\Agnieszka\Anaconda4\lib\site-packages\tensorflow\python\keras\optimizers.py", line 68, in __init__
'passed to optimizer: ' + str(k))
TypeError: Unexpected keyword argument passed to optimizer: name
Then I changed the code to use tf.keras.models.load_model, but that hasn't helped.
The model was built in Colaboratory using Keras version 2.2.4, as follows:
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, Dropout, Flatten, Dense
inputs = Input(shape=(96, 80, 3), name='input')
x = Conv2D(64, (3, 3), activation='relu')(inputs)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25)(x)
x = Conv2D(128, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25)(x)
x = Conv2D(256, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.25)(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(35, activation='softmax')(x)
model = Model(inputs=inputs, outputs=x, name='CNN')
model.compile(optimizer=optimizer, loss=loss_fn, metrics=['acc'])
I wonder if the problem is due to the optimizer that I used (it was Adam).
Is there any solution for this?
The error says Unexpected keyword argument passed to optimizer: name. The name argument is used in TensorFlow models to name a specific op or tensor; TensorFlow ships its own copy of Keras, but Keras also still exists as a standalone package.
You have probably created the model using tf.keras but are loading it with keras.models.load_model; the standalone Keras package can't recognize the TF-specific argument.
Try bringing the whole process into TensorFlow by using the methods from tf.keras.
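For instance, a minimal sketch of the loading side (the compile settings below are assumptions, not taken from the question): passing compile=False is a common workaround here, because the failure happens while deserializing the optimizer config, and the model can be recompiled afterwards if it still needs training.

import tensorflow as tf

def get_model():
    global model
    # compile=False skips optimizer deserialization, where the 'name' kwarg fails.
    model = tf.keras.models.load_model('mobilenet.h5', compile=False)
    # Recompile only if you intend to keep training; for inference this is optional.
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])
    print("** Model loaded!")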