Python: I have a problem with "Graph execution error"

I wrote this code, which gives me a "Graph execution error". On my friend's PC the same code runs without errors; what could be the problem?
I've checked the versions of the various libraries and everything seems to be the same.
I'm working in the Spyder environment and I don't understand what the problem is.
This is the error:
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\spyder_kernels\py3compat.py", line 356, in compat_exec
exec(code, globals, locals)
File "c:\users\hp\desktop\universita_laurea_magistrale\tirocinio\dataset\codiceprof.py", line 147, in <module>
storia=model.fit(train_gen, epochs=epochs, validation_data=train_gen)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\utils\traceback_utils.py", line 70, in error_handler
raise e.with_traceback(filtered_tb) from None
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\tensorflow\python\eager\execute.py", line 54, in quick_execute
tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
InvalidArgumentError: Graph execution error:
Detected at node 'sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits' defined at (most recent call last):
File "C:\Users\hp\anaconda3\envs\unet\lib\runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\hp\anaconda3\envs\unet\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\spyder_kernels\console\__main__.py", line 24, in <module>
start.main()
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\spyder_kernels\console\start.py", line 340, in main
kernel.start()
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\ipykernel\kernelapp.py", line 712, in start
self.io_loop.start()
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\tornado\platform\asyncio.py", line 215, in start
self.asyncio_loop.run_forever()
File "C:\Users\hp\anaconda3\envs\unet\lib\asyncio\base_events.py", line 596, in run_forever
self._run_once()
File "C:\Users\hp\anaconda3\envs\unet\lib\asyncio\base_events.py", line 1890, in _run_once
handle._run()
File "C:\Users\hp\anaconda3\envs\unet\lib\asyncio\events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\ipykernel\kernelbase.py", line 510, in dispatch_queue
await self.process_one()
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\ipykernel\kernelbase.py", line 499, in process_one
await dispatch(*args)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\ipykernel\kernelbase.py", line 406, in dispatch_shell
await result
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\ipykernel\kernelbase.py", line 730, in execute_request
reply_content = await reply_content
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\ipykernel\ipkernel.py", line 390, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\ipykernel\zmqshell.py", line 528, in run_cell
return super().run_cell(*args, **kwargs)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\IPython\core\interactiveshell.py", line 2914, in run_cell
result = self._run_cell(
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\IPython\core\interactiveshell.py", line 2960, in _run_cell
return runner(coro)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\IPython\core\async_helpers.py", line 78, in _pseudo_sync_runner
coro.send(None)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\IPython\core\interactiveshell.py", line 3185, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\IPython\core\interactiveshell.py", line 3377, in run_ast_nodes
if (await self.run_code(code, result, async_=asy)):
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\IPython\core\interactiveshell.py", line 3457, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "C:\Users\hp\AppData\Local\Temp\ipykernel_9760\815882102.py", line 1, in <module>
runfile('C:/Users/hp/Desktop/Universita_laurea_Magistrale/TIROCINIO/Dataset/codiceprof.py', wdir='C:/Users/hp/Desktop/Universita_laurea_Magistrale/TIROCINIO/Dataset')
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 524, in runfile
return _exec_file(
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 611, in _exec_file
exec_code(file_code, filename, ns_globals, ns_locals,
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 469, in exec_code
exec_fun(compile(ast_code, filename, 'exec'), ns_globals, ns_locals)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\spyder_kernels\py3compat.py", line 356, in compat_exec
exec(code, globals, locals)
File "c:\users\hp\desktop\universita_laurea_magistrale\tirocinio\dataset\codiceprof.py", line 147, in <module>
storia=model.fit(train_gen, epochs=epochs, validation_data=train_gen)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\utils\traceback_utils.py", line 65, in error_handler
return fn(*args, **kwargs)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\engine\training.py", line 1564, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\engine\training.py", line 1160, in train_function
return step_function(self, iterator)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\engine\training.py", line 1146, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\engine\training.py", line 1135, in run_step
outputs = model.train_step(data)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\engine\training.py", line 994, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\engine\training.py", line 1052, in compute_loss
return self.compiled_loss(
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\engine\compile_utils.py", line 265, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\losses.py", line 152, in __call__
losses = call_fn(y_true, y_pred)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\losses.py", line 272, in call
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\losses.py", line 2084, in sparse_categorical_crossentropy
return backend.sparse_categorical_crossentropy(
File "C:\Users\hp\anaconda3\envs\unet\lib\site-packages\keras\backend.py", line 5630, in sparse_categorical_crossentropy
res = tf.nn.sparse_softmax_cross_entropy_with_logits(
Node: 'sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits'
logits and labels must have the same first dimension, got logits shape [2048,2] and labels shape [131072]
[[{{node sparse_categorical_crossentropy/SparseSoftmaxCrossEntropyWithLogits/SparseSoftmaxCrossEntropyWithLogits}}]] [Op:__inference_train_function_14691]
from tensorflow import keras
import numpy as np
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
import PIL
from keras.models import Model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate, Conv2DTranspose, BatchNormalization, Dropout, Lambda
from keras.optimizers import Adam
from keras.layers import Activation, MaxPool2D, Concatenate
import glob
import matplotlib
import matplotlib.pyplot as plt
from tensorflow.keras import layers
class leggi_immagini(keras.utils.Sequence):
    def __init__(self, batch_size, img_size, input_img_paths, target_img_paths):
        self.batch_size = batch_size
        self.img_size = img_size
        self.input_img_paths = input_img_paths
        self.target_img_paths = target_img_paths

    def __len__(self):
        return len(self.target_img_paths) // self.batch_size

    def __getitem__(self, idx):
        """Returns tuple (input, target) corresponding to batch #idx."""
        i = idx * self.batch_size
        batch_input_img_paths = self.input_img_paths[i : i + self.batch_size]
        batch_target_img_paths = self.target_img_paths[i : i + self.batch_size]
        x = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="float32")
        for j, path in enumerate(batch_input_img_paths):
            img = load_img(path, color_mode='grayscale')
            x[j] = img_to_array(img)  # / 8191  # normalize to the maximum
        y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="uint8")
        for j, path2 in enumerate(batch_target_img_paths):
            img2 = load_img(path2, color_mode="grayscale")
            y[j] = img_to_array(img2) / 255
        return x, y
def get_model(img_size, num_classes):
    inputs = keras.Input(shape=img_size + (1,))

    ### [First half of the network: downsampling inputs] ###

    # Entry block
    x = layers.Conv2D(32, (3, 3), strides=2, padding="same",
                      input_shape=(256, 256, 1))(inputs)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    # Blocks 1, 2, 3 are identical apart from the feature depth.
    for filters in [64, 128, 256]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(filters, 1, strides=2, padding="same")(
            previous_block_activation
        )
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    ### [Second half of the network: upsampling inputs] ###

    for filters in [256, 128, 64, 32]:
        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.Conv2DTranspose(filters, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.UpSampling2D(2)(x)

        # Project residual
        residual = layers.UpSampling2D(2)(previous_block_activation)
        residual = layers.Conv2D(filters, 1, padding="same")(residual)
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    # Add a per-pixel classification layer
    outputs = layers.Conv2D(num_classes, 3, activation="softmax", padding="same")(x)

    # Define the model
    model = keras.Model(inputs, outputs)
    return model
epochs = 5
batch_size = 2
img_size = (256, 256)

inputdir = 'C://Users//hp//Desktop//Universita_laurea_Magistrale//TIROCINIO//Imm//immagine_uscita//'
input_path = glob.glob(inputdir + '*.tif')
maskdir = 'C://Users//hp//Desktop//Universita_laurea_Magistrale//TIROCINIO//Imm//maschera_uscita//'
mask_path = glob.glob(maskdir + '*.tif')

train_gen = leggi_immagini(batch_size, img_size, input_path, mask_path)
model = get_model(img_size, num_classes=2)  # configure the model for training
model.summary()
model.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy")
storia = model.fit(train_gen, epochs=epochs, validation_data=train_gen)
valori = model.predict(train_gen)
plt.plot(storia.history['loss'])
plt.plot(storia.history['val_loss'])
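For reference, the numbers in the error are consistent with a spatial-size mismatch: the 131072 labels are 2 × 256 × 256 mask pixels, while the 2048 logits rows correspond to a 2 × 32 × 32 output map, so the model's output is smaller than the 256 × 256 masks. A minimal debugging sketch (hypothetical, not part of the original script) to confirm where the shapes diverge:

# Hypothetical debugging sketch: compare one generator batch with the model output.
x_batch, y_batch = train_gen[0]
print("inputs:", x_batch.shape)        # e.g. (2, 256, 256, 1)
print("labels:", y_batch.shape)        # e.g. (2, 256, 256, 1) -> 131072 values
print("logits:", model.output_shape)   # should be (None, 256, 256, num_classes)
# sparse_categorical_crossentropy flattens batch*height*width entries, so the
# first dimensions of the flattened logits and labels must match.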

Related

How to use K-fold cross validation on transfer learning?

I have created a transfer learning model using ResNet50. I want to perform K-fold cross-validation on it, after which I want to find the average AUC value and its standard deviation. However, I am getting an error message while performing the task. I have created a separate Files.csv file which contains the image names and their corresponding labels. I am not sure if this is the correct method or not; please let me know if there is another approach. Please find my code below:
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input
from tensorflow.keras import Model, layers
from tensorflow.keras.models import load_model, model_from_json
from tensorflow.keras.layers import GlobalAveragePooling2D, Dropout, Dense, Input
import numpy as np
import pandas as pd
import os
from sklearn.model_selection import KFold, StratifiedKFold
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_data = pd.read_csv('Files.csv')
Y = train_data[['label']]

kf = KFold(n_splits=5)

idg = ImageDataGenerator(rescale=1./255,
                         horizontal_flip=True,
                         rotation_range=40,
                         zoom_range=0.2,
                         shear_range=0.2,
                         width_shift_range=0.2,
                         height_shift_range=0.2)
validation_datagen = ImageDataGenerator(rescale=1./255)

def get_model_name(k):
    return 'model_' + str(k) + '.h5'

from keras import models
from keras.layers import Dense, Flatten
from tensorflow.keras import optimizers
from keras.applications.vgg16 import VGG16
from tensorflow.keras.applications import ResNet50

image_dir = r'D:/regionGrowing_MLT/NewSavedRGBImages/Training'

VALIDATION_ACCURACY = []
VALIDATION_LOSS = []

save_dir = 'C:/Users/warid'
fold_var = 1
for train_index, val_index in kf.split(np.zeros(n), Y):
    training_data = train_data.iloc[train_index]
    validation_data = train_data.iloc[val_index]

    train_data_generator = idg.flow_from_dataframe(training_data, directory=image_dir,
                                                   x_col="filename", y_col="label",
                                                   class_mode="categorical", shuffle=True)
    valid_data_generator = idg.flow_from_dataframe(validation_data, directory=image_dir,
                                                   x_col="filename", y_col="label",
                                                   class_mode="categorical", shuffle=True)

    # CREATE NEW MODEL
    model = models.Sequential()
    model.add(ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3)))
    model.add(Flatten())
    model.add(ChannelAttention(32, 8))
    model.add(SpatialAttention(7))
    model.add(Dense(256, activation='relu', name='fc1'))
    model.add(Dense(128, activation='relu', name='fc2'))
    model.add(layers.Dropout(0.5))  # used for regularization (to avoid overfitting)
    model.add(Dense(2, activation='sigmoid'))
    # model.summary()

    # COMPILE NEW MODEL
    model.compile(optimizer=optimizers.Adam(learning_rate=2e-5),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    # CREATE CALLBACKS
    checkpoint = tf.keras.callbacks.ModelCheckpoint(save_dir + get_model_name(fold_var),
                                                    monitor='val_accuracy', verbose=1,
                                                    save_best_only=True, mode='max')
    callbacks_list = [checkpoint]
    # There can be other callbacks, but just showing one because it involves the model name
    # This saves the best model

    # FIT THE MODEL
    history = model.fit(train_data_generator,
                        epochs=num_epochs,
                        callbacks=callbacks_list,
                        validation_data=valid_data_generator)
    # PLOT HISTORY
    # :
    # :

    # LOAD BEST MODEL to evaluate the performance of the model
    model.load_weights("/saved_models/model_" + str(fold_var) + ".h5")

    results = model.evaluate(valid_data_generator)
    results = dict(zip(model.metrics_names, results))

    VALIDATION_ACCURACY.append(results['accuracy'])
    VALIDATION_LOSS.append(results['loss'])

    tf.keras.backend.clear_session()

    fold_var += 1
After running this code, I am getting the following error message:
Found 3076 validated image filenames belonging to 2 classes.
Found 769 validated image filenames belonging to 1 classes.
Epoch 1/5
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
Input In [75], in <cell line: 7>()
39 callbacks_list = [checkpoint]
40 # There can be other callbacks, but just showing one because it involves the model name
41 # This saves the best model
42 # FIT THE MODEL
---> 43 history = model.fit(train_data_generator,
44 epochs=num_epochs,
45 callbacks=callbacks_list,
46 validation_data=valid_data_generator)
47 #PLOT HISTORY
48 # :
49 # :
50
51 # LOAD BEST MODEL to evaluate the performance of the model
52 model.load_weights("/saved_models/model_"+str(fold_var)+".h5")
File ~\anaconda3\lib\site-packages\keras\utils\traceback_utils.py:67, in filter_traceback.<locals>.error_handler(*args, **kwargs)
65 except Exception as e: # pylint: disable=broad-except
66 filtered_tb = _process_traceback_frames(e.__traceback__)
---> 67 raise e.with_traceback(filtered_tb) from None
68 finally:
69 del filtered_tb
File ~\anaconda3\lib\site-packages\tensorflow\python\eager\execute.py:54, in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
52 try:
53 ctx.ensure_initialized()
---> 54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
InvalidArgumentError: Graph execution error:
Detected at node 'sequential_2/flatten_2/Reshape' defined at (most recent call last):
File "C:\Users\warid\anaconda3\lib\runpy.py", line 197, in _run_module_as_main
return _run_code(code, main_globals, None,
File "C:\Users\warid\anaconda3\lib\runpy.py", line 87, in _run_code
exec(code, run_globals)
File "C:\Users\warid\anaconda3\lib\site-packages\ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "C:\Users\warid\anaconda3\lib\site-packages\traitlets\config\application.py", line 846, in launch_instance
app.start()
File "C:\Users\warid\anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 677, in start
self.io_loop.start()
File "C:\Users\warid\anaconda3\lib\site-packages\tornado\platform\asyncio.py", line 199, in start
self.asyncio_loop.run_forever()
File "C:\Users\warid\anaconda3\lib\asyncio\base_events.py", line 601, in run_forever
self._run_once()
File "C:\Users\warid\anaconda3\lib\asyncio\base_events.py", line 1905, in _run_once
handle._run()
File "C:\Users\warid\anaconda3\lib\asyncio\events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "C:\Users\warid\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 471, in dispatch_queue
await self.process_one()
File "C:\Users\warid\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 460, in process_one
await dispatch(*args)
File "C:\Users\warid\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 367, in dispatch_shell
await result
File "C:\Users\warid\anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 662, in execute_request
reply_content = await reply_content
File "C:\Users\warid\anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 360, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\Users\warid\anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 532, in run_cell
return super().run_cell(*args, **kwargs)
File "C:\Users\warid\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2863, in run_cell
result = self._run_cell(
File "C:\Users\warid\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2909, in _run_cell
return runner(coro)
File "C:\Users\warid\anaconda3\lib\site-packages\IPython\core\async_helpers.py", line 129, in _pseudo_sync_runner
coro.send(None)
File "C:\Users\warid\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3106, in run_cell_async
has_raised = await self.run_ast_nodes(code_ast.body, cell_name,
File "C:\Users\warid\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3309, in run_ast_nodes
if await self.run_code(code, result, async_=asy):
File "C:\Users\warid\anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 3369, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "C:\Users\warid\AppData\Local\Temp\ipykernel_37076\2928028949.py", line 43, in <cell line: 7>
history = model.fit(train_data_generator,
File "C:\Users\warid\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\engine\training.py", line 1409, in fit
tmp_logs = self.train_function(iterator)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\engine\training.py", line 1051, in train_function
return step_function(self, iterator)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\engine\training.py", line 1040, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "C:\Users\warid\anaconda3\lib\site-packages\keras\engine\training.py", line 1030, in run_step
outputs = model.train_step(data)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\engine\training.py", line 889, in train_step
y_pred = self(x, training=True)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\engine\training.py", line 490, in __call__
return super().__call__(*args, **kwargs)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\engine\base_layer.py", line 1014, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 92, in error_handler
return fn(*args, **kwargs)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\engine\sequential.py", line 374, in call
return super(Sequential, self).call(inputs, training=training, mask=mask)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\engine\functional.py", line 458, in call
return self._run_internal_graph(
File "C:\Users\warid\anaconda3\lib\site-packages\keras\engine\functional.py", line 596, in _run_internal_graph
outputs = node.layer(*args, **kwargs)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\engine\base_layer.py", line 1014, in __call__
outputs = call_fn(inputs, *args, **kwargs)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\utils\traceback_utils.py", line 92, in error_handler
return fn(*args, **kwargs)
File "C:\Users\warid\anaconda3\lib\site-packages\keras\layers\reshaping\flatten.py", line 98, in call
return tf.reshape(inputs, flattened_shape)
Node: 'sequential_2/flatten_2/Reshape'
Input to reshape is a tensor with 4194304 values, but the requested shape requires a multiple of 100352
[[{{node sequential_2/flatten_2/Reshape}}]] [Op:__inference_train_function_37893]
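For context, the numbers are consistent with an input-size mismatch: flow_from_dataframe defaults to target_size=(256, 256), and a 256 × 256 input to ResNet50 with include_top=False yields an 8 × 8 × 2048 feature map, i.e. 4194304 values for a batch of 32, whereas the declared input_shape=(224, 224, 3) would give 7 × 7 × 2048 = 100352 per sample. A hypothetical fix sketch (only target_size is new; everything else as in the question):

# Hypothetical sketch: make the generators emit 224x224 images so Flatten
# sees 7*7*2048 = 100352 values per sample, matching the ResNet50 input_shape.
train_data_generator = idg.flow_from_dataframe(training_data, directory=image_dir,
                                               x_col="filename", y_col="label",
                                               class_mode="categorical", shuffle=True,
                                               target_size=(224, 224))
valid_data_generator = idg.flow_from_dataframe(validation_data, directory=image_dir,
                                               x_col="filename", y_col="label",
                                               class_mode="categorical", shuffle=True,
                                               target_size=(224, 224))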

Have you experienced an error in Keras with a custom generator handling a double input (img + size) while training the model?

The goal is to categorize pottery using both images and a size.
I'm using TensorFlow version 2.8.0.
I created a custom generator (the CustomDataGen class below) to return ((img, size), classification) as follows:
class CustomDataGen(tf.keras.utils.Sequence):
    def __init__(self, input_gen1, input_gen2,
                 batch_size,
                 shuffle=False):
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.gen = input_gen1
        self.measures = input_gen2

    def __len__(self):
        return len(self.gen)

    def on_epoch_end(self):
        pass

    def __getitem__(self, index):
        filenames_np = np.vectorize(os.path.basename)(
            np.array(self.gen.filenames[index : index + self.gen.batch_size]))
        measures_of_files = np.vectorize(self.measures.get)(filenames_np)
        return (self.gen[index][0], measures_of_files), self.gen[index][1]
and this is how the generator is used, composing flow_from_directory with a filename-to-size dictionary:
TRAINING_DIR = "/tmp/Anfore/training/"
train_datagen = ImageDataGenerator(rescale=1./255,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
seed = 1
train_generator = train_datagen.flow_from_directory(TRAINING_DIR,
                                                    batch_size=64,
                                                    class_mode='categorical',
                                                    target_size=(150, 150),
                                                    seed=seed)

VALIDATION_DIR = "/tmp/Anfore/testing/"
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,
                                                              batch_size=64,
                                                              class_mode='categorical',
                                                              target_size=(150, 150))

testCustomDataGen_for_train = CustomDataGen(train_generator, dict_measures, batch_size=64)
testCustomDataGen_for_validation = CustomDataGen(validation_generator, dict_measures, batch_size=64)
The model is composed of a pretrained InceptionV3 stripped of the last few layers and coupled, via Concatenate, with a simple Dense layer to classify the results.
weights_url = "https://storage.googleapis.com/mledu-datasets/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5"
weights_file = "inception_v3.h5"
urllib.request.urlretrieve(weights_url, weights_file)

# Instantiate the model
pre_trained_model = InceptionV3(input_shape=(150, 150, 3),
                                include_top=False,
                                weights=None)

# load pre-trained weights
pre_trained_model.load_weights(weights_file)

# freeze the layers
for layer in pre_trained_model.layers:
    layer.trainable = False

# pre_trained_model.summary()

last_layer = pre_trained_model.get_layer('mixed7')
last_output = last_layer.output

from keras.layers import *
from keras.utils.vis_utils import plot_model

model2 = Sequential()
model2.add(Dense(1, input_shape=(1,), activation="relu"))

# here I can join the 2 models
x = layers.Conv2D(128, kernel_size=(3, 3), activation='relu', padding='same')(last_output)
x = layers.GlobalAveragePooling2D()(x)
mergedOut = Concatenate()([x, model2.output])
x = layers.Dense(12, activation="softmax", name="classification")(mergedOut)

model = Model([pre_trained_model.input, model2.input], x)

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['acc'],
              run_eagerly=False)
The schema of the model is the following (diagram not reproduced here).
The problem is that when I do the training
history = model.fit(testCustomDataGen_for_train,
                    validation_data=testCustomDataGen_for_validation,
                    epochs=150,
                    verbose=1)
I get an error:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-26-4d4e59a0e1c6> in <module>()
3 validation_data=testCustomDataGen_for_validation,
4 epochs=150,
----> 5 verbose=1)
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
53 ctx.ensure_initialized()
54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
InvalidArgumentError: Graph execution error:
Detected at node 'gradient_tape/model_7/concatenate_9/ConcatOffset' defined at (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.7/dist-packages/traitlets/config/application.py", line 846, in launch_instance
app.start()
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelapp.py", line 499, in start
self.io_loop.start()
File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 132, in start
self.asyncio_loop.run_forever()
File "/usr/lib/python3.7/asyncio/base_events.py", line 541, in run_forever
self._run_once()
File "/usr/lib/python3.7/asyncio/base_events.py", line 1786, in _run_once
handle._run()
File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 122, in _handle_events
handler_func(fileobj, events)
File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 452, in _handle_events
self._handle_recv()
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 481, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 431, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-26-4d4e59a0e1c6>", line 5, in <module>
verbose=1)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1384, in fit
tmp_logs = self.train_function(iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 863, in train_step
self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
File "/usr/local/lib/python3.7/dist-packages/keras/optimizer_v2/optimizer_v2.py", line 531, in minimize
loss, var_list=var_list, grad_loss=grad_loss, tape=tape)
File "/usr/local/lib/python3.7/dist-packages/keras/optimizer_v2/optimizer_v2.py", line 583, in _compute_gradients
grads_and_vars = self._get_gradients(tape, loss, var_list, grad_loss)
File "/usr/local/lib/python3.7/dist-packages/keras/optimizer_v2/optimizer_v2.py", line 464, in _get_gradients
grads = tape.gradient(loss, var_list, grad_loss)
Node: 'gradient_tape/model_7/concatenate_9/ConcatOffset'
All dimensions except 1 must match. Input 1 has shape [64 1] and doesn't match input 0 with shape [28 128].
[[{{node gradient_tape/model_7/concatenate_9/ConcatOffset}}]] [Op:__inference_train_function_37647]
Maybe I'm too much of a newbie, but I don't get the sense of the error. Can anybody give me a hint on where I can intervene to address the issue?
The colab notebook is here:
https://colab.research.google.com/drive/17nIpC4OUy5gk0AnVhwNawfel4-HjS_h4?usp=sharing
Any help is warmly welcome.
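For what it's worth, the shapes in the message are consistent with a batch misalignment: the image branch received a short final batch of 28 images from flow_from_directory, while the measures branch still produced 64 entries, and the filename slice uses index where a batch offset (index * batch_size) is expected. A hypothetical sketch of a __getitem__ that keeps the two inputs aligned (it assumes the inner generator's batches follow the order of self.gen.filenames, i.e. shuffle disabled):

# Hypothetical sketch, not the original code: slice filenames by batch offset
# and match the actual length of the batch returned by the inner generator.
def __getitem__(self, index):
    imgs, labels = self.gen[index]              # final batch may be short (e.g. 28)
    start = index * self.gen.batch_size
    filenames_np = np.vectorize(os.path.basename)(
        np.array(self.gen.filenames[start : start + len(imgs)]))
    measures_of_files = np.vectorize(self.measures.get)(filenames_np)
    return (imgs, measures_of_files), labels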

Node: 'mean_absolute_error/sub' required broadcastable shapes [[{{node mean_absolute_error/sub}}]] [Op:__inference_train_function_1827]

I am trying to implement an autoencoder using a data generator. The autoencoder works fine if I train it directly on X_train, but when I try to use the data generator it always returns this error.
#!/usr/bin/env python
# coding: utf-8

import zipfile
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Activation, MaxPooling2D, Reshape, Input, Dropout
from tensorflow.keras.layers import BatchNormalization, Lambda, Conv2DTranspose, Add
from tensorflow.keras import regularizers
from tensorflow.keras import initializers
from tensorflow.keras import constraints
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import Sequence
import random
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from matplotlib import pyplot as plt

# download the dataset
train_zip_path = tf.keras.utils.get_file(
    fname='train_femto.zip',
    origin='https://hdvirtual.us.es/discovirt/index.php/s/z75ekAqY3tt3aKy/download',
    cache_subdir='datasets', extract=False,
)

WINDOW_SIZE = 500
HEADERS = ['Hour', 'Minute', 'Second', 'Microsecond', 'H-acc', 'V-acc']
FEATURES = ['H-acc', 'V-acc']
TEST_RULS = {
    'Bearing1_3': 5730,
    'Bearing1_4': 339,
    'Bearing1_5': 1610,
    'Bearing1_6': 1460,
    'Bearing1_7': 7570,
    'Bearing2_3': 7530,
    'Bearing2_4': 1390,
    'Bearing2_5': 3090,
    'Bearing2_6': 1290,
    'Bearing2_7': 580,
    'Bearing3_3': 820
}
def read_femto_file(z, file_path, bearing):
    with z.open(file_path) as f:
        X_aux = pd.read_csv(f, names=HEADERS, delimiter=",")
        X_aux['Bearing'] = bearing.split('/')[-1][-3:]
        del X_aux['Hour']
        del X_aux['Minute']
        del X_aux['Second']
        del X_aux['Microsecond']
        return X_aux

def read_femto_dataset(file_path, RULS=None):
    datasets = []
    with zipfile.ZipFile(file_path) as z:
        files = z.namelist()
        dirs = sorted(set(['/'.join(f.split('/')[:-1]) for f in files if len(f.split('/')) > 2]))
        ds = []
        for bearing in dirs:
            bearing_name = bearing.split('/')[-1]
            print("Reading", bearing_name)
            bearing_files = sorted([f for f in files if bearing in f and 'acc' in f])
            for i, bearing_file in enumerate(bearing_files):
                ds.append(read_femto_file(z, bearing_file, bearing))
            X = pd.concat(ds, axis=0)
            X = X.reset_index(drop=True)
            # compute RUL
            X['RUL'] = (X.index / 256).astype('int')[::-1].values
            if RULS is not None:
                X['RUL'] += RULS[bearing.split('/')[-1]]
            X.RUL = X.RUL.astype('int32')
            datasets.append(X)
    X = pd.concat(datasets, axis=0)
    X['H-acc'] = X['H-acc'].astype('float32')
    X['V-acc'] = X['V-acc'].astype('float32')  # erratum: H-acc instead of V-acc
    X['RUL'] = X['RUL'].astype('int32')
    return X
# read the training set
X_train = read_femto_dataset(train_zip_path)

# clip the RUL and normalize it between 100 and 0
X_train['RUL'] = X_train.RUL.clip(0, 10000) / 100

# split into training and validation
X_val = X_train[X_train.Bearing.isin(['1_1', '2_1', '3_1'])]
X_train = X_train[X_train.Bearing.isin(['1_2', '2_2', '3_2'])]

# normalize the sensor features
min_max = {}
for feature in FEATURES:
    # compute the normalization parameters on the training set
    min_max[feature] = {}
    min_max[feature]['min'] = X_train[feature].min()
    min_max[feature]['max'] = X_train[feature].max()
    # normalize both datasets
    X_train[feature] = ((X_train[feature] - min_max[feature]['min']) /
                        (min_max[feature]['max'] - min_max[feature]['min']))
    X_val[feature] = ((X_val[feature] - min_max[feature]['min']) /
                      (min_max[feature]['max'] - min_max[feature]['min']))
    print("Feature %s: train(%f, %f), val(%f, %f)" % (feature, X_train[feature].min(),
                                                      X_train[feature].max(), X_val[feature].min(),
                                                      X_val[feature].max()))
# data generator
class DataGenerator(Sequence):
    def __init__(self, X, attributes, window_size=10, batch_size=32,
                 epoch_len_reducer=100, add_extra_channel=False,
                 return_label=True, y_key='Y', unit_key='id'):
        self.batch_size = batch_size
        self.return_label = return_label
        self.window_size = window_size
        self.attributes = attributes
        self.epoch_len_reducer = epoch_len_reducer
        self._X = {}
        self._Y = {}
        self._ids = X[unit_key].unique()
        self.add_extra_channel = add_extra_channel
        for _id in self._ids:
            self._X[_id] = X.loc[(X[unit_key] == _id), self.attributes].values
            self._Y[_id] = X.loc[(X[unit_key] == _id), y_key].values
        self.__len = int((X.groupby(unit_key).size() - self.window_size).sum() /
                         self.batch_size)
        del X

    def __len__(self):
        return int(self.__len / self.epoch_len_reducer)

    def __getitem__(self, index):
        _X = []
        _y = []
        for _ in range(self.batch_size):
            sid = random.choice(self._ids)
            unit = self._X[sid]
            nrows = unit.shape[0]
            cut = random.randint(0, nrows - self.window_size)
            s = unit[cut : cut + self.window_size].T
            y = self._Y[sid][cut + self.window_size - 1]
            _X.append(s)
            _y.append(y)
        _X = np.array(_X)
        if self.add_extra_channel:
            _X = _X.reshape(_X.shape + (1,))
        if self.return_label:
            return _X, np.array(_y).reshape((self.batch_size, 1))
        else:
            return _X, _X

    def on_epoch_end(self):
        pass
# generators
train_gen = DataGenerator(X_train, attributes=FEATURES, window_size=WINDOW_SIZE, batch_size=256,
                          add_extra_channel=True, return_label=True, y_key='RUL', unit_key='Bearing')
val_gen = DataGenerator(X_val, attributes=FEATURES, window_size=WINDOW_SIZE, batch_size=256,
                        add_extra_channel=True, return_label=True, y_key='RUL',
                        unit_key='Bearing', epoch_len_reducer=1000)
# Autoencoder
class AutoEncoder(Model):
    def __init__(self):
        super(AutoEncoder, self).__init__()
        self.encoder = tf.keras.Sequential([
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(32, activation='relu'),
            tf.keras.layers.Dense(16, activation='relu'),
            tf.keras.layers.Dense(8, activation='relu')])
        self.decoder = tf.keras.Sequential([
            tf.keras.layers.Dense(16, activation='relu'),
            tf.keras.layers.Dense(32, activation='relu'),
            tf.keras.layers.Dense(64, activation='relu'),
            tf.keras.layers.Dense(2, activation='sigmoid')
        ])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded

# definition of the autoencoder
model = AutoEncoder()
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2, mode='min')
model.compile(optimizer='adam', loss='mae')
history = model.fit_generator(train_gen,
                              validation_data=val_gen,
                              epochs=10)
This is where I get the error. I have tried using different parameters, but the result is always the same:
Epoch 1/10
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:3: UserWarning: `Model.fit_generator` is deprecated and will be removed in a future version. Please use `Model.fit`, which supports generators.
This is separate from the ipykernel package so we can avoid doing imports until
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-12-ee4502a47627> in <module>()
1 history = model.fit_generator(train_gen,
2 validation_data = val_gen,
----> 3 epochs = 10)
2 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
53 ctx.ensure_initialized()
54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
InvalidArgumentError: Graph execution error:
Detected at node 'mean_absolute_error/sub' defined at (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.7/dist-packages/traitlets/config/application.py", line 846, in launch_instance
app.start()
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelapp.py", line 499, in start
self.io_loop.start()
File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 132, in start
self.asyncio_loop.run_forever()
File "/usr/lib/python3.7/asyncio/base_events.py", line 541, in run_forever
self._run_once()
File "/usr/lib/python3.7/asyncio/base_events.py", line 1786, in _run_once
handle._run()
File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 122, in _handle_events
handler_func(fileobj, events)
File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 452, in _handle_events
self._handle_recv()
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 481, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 431, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2822, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-7-342600d44df8>", line 3, in <module>
epochs = 10)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 2223, in fit_generator
initial_epoch=initial_epoch)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1384, in fit
tmp_logs = self.train_function(iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1021, in train_function
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1010, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1000, in run_step
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 860, in train_step
loss = self.compute_loss(x, y, y_pred, sample_weight)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 919, in compute_loss
y, y_pred, sample_weight, regularization_losses=self.losses)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 245, in call
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 1457, in mean_absolute_error
return backend.mean(tf.abs(y_pred - y_true), axis=-1)
Node: 'mean_absolute_error/sub'
required broadcastable shapes
[[{{node mean_absolute_error/sub}}]] [Op:__inference_train_function_1827]
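One observation, offered as a hypothesis rather than a confirmed fix: with return_label=True the generator yields RUL targets of shape (256, 1), while the autoencoder outputs a reconstruction of the whole input window, so the MAE subtraction has mismatched shapes. The generator already supports returning (X, X) via return_label=False; a minimal sketch of that configuration (the window/channel layout may still need to match what the Dense encoder expects on its last axis):

# Hypothetical sketch: return (X, X) so MAE compares the reconstruction
# against the input window instead of the (batch, 1) RUL labels.
train_gen = DataGenerator(X_train, attributes=FEATURES, window_size=WINDOW_SIZE,
                          batch_size=256, add_extra_channel=True,
                          return_label=False, y_key='RUL', unit_key='Bearing')
val_gen = DataGenerator(X_val, attributes=FEATURES, window_size=WINDOW_SIZE,
                        batch_size=256, add_extra_channel=True,
                        return_label=False, y_key='RUL',
                        unit_key='Bearing', epoch_len_reducer=1000)
history = model.fit(train_gen, validation_data=val_gen, epochs=10)  # fit accepts generators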

Incompatible shapes: [84,6] vs. [128,6]. Error at end of first epoch

This is the model that I built. Please help me understand whether the problem is with my model or something else.
The error occurs after this:
Train on 63828 samples, validate on 95743 samples
Epoch 1/1
63744/63828 [============================>.] - ETA: 2s - loss: 0.3427 - acc: 0.9943
The error occurs at the end, so I removed the validation set during training.
from tensorflow.python.keras.layers import Embedding, Input
from tensorflow.python.keras.layers import LSTM, Bidirectional, GlobalMaxPool1D, Dropout

embedding_layer = Embedding(num_of_words, EMBEDDING_DIM, weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH, trainable=False)

# building the model
# INPUT LAYER
input_layer = Input((MAX_SEQUENCE_LENGTH,))

# EMBEDDING LAYER
embedding_layer = embedding_layer(input_layer)

# BI-LSTM LAYER
lstm_layer_output = Bidirectional(LSTM(128, return_sequences=True))(embedding_layer)
lstm, forward_h, forward_c, backward_h, backward_c = Bidirectional(
    LSTM(128,
         dropout=0.2,
         return_sequences=True,
         return_state=True,
         recurrent_activation='relu',
         recurrent_initializer='glorot_uniform'))(embedding_layer)

from tensorflow.python.keras import backend as K

# CNN LAYER WITH KERNELS 3, 4, 5
from tensorflow.python.keras.layers import Conv1D, MaxPooling1D

first_conv_layer = Conv1D(128, 3, activation='relu')(lstm_layer_output)
first_max_pooling_layer = MaxPooling1D(3)(first_conv_layer)
second_conv_layer = Conv1D(128, 4, activation='relu')(first_max_pooling_layer)
second_max_pooling_layer = MaxPooling1D(4)(second_conv_layer)
third_conv_layer = Conv1D(128, 5, activation='relu')(second_max_pooling_layer)
# third_max_pooling_layer = MaxPooling1D(5)(third_conv_layer)
global_max_pooling = GlobalMaxPool1D()(third_conv_layer)

# from tensorflow.python.keras.layers import Concatenate
# merged_pooling_layers = Concatenate(axis=1)([first_max_pooling_layer, second_max_pooling_layer, third_max_pooling_layer])
# global_max_pooling = GlobalMaxPool1D()(merged_pooling_layers)

# implementing attention layer manually
from tensorflow.python.keras.layers import Add

rnn_output = Add()([forward_h, backward_h])
hidden_size = int(lstm.shape[2])

from tensorflow.python.keras.layers import Lambda
hsf = Lambda(lambda x: x[:, -1], output_shape=(hidden_size,), name='last_hidden_state_forward')(rnn_output)

from tensorflow.python.keras.layers import Multiply

def norm(m):
    return K.transpose(m)

u_t = Multiply()([Lambda(norm)(rnn_output), hsf])
context_vector = Multiply()([u_t, global_max_pooling])

def ex(m):
    return K.exp(context_vector)

exp_u_t = Lambda(ex)(context_vector)

from tensorflow.python.keras.layers import Dense
attention_vector = Dense(128, activation='softmax')(exp_u_t)

x = Dense(64, activation="softmax")(weighted_input)
output_layer = Dense(6, activation="softmax")(x)

from tensorflow.python.keras.models import Model
from tensorflow.python.keras.optimizers import Adam

model = Model(input_layer, output_layer)

model.compile(
    loss='categorical_crossentropy',
    optimizer='sgd',
    metrics=['accuracy']
)

print('Training model...')
r = model.fit(
    data,
    target_values,
    batch_size=128,
    epochs=1,
    validation_split=0.0
)
The error I got is this:
InvalidArgumentError (see above for traceback): Incompatible shapes: [84,6] vs. [128,6]
[[Node: training/SGD/gradients/loss/dense_3_loss/mul_grad/BroadcastGradientArgs = BroadcastGradientArgs[T=DT_INT32, _class=["loc:#training/SGD/gradients/loss/dense_3_loss/mul_grad/Reshape_1"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](training/SGD/gradients/loss/dense_3_loss/mul_grad/Shape, training/SGD/gradients/loss/dense_3_loss/mul_grad/Shape_1)]]
Please help me fix this problem. Thank you.
Edit:
This is the traceback of the error:
Epoch 1/1
31872/31914 [============================>.] - ETA: 1s - loss: 0.2419 Traceback (most recent call last):
File "<ipython-input-1-a7cc2e59a772>", line 165, in <module>
validation_split=0.8
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\engine\training.py", line 1216, in fit
validation_steps=validation_steps)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\engine\training_arrays.py", line 245, in fit_loop
outs = f(ins_batch)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\backend.py", line 2824, in __call__
fetches=fetches, feed_dict=feed_dict, **self.session_kwargs)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 900, in run
run_metadata_ptr)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1135, in _run
feed_dict_tensor, options, run_metadata)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1316, in _do_run
run_metadata)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1335, in _do_call
raise type(e)(node_def, op, message)
InvalidArgumentError: Incompatible shapes: [128,6] vs. [42,6]
[[Node: training/SGD/gradients/loss/dense_3_loss/logistic_loss/mul_grad/BroadcastGradientArgs = BroadcastGradientArgs[T=DT_INT32, _class=["loc:#training/SGD/gradients/loss/dense_3_loss/logistic_loss/mul_grad/Reshape"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](training/SGD/gradients/loss/dense_3_loss/logistic_loss/mul_grad/Shape, training/SGD/gradients/loss/dense_3_loss/logistic_loss/mul_grad/Shape_1)]]
Caused by op 'training/SGD/gradients/loss/dense_3_loss/logistic_loss/mul_grad/BroadcastGradientArgs', defined at:
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\spyder\utils\ipython\start_kernel.py", line 268, in <module>
main()
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\spyder\utils\ipython\start_kernel.py", line 264, in main
kernel.start()
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\ipykernel\kernelapp.py", line 478, in start
self.io_loop.start()
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\zmq\eventloop\ioloop.py", line 177, in start
super(ZMQIOLoop, self).start()
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tornado\ioloop.py", line 888, in start
handler_func(fd_obj, events)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tornado\stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\zmq\eventloop\zmqstream.py", line 440, in _handle_events
self._handle_recv()
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\zmq\eventloop\zmqstream.py", line 472, in _handle_recv
self._run_callback(callback, msg)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\zmq\eventloop\zmqstream.py", line 414, in _run_callback
callback(*args, **kwargs)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tornado\stack_context.py", line 277, in null_wrapper
return fn(*args, **kwargs)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\ipykernel\kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\ipykernel\ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\ipykernel\zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2728, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2850, in run_ast_nodes
if self.run_code(code, result):
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2910, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-1-a7cc2e59a772>", line 165, in <module>
validation_split=0.8
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\engine\training.py", line 1216, in fit
validation_steps=validation_steps)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\engine\training_arrays.py", line 90, in fit_loop
model._make_train_function()
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\engine\training.py", line 572, in _make_train_function
params=self._collected_trainable_weights, loss=self.total_loss)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\optimizers.py", line 208, in get_updates
grads = self.get_gradients(loss, params)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\optimizers.py", line 114, in get_gradients
grads = K.gradients(loss, params)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\backend.py", line 2866, in gradients
loss, variables, colocate_gradients_with_ops=True)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_impl.py", line 494, in gradients
gate_gradients, aggregation_method, stop_gradients)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_impl.py", line 636, in _GradientsHelper
lambda: grad_fn(op, *out_grads))
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_impl.py", line 385, in _MaybeCompile
return grad_fn() # Exit early
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\ops\gradients_impl.py", line 636, in <lambda>
lambda: grad_fn(op, *out_grads))
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\ops\math_grad.py", line 874, in _MulGrad
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 673, in broadcast_gradient_args
"BroadcastGradientArgs", s0=s0, s1=s1, name=name)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 3392, in create_op
op_def=op_def)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 1718, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
...which was originally created as op 'loss/dense_3_loss/logistic_loss/mul', defined at:
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\spyder\utils\ipython\start_kernel.py", line 268, in <module>
main()
[elided 16 identical lines from previous traceback]
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\IPython\core\interactiveshell.py", line 2910, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-1-a7cc2e59a772>", line 153, in <module>
optimizer='sgd',
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\engine\training.py", line 428, in compile
output_loss = weighted_loss(y_true, y_pred, sample_weight, mask)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\engine\training_utils.py", line 438, in weighted
score_array = fn(y_true, y_pred)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\losses.py", line 116, in binary_crossentropy
return K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\keras\_impl\keras\backend.py", line 3448, in binary_crossentropy
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\ops\nn_impl.py", line 181, in sigmoid_cross_entropy_with_logits
relu_logits - logits * labels,
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py", line 979, in binary_op_wrapper
return func(x, y, name=name)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py", line 1211, in _mul_dispatch
return gen_math_ops.mul(x, y, name=name)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_math_ops.py", line 4758, in mul
"Mul", x=x, y=y, name=name)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
op_def=op_def)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 3392, in create_op
op_def=op_def)
File "C:\Users\JCMat\New\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py", line 1718, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
InvalidArgumentError (see above for traceback): Incompatible shapes: [128,6] vs. [42,6]
[[Node: training/SGD/gradients/loss/dense_3_loss/logistic_loss/mul_grad/BroadcastGradientArgs = BroadcastGradientArgs[T=DT_INT32, _class=["loc:@training/SGD/gradients/loss/dense_3_loss/logistic_loss/mul_grad/Reshape"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](training/SGD/gradients/loss/dense_3_loss/logistic_loss/mul_grad/Shape, training/SGD/gradients/loss/dense_3_loss/logistic_loss/mul_grad/Shape_1)]]
The issue is that your last batch doesn't contain 128 rows but only 42, because the length of your dataset isn't divisible by the batch size without a remainder (hence the reported shapes [128,6] vs. [42,6]). Either adjust your code to allow for a variable batch dimension, or pad (or drop) the last, smaller batch.
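For example, here is a minimal sketch of the trim/pad options, assuming the training data lives in NumPy arrays named x_train and y_train (the names are illustrative, not taken from your code):

import numpy as np

batch_size = 128
remainder = len(x_train) % batch_size  # 42 in the error above

if remainder:
    # Option A: drop the incomplete batch so every batch has exactly 128 rows
    x_train = x_train[:len(x_train) - remainder]
    y_train = y_train[:len(y_train) - remainder]
    # Option B: pad the last batch up to 128 rows by repeating early samples
    # pad = batch_size - remainder
    # x_train = np.concatenate([x_train, x_train[:pad]], axis=0)
    # y_train = np.concatenate([y_train, y_train[:pad]], axis=0)

Dropping at most batch_size - 1 samples is usually harmless; padding keeps every sample but slightly over-weights the repeated ones.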

How do I feed the inputs of a network if model.fit has not fed them?

I have a simple network in Keras, and I define a custom layer that does some operations on the input tensor and then returns it to the network. When I implement it, though, it produces the following error saying the input has not been fed, whereas I thought the fit function is what feeds the network. Could you please help me with this issue? I could not find a suitable answer that solves my problem. I put my code here too. Thank you.
def C(u):
    # DCT normalization factor: 1/sqrt(2) for the DC term, 1 otherwise
    if u == 0:
        return 1. / np.sqrt(2.)
    else:
        return 1.

def DCT(a, b):
    # naive 2-D DCT-II of an 8x8 block a, accumulated into b
    for u in range(8):
        for v in range(8):
            for x in range(8):
                for y in range(8):
                    b[u, v] = b[u, v] + 0.25 * C(u) * C(v) * a[x, y] * np.cos((2 * x + 1) * u * np.pi / 16) * np.cos((2 * y + 1) * v * np.pi / 16)

def IDCT(a, b):
    # naive 2-D inverse DCT of an 8x8 coefficient block a, accumulated into b
    for u in range(8):
        for v in range(8):
            for x in range(8):
                for y in range(8):
                    b[x, y] = b[x, y] + 0.25 * C(u) * C(v) * a[u, v] * np.cos((2 * x + 1) * u * np.pi / 16) * np.cos((2 * y + 1) * v * np.pi / 16)

def quntize_mask(window_size: int, keep_count: int):
    # keep only the first keep_count coefficients in JPEG zig-zag order
    mask = np.zeros((window_size, window_size), dtype=np.uint8)
    index_order = sorted(((x, y) for x in range(window_size) for y in range(window_size)),
                         key=lambda p: (p[0] + p[1], -p[1] if (p[0] + p[1]) % 2 else p[1]))
    for i, j in index_order[0:keep_count]:
        mask[i, j] = 1
    return mask

def slicAndJpeg(img):
    # DCT -> mask -> IDCT on every 8x8 block of the image
    for i in range(int(img.shape[1].value / 8)):
        for j in range(int(img.shape[2].value / 8)):
            temp = img[:, i * 8:i * 8 + 8, j * 8:j * 8 + 8]
            tempb = np.zeros((8, 8))
            DCT(temp, tempb)
            mask = quntize_mask(8, 9)
            qunz = Kr.layers.multiply(mask, tempb)
            tempc = K.zeros((8, 8))
            IDCT(qunz, tempc)
            img[:, i * 8:i * 8 + 8, j * 8:j * 8 + 8] = tempc
class JPEGLayer(Layer):
    def __init__(self, **kwargs):
        super(JPEGLayer, self).__init__(**kwargs)
        self.supports_masking = True

    def call(self, noised_image, training=True):
        def noise():
            # noised_image = noised_and_cover
            # pad the image so that we can do dct on 8x8 blocks
            pad_height = (8 - noised_image.shape[1] % 8) % 8
            pad_width = (8 - noised_image.shape[2] % 8) % 8
            noised_image_pad = Kr.layers.ZeroPadding2D(padding=((pad_width, 0), (pad_height, 0)))(noised_image)
            slicAndJpeg(K.eval(noised_image_pad))
            # un-pad
            noised_and_cover = noised_image_pad[:, :noised_image_pad.shape[1] - pad_height, :noised_image_pad.shape[2] - pad_width]
            return noised_and_cover
        return noise()
#-----------------building w train---------------------------------------------
wt_random=np.random.randint(2, size=(49999,4,4))
w_expand=wt_random.astype(np.float32)
wv_random=np.random.randint(2, size=(9999,4,4))
wv_expand=wv_random.astype(np.float32)
x,y,z=w_expand.shape
w_expand=w_expand.reshape((x,y,z,1))
x,y,z=wv_expand.shape
wv_expand=wv_expand.reshape((x,y,z,1))
#-----------------building w test---------------------------------------------
w_test = np.random.randint(2,size=(1,4,4))
w_test=w_test.astype(np.float32)
w_test=w_test.reshape((1,4,4,1))
#-----------------------encoder------------------------------------------------
#------------------------------------------------------------------------------
image = Input((28, 28, 1))
conv1 = Conv2D(64, (5, 5),activation='relu',padding='same', name='convl1e')(image)
wtm=Input((4,4,1))
#--------------------------------------------------------------
wpad=Kr.layers.Lambda(lambda xy: xy[0] + Kr.backend.spatial_2d_padding(xy[1], padding=((0, 24), (0, 24))))
encoded_merged=wpad([conv1,wtm])
#-----------------------decoder------------------------------------------------
#------------------------------------------------------------------------------
decoded = Conv2D(1, (5, 5),activation='relu', padding='same', name='decoder_output')(encoded_merged)
model=Model(inputs=[image,wtm],outputs=decoded)
model.summary()
decoded_noise=JPEGLayer()(decoded)#16
#----------------------w extraction------------------------------------
convw1 = Conv2D(64, (5,5),activation='relu' , name='conl1w')(decoded_noise)#24
convw2 = Conv2D(64, (5,5),activation='relu' , name='conl2w')(convw1)#20
#Avw1=AveragePooling2D(pool_size=(2,2))(convw2)
convw3 = Conv2D(64, (5,5),activation='relu' ,name='conl3w')(convw2)#16
convw4 = Conv2D(64, (5,5), activation='relu' ,name='conl4w')(convw3)#12
#Avw2=AveragePooling2D(pool_size=(2,2))(convw4)
convw5 = Conv2D(64, (5,5), activation='relu' ,name='conl5w')(convw4)#8
convw6 = Conv2D(64, (5,5), activation='relu' ,name='conl6w')(convw5)#4
pred_w = Conv2D(1, (1, 1),activation='relu' ,padding='same', name='reconstructed_W')(convw6)
model1=Model(inputs=[image,wtm],outputs=[decoded,pred_w])
model1.summary()
#----------------------training the model--------------------------------------
#------------------------------------------------------------------------------
#----------------------Data preparation----------------------------------------
(x_train, _), (x_test, _) = mnist.load_data()
x_validation=x_train[1:10000,:,:]
x_train=x_train[10001:60000,:,:]
#
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_validation = x_validation.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_validation = np.reshape(x_validation, (len(x_validation), 28, 28, 1))
#---------------------compile and train the model------------------------------
opt=SGD(momentum=0.99,lr=0.0001)
model1.compile(optimizer='adam', loss={'imageprim':'mse','wprim':'binary_crossentropy'}, loss_weights={'imageprim': 0.5, 'wprim': 1.0},metrics=['mae'])
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=40)
#rlrp = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=20, min_delta=1E-4, verbose=1)
mc = ModelCheckpoint('sendAct.h5', monitor='val_loss', mode='min', verbose=1, save_best_only=True)
history = model1.fit([x_train, w_expand], [x_train, w_expand],
                     epochs=4000,
                     batch_size=32,
                     validation_data=([x_validation, wv_expand], [x_validation, wv_expand]),
                     callbacks=[TensorBoard(log_dir='/home/jamalm8/tensorboardGNWLoss/', histogram_freq=0, write_graph=False), es, mc])
model1.summary()
Traceback (most recent call last):
  File "", line 124, in <module>
    decoded_noise=JPEGLayer()(decoded)#16
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\engine\base_layer.py", line 457, in __call__
    output = self.call(inputs, **kwargs)
  File "", line 94, in call
    return noise()
  File "", line 88, in noise
    slicAndJpeg(K.eval(noised_image_pad))
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\backend\tensorflow_backend.py", line 673, in eval
    return to_dense(x).eval(session=get_session())
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\ops.py", line 713, in eval
    return _eval_using_default_session(self, feed_dict, self.graph, session)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\ops.py", line 5157, in _eval_using_default_session
    return session.run(tensors, feed_dict)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\client\session.py", line 929, in run
    run_metadata_ptr)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\client\session.py", line 1152, in _run
    feed_dict_tensor, options, run_metadata)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\client\session.py", line 1328, in _do_run
    run_metadata)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\client\session.py", line 1348, in _do_call
    raise type(e)(node_def, op, message)
InvalidArgumentError: You must feed a value for placeholder tensor 'input_1' with dtype float and shape [?,28,28,1]
   [[node input_1 (defined at D:\software\Anaconda3\envs\py36\lib\site-packages\keras\backend\tensorflow_backend.py:517) = Placeholder[dtype=DT_FLOAT, shape=[?,28,28,1], _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]
   [[{{node jpeg_layer_1/zero_padding2d_1/Pad/_9}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_39_jpeg_layer_1/zero_padding2d_1/Pad", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]

Caused by op 'input_1', defined at:
  File "D:\software\Anaconda3\envs\py36\lib\runpy.py", line 193, in _run_module_as_main
    "__main__", mod_spec)
  File "D:\software\Anaconda3\envs\py36\lib\runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\spyder_kernels\console\__main__.py", line 11, in <module>
    start.main()
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\spyder_kernels\console\start.py", line 310, in main
    kernel.start()
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\ipykernel\kernelapp.py", line 505, in start
    self.io_loop.start()
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tornado\platform\asyncio.py", line 132, in start
    self.asyncio_loop.run_forever()
  File "D:\software\Anaconda3\envs\py36\lib\asyncio\base_events.py", line 438, in run_forever
    self._run_once()
  File "D:\software\Anaconda3\envs\py36\lib\asyncio\base_events.py", line 1451, in _run_once
    handle._run()
  File "D:\software\Anaconda3\envs\py36\lib\asyncio\events.py", line 145, in _run
    self._callback(*self._args)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tornado\ioloop.py", line 758, in _run_callback
    ret = callback()
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tornado\stack_context.py", line 300, in null_wrapper
    return fn(*args, **kwargs)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tornado\gen.py", line 1233, in inner
    self.run()
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tornado\gen.py", line 1147, in run
    yielded = self.gen.send(value)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\ipykernel\kernelbase.py", line 357, in process_one
    yield gen.maybe_future(dispatch(*args))
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tornado\gen.py", line 326, in wrapper
    yielded = next(result)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\ipykernel\kernelbase.py", line 267, in dispatch_shell
    yield gen.maybe_future(handler(stream, idents, msg))
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tornado\gen.py", line 326, in wrapper
    yielded = next(result)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\ipykernel\kernelbase.py", line 534, in execute_request
    user_expressions, allow_stdin,
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tornado\gen.py", line 326, in wrapper
    yielded = next(result)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\ipykernel\ipkernel.py", line 294, in do_execute
    res = shell.run_cell(code, store_history=store_history, silent=silent)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\ipykernel\zmqshell.py", line 536, in run_cell
    return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\IPython\core\interactiveshell.py", line 2819, in run_cell
    raw_cell, store_history, silent, shell_futures)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\IPython\core\interactiveshell.py", line 2845, in _run_cell
    return runner(coro)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\IPython\core\async_helpers.py", line 67, in _pseudo_sync_runner
    coro.send(None)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\IPython\core\interactiveshell.py", line 3020, in run_cell_async
    interactivity=interactivity, compiler=compiler, result=result)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\IPython\core\interactiveshell.py", line 3185, in run_ast_nodes
    if (yield from self.run_code(code, result)):
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\IPython\core\interactiveshell.py", line 3267, in run_code
    exec(code_obj, self.user_global_ns, self.user_ns)
  File "", line 114, in <module>
    image = Input((28, 28, 1))
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\engine\input_layer.py", line 178, in Input
    input_tensor=tensor)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\legacy\interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\engine\input_layer.py", line 87, in __init__
    name=self.name)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\backend\tensorflow_backend.py", line 517, in placeholder
    x = tf.placeholder(dtype, shape=shape, name=name)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\ops\array_ops.py", line 1747, in placeholder
    return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 5206, in placeholder
    "Placeholder", dtype=dtype, shape=shape, name=name)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
    op_def=op_def)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\util\deprecation.py", line 488, in new_func
    return func(*args, **kwargs)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\ops.py", line 3274, in create_op
    op_def=op_def)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\ops.py", line 1770, in __init__
    self._traceback = tf_stack.extract_stack()

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'input_1' with dtype float and shape [?,28,28,1]
   [[node input_1 (defined at D:\software\Anaconda3\envs\py36\lib\site-packages\keras\backend\tensorflow_backend.py:517) = Placeholder[dtype=DT_FLOAT, shape=[?,28,28,1], _device="/job:localhost/replica:0/task:0/device:GPU:0"]()]]
   [[{{node jpeg_layer_1/zero_padding2d_1/Pad/_9}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_39_jpeg_layer_1/zero_padding2d_1/Pad", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]()]]
It's caused by the line slicAndJpeg(K.eval(noised_image_pad)) in your JPEGLayer class. At the point where call() runs, the model is only being built: noised_image_pad is still a symbolic tensor, nothing has been fed to the input_1 placeholder yet, and so K.eval() has no value to compute. This can be fixed by removing that noise() function completely and doing the padding/slicing and the other tasks in preprocessing instead.
Replace the JPEGLayer class with the following and it should work (assuming the input data is already padded):
class JPEGLayer(Layer):
    def __init__(self, **kwargs):
        super(JPEGLayer, self).__init__(**kwargs)
        self.supports_masking = True

    def call(self, noised_image, training=True):
        return noised_image
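If the JPEG-style compression is still needed, one option consistent with the above is to apply it to the NumPy arrays before calling fit, where no symbolic tensor ever has to be evaluated. A minimal sketch, assuming the images are already padded to a multiple of 8 per side; jpeg_compress is a hypothetical helper, and the matrix-product form is a standard rewrite of the quadruple DCT/IDCT loops in the question, not code from the original post:

import numpy as np

# Orthonormal 8x8 DCT-II basis: dct_m @ block @ dct_m.T is the DCT,
# dct_m.T @ coef @ dct_m is the inverse DCT.
dct_m = np.array([[0.5 * (1. / np.sqrt(2.) if u == 0 else 1.) *
                   np.cos((2 * x + 1) * u * np.pi / 16)
                   for x in range(8)] for u in range(8)], dtype=np.float32)

def jpeg_compress(images, keep_count=9):
    # images: (N, H, W, 1) with H and W multiples of 8
    mask = quntize_mask(8, keep_count).astype(np.float32)  # zig-zag mask from the question
    out = images.copy()
    for i in range(0, images.shape[1], 8):
        for j in range(0, images.shape[2], 8):
            block = images[:, i:i + 8, j:j + 8, 0]                   # (N, 8, 8) blocks
            coef = np.einsum('ux,nxy,vy->nuv', dct_m, block, dct_m)  # blockwise DCT
            coef *= mask                                             # drop high frequencies
            out[:, i:i + 8, j:j + 8, 0] = np.einsum('ux,nuv,vy->nxy', dct_m, coef, dct_m)  # IDCT
    return out

The compressed images can then be passed to fit as ordinary NumPy data, so nothing has to run inside the graph while it is being built.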
