I want to solve the error below.
InvalidArgumentError: Input to reshape is a tensor with 737280 values, but the requested shape requires a multiple of 184832
I found this reference:
Python / Tensorflow - Input to reshape is a tensor with 92416 values, but the requested shape requires a multiple of 2304
However, looking at the answers there, I still do not know what to fix. So I would like to know how to check the size of the input image.
Thank you for your time.
My model:
# For multi_model
# imports assumed (not shown in the original post)
import tensorflow as tf
from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D, Dropout,
                                     Flatten, Dense, LeakyReLU, concatenate)
from tensorflow.keras.models import Model

activationFunction = 'elu'

def build_multi2(main_input_shape, output_dim):
    inputA = Input(shape=main_input_shape)
    ch1_model = create_convolution_layers(inputA)
    inputB = Input(shape=main_input_shape)
    ch2_model = create_convolution_layers(inputB)
    inputC = Input(shape=main_input_shape)
    ch3_model = create_convolution_layers(inputC)
    inputD = Input(shape=main_input_shape)
    ch4_model = create_convolution_layers(inputD)

    conv = concatenate([ch1_model, ch2_model, ch3_model, ch4_model])
    conv = Flatten()(conv)

    dense = Dense(512)(conv)
    dense = LeakyReLU(alpha=0.1)(dense)
    dense = Dropout(0.5)(dense)

    # use the output_dim parameter rather than the global N_class
    output = Dense(output_dim, activation='softmax')(dense)
    return Model(inputs=[inputA, inputB, inputC, inputD], outputs=[output])

def create_convolution_layers(input_img):
    # input_shape is not needed here; the shape comes from the Input tensor
    model = Conv2D(32, (3, 3), padding='same')(input_img)
    model = LeakyReLU(alpha=0.1)(model)
    model = MaxPooling2D((2, 2), padding='same')(model)
    model = Dropout(0.25)(model)
    model = Conv2D(64, (3, 3), padding='same')(model)
    model = LeakyReLU(alpha=0.1)(model)
    model = MaxPooling2D(pool_size=(2, 2), padding='same')(model)
    model = Dropout(0.25)(model)
    model = Conv2D(128, (3, 3), padding='same')(model)
    model = LeakyReLU(alpha=0.1)(model)
    model = MaxPooling2D(pool_size=(2, 2), padding='same')(model)
    model = Dropout(0.4)(model)
    return model
My model call:
# For model declaration
N_class = 20
main_input_shape = (150, 150, 3)
output_dim = N_class

# opt = tf.keras.optimizers.RMSprop(lr=0.001)
opt = tf.keras.optimizers.Adam()

clf = build_multi2(main_input_shape, output_dim)
clf.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
clf.summary()
My image size: 96×96 pixels
My ImageDataGenerator:
train_imgen = ImageDataGenerator(rescale=1./255,
                                 # shear_range=0.2,
                                 # zoom_range=0.2,
                                 # rotation_range=5.,
                                 horizontal_flip=False)
Answer:
You have specified your input shape as (150, 150, 3), but your images are (96, 96, 3); these are incompatible.
You can either resize your images to (150, 150, 3) or change your input shape to match your image shape.
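The numbers in the error are consistent with this (my own arithmetic, not from the original answer): with padding='same' and three 2×2 poolings, a 150×150 input shrinks to 19×19, so the four concatenated branches flatten to 19×19×128×4 = 184832 values per sample, which is the "multiple" the reshape demands. A 96×96 input instead shrinks to 12×12 and yields 12×12×128×4 = 73728 values per sample, and 737280 is exactly ten of those, i.e. one batch of 10 images.
To check the size of an input image, a minimal sketch (the file path is hypothetical):
from PIL import Image

img = Image.open('path/to/one/training_image.png')  # hypothetical path
print(img.size)  # (width, height), e.g. (96, 96)
Then the simplest fix, assuming you keep the 96×96 images, is to build the model with the matching shape:
main_input_shape = (96, 96, 3)
clf = build_multi2(main_input_shape, output_dim)
clf.summary() will also show the Flatten layer's output size, which is a quick way to confirm what the model expects.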
Related
I made a TensorFlow pipeline for loading numpy arrays (video data of shape (40,160,160,3)). However, it stops working after loading the first x batches.
The problem is solved by removing num_parallel_calls=AUTOTUNE, but then training becomes significantly slower (ETA/epoch ~30 min -> ~4 hours). Is there a way to load the numpy arrays in parallel (i.e. keep num_parallel_calls=AUTOTUNE) without any problems?
def get_label(file_path):
    import os
    parts = tf.strings.split(file_path, os.path.sep)
    return parts[-2]

def process_video(file_path):
    label = get_label(file_path)
    video = np.load(file_path, allow_pickle=True)
    return np.float32(video / 255), np.float32(label)

def set_shape(video, label):
    video.set_shape((40, 160, 160, 3))
    label.set_shape([])
    return video, label

## Data pipeline
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_ds = tf.data.Dataset.list_files("path/train/*/*", shuffle=True)
train_ds = train_ds.map(lambda item: tf.numpy_function(
    process_video, [item], (tf.float32, tf.float32)), num_parallel_calls=AUTOTUNE)
train_ds = train_ds.map(set_shape)
train_ds = train_ds.batch(8)
train_ds = train_ds.prefetch(AUTOTUNE)
## Model
def create_LRCN_model():
    model = Sequential()
    model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu'),
                              input_shape=(40, 160, 160, 3)))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu')))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu')))
    model.add(TimeDistributed(MaxPooling2D((2, 2))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Conv2D(32, (3, 3), padding='same', activation='relu')))
    model.add(TimeDistributed(MaxPooling2D((2, 2))))
    # model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(32))
    model.add(Dense(1, activation='sigmoid'))
    model.summary()
    return model
LRCN_model = create_LRCN_model()
early_stopping_callback = EarlyStopping(monitor = 'val_loss', patience = 15, mode = 'min', restore_best_weights = True)
LRCN_model.compile(loss='binary_crossentropy', optimizer = 'Adam', metrics = ["accuracy"])
LRCN_model_training_history = LRCN_model.fit(train_ds, validation_data= val_ds, epochs = 70, callbacks = [early_stopping_callback]) #class_weight= class_weights,
I have a data loader pipeline for video data. Although I specify the output of the pipeline, I still get the following error when calling model.fit: "ValueError: as_list() is not defined on an unknown TensorShape". I searched for the error, and most people say it happens because tf.numpy_function returns a shape that is unknown to the TensorFlow pipeline. Specifying the shape after that function should solve the problem; however, it does not.
AUTOTUNE = tf.data.experimental.AUTOTUNE

# get list of numpy files in directory
train_ds = tf.data.Dataset.list_files("dir")

# load numpy files (video with shape 40,160,160,3), get the corresponding label
# and output both video and label
def get_label(file_path):
    label = tf.strings.split(file_path, os.path.sep)
    return label[-2]

def process_image(file_path):
    label = get_label(file_path)
    video = np.load(file_path, allow_pickle=True)
    video = tf.convert_to_tensor(video / 255, dtype=tf.float32)
    return video, np.float32(label)

train_ds = train_ds.map(lambda item: tf.numpy_function(
    process_image, [item], (tf.float32, tf.float32)), num_parallel_calls=AUTOTUNE)

# convert video to tf object
def set_shape(video, label):
    video = tf.reshape(video, (40, 160, 160, 3))
    # video = tf.ensure_shape(video, (40,160,160,3))  # also does not work
    # video = tf.convert_to_tensor(video, dtype=tf.float32)  # also does not work
    return video, label

train_ds = train_ds.map(set_shape)

# batching
train_ds = train_ds.batch(batch_size=5)

# optimization
train_ds = train_ds.prefetch(AUTOTUNE)
train_ds.take(1)
Although the rest of the code seems fine (it does work when I manually input data), I will paste it in case it is not.
def create_LRCN_model():
    '''
    This function will construct the required LRCN model.
    Returns:
        model: the constructed LRCN model.
    '''
    # We will use a Sequential model for model construction.
    model = Sequential()

    # Define the model architecture.
    model.add(TimeDistributed(Conv2D(128, (3, 3), padding='same', activation='relu'),
                              input_shape=(40, 160, 160, 3)))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Conv2D(256, (3, 3), padding='same', activation='relu')))
    model.add(TimeDistributed(MaxPooling2D((4, 4))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Conv2D(128, (3, 3), padding='same', activation='relu')))
    model.add(TimeDistributed(MaxPooling2D((2, 2))))
    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu')))
    model.add(TimeDistributed(MaxPooling2D((2, 2))))
    # model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Flatten()))
    model.add(LSTM(32))
    model.add(Dense(1, activation='sigmoid'))

    # Display the model's summary.
    model.summary()

    # Return the constructed LRCN model.
    return model
LRCN_model = create_LRCN_model()
early_stopping_callback = EarlyStopping(monitor = 'val_loss', patience = 15, mode = 'min', restore_best_weights = True)
LRCN_model.compile(loss='binary_crossentropy', optimizer = 'Adam', metrics = ["accuracy"])
LRCN_model_training_history = LRCN_model.fit(train_ds, validation_data= val_ds, epochs = 70, callbacks = [early_stopping_callback])
Okay, I found another solution. I do not know exactly why it works; just mapping the following function does the job.
def set_shape(video, label):
    video.set_shape((40, 160, 160, 3))
    label.set_shape([])
    return video, label
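A likely explanation (my reading, not stated in the thread): tensors returned by tf.numpy_function carry a completely unknown static shape, and Keras calls shape.as_list() on every dataset element while building the model. The earlier attempt only fixed the video's shape and left the label's shape unknown, whereas this function pins down both. Under that assumption, an equivalent variant using tf.ensure_shape, which additionally validates the shapes at runtime, would be:
def set_shape(video, label):
    # annotate the static shapes and also check them when the pipeline runs
    video = tf.ensure_shape(video, (40, 160, 160, 3))
    label = tf.ensure_shape(label, [])
    return video, label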
Got it! You just need to change "accuracy" to "binary_accuracy" in model.compile. It worked for me with your code and some dummy video and label input data.
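Concretely, the compile call from the question would then become:
LRCN_model.compile(loss='binary_crossentropy', optimizer='Adam', metrics=['binary_accuracy'])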
Here is my code trying to use Keras Tuner:
datagen = ImageDataGenerator(
    rescale=1.0/255.0,
    zoom_range=[-2, 2],
    width_shift_range=[-25, 25],
    height_shift_range=[-25, 25],
    rotation_range=40,
    shear_range=40,
    horizontal_flip=True,
    vertical_flip=True,
    brightness_range=[0.98, 1.05],
    featurewise_center=True,
    samplewise_center=True,
    # channel_shift_range=1.5,
    # featurewise_center=True,
    # featurewise_std_normalization=True,
    validation_split=0.10)

mean, std = auxfunctions.getMeanStdClassification()
datagen.mean = mean
datagen.std = std

numClasses = 5
width = 240   # diabetic retinopathy 120x120, drRafael 40x40, 96x96
height = 240
input_shape = (width, height, 3)

train_generator = datagen.flow_from_dataframe(
    dataframe=trainLabels,
    directory='./resized_train_cropped',
    x_col="image",
    y_col="level",
    target_size=(240, 240),
    batch_size=16,
    class_mode='categorical',
    color_mode='rgb',  # keep it or remove it?
    subset='training')

validation_generator = datagen.flow_from_dataframe(
    dataframe=trainLabels,
    directory='./resized_train_cropped',
    x_col="image",
    y_col="level",
    target_size=(240, 240),
    batch_size=16,
    class_mode='categorical',
    color_mode='rgb',
    subset='validation')
#----------------------------------------------------------------------------------------
def createBaseNetwork(input_shape):
    weight_decay = 1e-4
    L2_norm = regularizers.l2(weight_decay)

    input = Input(shape=input_shape)
    print(input)
    x = Conv2D(96, (9, 9), activation='relu', name='conv1', kernel_regularizer=L2_norm)(input)
    x = MaxPooling2D((3, 3), name='pool1')(x)
    x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
    x = Conv2D(384, (5, 5), activation='relu', name='conv2', kernel_regularizer=L2_norm)(x)
    x = MaxPooling2D((3, 3), name='pool2')(x)
    x = BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
    x = Conv2D(384, (3, 3), activation='relu', name='conv3')(x)
    x = Conv2D(384, (3, 3), activation='relu', name='conv4')(x)
    x = Conv2D(256, (3, 3), activation='relu', name='conv5')(x)
    x = MaxPooling2D((3, 3), name='pool3')(x)
    x = Flatten()(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    return Model(input, x)
# ---------------------------------------------------------------------------------
hp = HyperParameters()

baseNetwork = createBaseNetwork(input_shape)
# baseNetwork.load_weights('./ModelWeights2.h5', by_name=True)
for l in baseNetwork.layers:
    l.trainable = True

input_a = Input(shape=input_shape, name='input1')
outLayers = baseNetwork(input_a)
outLayers = Dense(2048, activation='relu', name='fc3')(outLayers)
outLayers = Dropout(0.2)(outLayers)
outLayers = Dense(1024, activation='relu', name='fc4')(outLayers)
outLayers = Dropout(0.2)(outLayers)
outLayers = Dense(hp.Int('input_units', min_value=32, max_value=512), activation='relu', name='fc5')(outLayers)
classifier = Dense(numClasses, activation='softmax', name='predictions')(outLayers)

model = Model(input_a, classifier)
model.summary()

tuner = RandomSearch(
    model,
    objective='val_accuracy',
    max_trials=1,
    executions_per_trial=1,
    directory='./logtunner'
)

tuner.search(
    train_generator,
    validation_data=validation_generator,
    epochs=1,
)
For now I'm just trying to use it on the last Dense layer; as you can see, I just want to estimate a good number of neurons with:
hp.Int('input_units', min_value=32, max_value=512)
But I get an error like this:
ValueError: TypeError: object of type 'HyperParameters' has no len()
I don't know how to solve it; I've spent hours watching videos and tutorials but have no idea what is happening.
I also noticed another error message:
This function does not handle the case of the path where all inputs are not already EagerTensors
but I have no idea about that either.
I was having more or less the same error.
If you look at the keras-tuner tutorial on the TensorFlow website, https://www.tensorflow.org/tutorials/keras/keras_tuner, or on the Keras website, you will see the following:
tuner = kt.Hyperband(model_builder,
                     objective='val_accuracy',
                     max_epochs=10,
                     factor=3,
                     directory='my_dir',
                     project_name='intro_to_kt')
The first argument to the tuner is the function model_builder, which is declared earlier as:
def model_builder(hp):
    model = keras.Sequential()
    model.add(keras.layers.Flatten(input_shape=(28, 28)))

    # Tune the number of units in the first Dense layer
    # Choose an optimal value between 32-512
    hp_units = hp.Int('units', min_value=32, max_value=512, step=32)
    model.add(keras.layers.Dense(units=hp_units, activation='relu'))
    model.add(keras.layers.Dense(10))

    # Tune the learning rate for the optimizer
    # Choose an optimal value from 0.01, 0.001, or 0.0001
    hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])

    model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
                  loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['accuracy'])
    return model
So all you need to do is reorganize your code to follow the same structure: encapsulate the Keras model and the Keras Tuner hp inside a function, and hand that function to the tuner instead of an already-built model (RandomSearch takes a builder function as its first argument, just like Hyperband above); see the sketch below.
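A minimal sketch of that reorganization for the code above, assuming createBaseNetwork, input_shape, numClasses, train_generator and validation_generator are defined as in the question (the hyperparameter name and compile settings are illustrative; note the model must be compiled inside the builder):
def model_builder(hp):
    baseNetwork = createBaseNetwork(input_shape)

    input_a = Input(shape=input_shape, name='input1')
    outLayers = baseNetwork(input_a)
    outLayers = Dense(2048, activation='relu', name='fc3')(outLayers)
    outLayers = Dropout(0.2)(outLayers)
    outLayers = Dense(1024, activation='relu', name='fc4')(outLayers)
    outLayers = Dropout(0.2)(outLayers)
    # the tuned hyperparameter: width of the last hidden layer
    outLayers = Dense(hp.Int('fc5_units', min_value=32, max_value=512, step=32),
                      activation='relu', name='fc5')(outLayers)
    classifier = Dense(numClasses, activation='softmax', name='predictions')(outLayers)

    model = Model(input_a, classifier)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model

tuner = RandomSearch(
    model_builder,  # pass the builder function, not an already-built model
    objective='val_accuracy',
    max_trials=1,
    executions_per_trial=1,
    directory='./logtunner')
tuner.search(train_generator, validation_data=validation_generator, epochs=1)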
Cheers.
I'm trying to build a model that looks like this:
          input
            |
   conv layers / flatten
        /       \
  first_output   |
        \       /
      second_output
but it fails at the first conv layer with this error:
ValueError: Layer conv2d_4 was called with an input that isn't a symbolic tensor.
Received type: <class 'keras.layers.convolutional.Conv2D'>.
Full input: [<keras.layers.convolutional.Conv2D object at 0x7f450d7b8630>].
All inputs to the layer should be tensors.
The error points to the layer after the first conv, where input_shape is set.
Help would be appreciated.
Here is the code:
conv1 = Conv2D(8, 4, padding = "same", strides = 2)(inputs)
conv2 = Conv2D(16 ,4, padding = "same", strides = 2)(conv1)
flat = Flatten()(conv2)
dense1 = Dense(32)(flat)
dense2 = Dense(32)(dense1)
first_output = Dense(64)(dense2)
merged = concatenate([flat,first_output])
second_output_dense1 = Dense(32)(merged)
second_output_dense2 = Dense(32)(second_output_dense1)
second_output = Dense(64)(second_output_dense2)
model = Model(inputs=conv1, outputs=[first_output,second_output])
model.compile(loss = "mse", optimizer = "adam" )
Answer:
I was under the impression that you could build the model without an Input layer and just define the input in the first layer: conv1 = Conv2D(8, 4, padding="same", strides=2, input_shape=(6,8,8,)).
But that didn't work, so instead you have to drop the input_shape argument and create an explicit Input layer. Here is the fixed code:
inputs = Input(shape=(6, 8, 8,))
conv1 = Conv2D(8, 4, padding="same", strides=2)(inputs)
conv2 = Conv2D(16, 4, padding="same", strides=2)(conv1)
flat = Flatten()(conv2)
dense1 = Dense(32)(flat)
dense2 = Dense(32)(dense1)
first_output = Dense(64)(dense2)
merged = concatenate([flat, first_output])
second_output_dense1 = Dense(32)(merged)
second_output_dense2 = Dense(32)(second_output_dense1)
second_output = Dense(64)(second_output_dense2)
model = Model(inputs=inputs, outputs=[first_output, second_output])
model.compile(loss="mse", optimizer="adam")
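Why this works, as I read it: in the functional API every layer must be called on a tensor, so Conv2D(...) by itself is just a layer object, while Conv2D(...)(inputs) returns its output tensor. The input_shape argument is a Sequential-model convenience and connects nothing here, which is also why Model must be given the Input tensor rather than the first conv layer.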
I am designing a deep learning model to classify images, and I am using the following code to check the prediction performance. It compares the images' labels with the predicted classes and then reports the misclassified images.
I was using a Sequential model, and this code worked fine. But now it does not: predict_generator changes the order of the images (predicted_classes and validation_generator are not in the same order).
I want the predictions to be in the same order as the images in validation_generator. Do you know how I can do that?
validation_generator = validation_datagen.flow_from_directory(
    validation_dir,
    target_size=(image_size, image_size),
    batch_size=val_batchsize,
    class_mode='categorical',
    shuffle=False)

# Get the filenames from the generator
fnames = validation_generator.filenames

# Get the ground truth from the generator
ground_truth = validation_generator.classes

# Get the label-to-class-index mapping from the generator
label2index = validation_generator.class_indices

# Get the mapping from class index to class label
idx2label = dict((v, k) for k, v in label2index.items())

# Get the predictions from the model using the generator
predictions = model.predict_generator(validation_generator,
                                      steps=validation_generator.samples / validation_generator.batch_size,
                                      verbose=1)
predicted_classes = np.argmax(predictions, axis=1)

errors = np.where(predicted_classes != ground_truth)[0]
print("No of errors = {}/{}".format(len(errors), validation_generator.samples))

# Show the errors
for i in range(len(errors)):
    pred_class = np.argmax(predictions[errors[i]])
    pred_label = idx2label[pred_class]

    title = 'Original label:{}, Prediction :{}, confidence : {:.3f}'.format(
        fnames[errors[i]].split('/')[0],
        pred_label,
        predictions[errors[i]][pred_class])

    original = load_img('{}/{}'.format(validation_dir, fnames[errors[i]]))
    plt.figure(figsize=[7, 7])
    plt.axis('off')
    plt.title(title)
    plt.imshow(original)
    plt.show()
The model code:
from keras.engine import Model

vgg16_model = keras.applications.vgg16.VGG16()
input_layer2 = vgg16_model.input
vgg16_model.get_layer(index=0).name = 'input2'
last_layer2 = vgg16_model.get_layer('block4_pool').output

x = Conv2D(512, (3, 3), activation='relu', name='block5_conv1', padding='same')(last_layer2)
x = BatchNormalization(name='bn1')(x)
x = Conv2D(512, (3, 3), activation='relu', name='block5_conv2', padding='same')(x)
x = BatchNormalization(name='bn2')(x)
x = Conv2D(512, (3, 3), activation='relu', name='block5_conv3', padding='same')(x)
x = BatchNormalization(name='bn3')(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='block5_pool')(x)
x = Flatten(name='flatten')(x)
x = Dense(4096, activation='relu', name='fc1')(x)
x = Dense(4096, activation='relu', name='fc2')(x)
out2 = Dropout(0.3, name='dropout1')(x)

# Classification layer
output_layer2 = Dense(class_no, activation='softmax', name='prediction')(out2)

vgg16_face_model = Model(input_layer2, output_layer2)

vgg16_face_model.trainable = True
set_trainable = False
for layer in vgg16_face_model.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
Please help me.
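A possible fix (a sketch, assuming the mismatch comes from a partially consumed generator or a non-integer steps value): with shuffle=False, flow_from_directory yields images in validation_generator.filenames order, so resetting the generator and predicting a whole number of steps should keep predictions aligned:
import numpy as np

validation_generator.reset()  # start from the first batch again
steps = int(np.ceil(validation_generator.samples / validation_generator.batch_size))
predictions = model.predict_generator(validation_generator, steps=steps, verbose=1)
predicted_classes = np.argmax(predictions, axis=1)  # now aligned with fnames and ground_truth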