Keras ImageDataGenerator validation_data

I cannot really figure out the error here.
Key details:
augmented_images and val_data_gen are built with keras.preprocessing.image.ImageDataGenerator.
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), activation='relu', input_shape=(32,32,3)))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
#model.add(Dropout(rate=0.5))
model.add(Flatten())
model.add(Dense(units=64, activation='relu'))
model.add(Dense(2, activation='sigmoid'))
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.05),
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])
model.summary()
history = model.fit(x=augmented_images,
                    validation_data=val_data_gen,
                    epochs=10,
                    steps_per_epoch=2000,
                    validation_steps=1000)
ValueError: Layer sequential_10 expects 1 inputs, but it received 5 input tensors.
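A hedged guess at the cause: "expects 1 inputs, but it received 5 input tensors" usually means model.fit was handed a plain Python list of images. In the common tutorial this code resembles, augmented_images is a list of five sample images created for plotting, not a data iterator. The sketch below passes the iterator returned by flow_from_directory instead; train_image_generator and train_dir are assumed names. Separately, BinaryCrossentropy(from_logits=True) combined with a sigmoid output applies the sigmoid twice, so either drop the activation or set from_logits=False.
# Sketch of the likely fix: pass the DirectoryIterator returned by
# flow_from_directory, not a list of images and not the
# ImageDataGenerator itself. train_image_generator and train_dir
# are assumed names.
train_data_gen = train_image_generator.flow_from_directory(directory=train_dir,
                                                           target_size=(32, 32),
                                                           class_mode='binary',
                                                           batch_size=128)
history = model.fit(x=train_data_gen,
                    validation_data=val_data_gen,
                    epochs=10,
                    steps_per_epoch=2000,
                    validation_steps=1000)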

Related

My trained model returns the same prediction for all input images

I'm very new to this. I just trained a model following instructions from Kaggle and another website. The trained model works fine on the training images, but whenever I feed it a new image to predict, the result is always the same.
The accuracy is 0.9975000023841858
X_train = np.array(X_train)
X_test = np.array(X_test)
X_train = np.expand_dims(X_train, axis=3)
X_test = np.expand_dims(X_test, axis=3)
Here is the model:
weight_decay = 1e-4
num_classes = 12
model = Sequential()
model.add(Conv2D(64, (4,4), padding='same', kernel_regularizer=regularizers.l2(weight_decay), input_shape=(128,128,1)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(64, (4,4), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, (4,4), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (4,4), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.3))
model.add(Conv2D(128, (4,4), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(Conv2D(128, (4,4), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))
model.add(Activation('elu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(128, activation="linear"))
model.add(Activation('elu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train,
          epochs=5,
          validation_data=(X_test, y_test),
          verbose=1,
          initial_epoch=0)
When I tried to test with a training image:
predicted_classes = model.predict(X_train[:,:,:,:])
predicted_classes = np.argmax(np.round(predicted_classes), axis=1)
k = X_train.shape[0] #18000
r = np.random.randint(k) #random in 18000
print("Prediction:", predicted_classes[4])
Prediction: 8
My input image:
imgTest = tf.keras.utils.load_img('/content/drive/My Drive/Colab Notebooks/finger.jpg')
imgTest = np.array(imgTest)
imgTest = cv2.cvtColor(imgTest, cv2.COLOR_BGR2GRAY)
imgTest = cv2.resize(imgTest, (128, 128))
imgTest = np.expand_dims(imgTest, axis=0)
# imgTest = imgTest.reshape((1, 128, 128, 1, 1, 1))
# imgTest = imgTest/255
print(imgTest.shape)
predicted_classes_test = model.predict(imgTest)
predicted_classes_test = np.argmax(np.round(predicted_classes_test), axis=1)
print("Prediction:", predicted_classes_test)
Prediction: [7]
I tried changing the shape of the input image so that it matches the training images, but it doesn't work.
I looked for people with the same problem to see if I could find any solutions, but nothing works.
Can somebody tell me what's wrong with the model? Why does it return the same value for every image I input?
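Two things commonly cause this, offered as guesses: the test tensor is missing the trailing channel axis that the training tensors got via np.expand_dims(..., axis=3), and the commented-out /255 means the test image may be scaled differently from the training images. A sketch that matches the training shape follows; the scaling line is an assumption about how X_train was prepared, and note that tf.keras.utils.load_img returns RGB, so the conversion should be RGB2GRAY rather than BGR2GRAY:
imgTest = tf.keras.utils.load_img('/content/drive/My Drive/Colab Notebooks/finger.jpg')
imgTest = np.array(imgTest)
imgTest = cv2.cvtColor(imgTest, cv2.COLOR_RGB2GRAY)  # load_img yields RGB, not BGR
imgTest = cv2.resize(imgTest, (128, 128))
imgTest = imgTest.reshape(1, 128, 128, 1).astype('float32')  # add batch and channel axes
# imgTest = imgTest / 255.0  # uncomment only if X_train was scaled to [0, 1]
print(imgTest.shape)  # (1, 128, 128, 1)
predicted_classes_test = np.argmax(model.predict(imgTest), axis=1)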

Get ValueError when applying transfer learning in federated learning (TFF)

I want to use a pre-trained model in federated learning, as in the following code.
First I build my model and set its weights, then I freeze the convolutional layers and remove the last 4 layers.
def create_keras_model():
    model = Sequential()
    model.add(Conv2D(16, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu', input_shape=(226,232,1)))
    model.add(MaxPooling2D((2,2), strides=(2,2), padding='same'))
    model.add(Conv2D(32, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2), padding='same'))
    model.add(Conv2D(64, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2), padding='same'))
    model.add(Conv2D(64, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2), padding='same'))
    model.add(Conv2D(128, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2), padding='same'))
    model.add(Conv2D(128, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2), padding='same'))
    model.add(Conv2D(256, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu'))
    model.add(MaxPooling2D((2,2), strides=(2,2), padding='same'))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(10, activation='softmax'))
    return model

keras_model = create_keras_model()
server_state = FileCheckpointManager(root_dir='/content/drive/MyDrive',
                                     prefix='federated_clustering',
                                     step=1,
                                     keep_total=1,
                                     keep_first=True).load_checkpoint(structure=server_state, round_num=10)
keras_model.set_weights(server_state)
for layer in keras_model.layers[:-4]:
    layer.trainable = False
model_pre = Model(inputs=keras_model.input, outputs=keras_model.layers[14].output)
Next, I build the new model.
def create_keras_model1():
    model = Sequential()
    model.add(model_pre)
    model.add(Dense(256, activation='relu'))
    model.add(Dense(128, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    return model

def model_fn():
    # We _must_ create a new model here, and _not_ capture it from an external
    # scope. TFF will call this within different graph contexts.
    keras_model = create_keras_model1()
    return tff.learning.from_keras_model(
        keras_model,
        input_spec=preprocessed_example_dataset.element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
But I get a ValueError when I call tff.learning.build_federated_averaging_process:
iterative_process = tff.learning.build_federated_averaging_process(
    model_fn,
    client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.001),
    server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))
ValueError: Your Layer or Model is in an invalid state. This can happen for the following cases:
1. You might be interleaving estimator/non-estimator models or interleaving models/layers made in tf.compat.v1.Graph.as_default() with models/layers created outside of it. Converting a model to an estimator (via model_to_estimator) invalidates all models/layers made before the conversion (even if they were not the model converted to an estimator). Similarly, making a layer or a model inside a tf.compat.v1.Graph invalidates all layers/models you previously made outside of the graph.
2. You might be using a custom keras layer implementation with custom __init__ which didn't call super().__init__. Please check the implementation of <class 'keras.engine.functional.Functional'> and its bases.
Please help me fix it.
You need to create a new Keras model during the invocation of model_fn, as the code comment suggests. You are capturing model_pre, which was created beforehand outside model_fn; that is likely the core problem.
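A minimal sketch of that restructuring, with everything the TFF model needs, including the pretrained feature extractor, built inside model_fn. How the pretrained weights are restored inside the function is left as a placeholder, since that depends on how the checkpoint is stored:
def model_fn():
    base = create_keras_model()  # rebuild the architecture fresh on every call
    # base.set_weights(pretrained_weights)  # restore the checkpoint weights here
    for layer in base.layers[:-4]:
        layer.trainable = False
    features = Model(inputs=base.input, outputs=base.layers[14].output)
    keras_model = Sequential([
        features,
        Dense(256, activation='relu'),
        Dense(128, activation='relu'),
        Dense(64, activation='relu'),
        Dense(2, activation='softmax'),
    ])
    return tff.learning.from_keras_model(
        keras_model,
        input_spec=preprocessed_example_dataset.element_spec,
        loss=tf.keras.losses.SparseCategoricalCrossentropy(),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])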

CNN model: how does a dense layer in a CNN divide into two streams using Keras?

I am working on a CNN model. In the model below, I cannot work out how to divide the 4th layer into two streams and get the output.
I built the model in Keras.
def _build_model(self):
    model = Sequential()
    model.add(Conv2D(8, (3, 3), strides=4, padding='same', input_shape=self.state_size))
    model.add(Activation('relu'))
    model.add(Conv2D(2, (2, 2), strides=4, padding='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(self.action_size, activation='relu'))
    model.compile(loss='mse', optimizer=Adam())
    return model
How do I handle this? An example would be appreciated.
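A Sequential model cannot branch, but the functional API can. Below is a hedged sketch, assuming "two streams" means branching after the first Dense layer and merging before the output; the choice of Concatenate as the merge is an assumption (Add or a custom combination would work similarly):
from tensorflow.keras.layers import Input, Conv2D, Flatten, Dense, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

def _build_model(self):
    inputs = Input(shape=self.state_size)
    x = Conv2D(8, (3, 3), strides=4, padding='same', activation='relu')(inputs)
    x = Conv2D(2, (2, 2), strides=4, padding='same', activation='relu')(x)
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)  # 4th layer; split into two streams below
    # stream 1
    a = Dense(32, activation='relu')(x)
    a = Dense(16, activation='relu')(a)
    # stream 2
    b = Dense(32, activation='relu')(x)
    b = Dense(16, activation='relu')(b)
    merged = Concatenate()([a, b])  # merge the streams before the output
    outputs = Dense(self.action_size, activation='relu')(merged)
    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='mse', optimizer=Adam())
    return model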

Super high accuracy with high loss?

I am training a 1D CNN on a set of images for multi-label classification. I get an accuracy of almost 100% but a loss of ~5 at the same time. What's going on? Any help is appreciated.
model = Sequential()
model.add(Conv1D(filters=128, kernel_size=3, activation='relu', input_shape=(137,236)))
model.add(Dropout(0.2))
model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
model.add(Dropout(0.2))
model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
model.add(Dropout(0.2))
model.add(Conv1D(filters=64, kernel_size=3, activation='relu'))
model.add(Dropout(0.2))
model.add(GlobalMaxPooling1D())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.1))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='mean_squared_logarithmic_error', metrics=['accuracy'])
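One plausible explanation, offered as a guess rather than a diagnosis: the 'accuracy' metric only checks the argmax of the softmax output, while mean_squared_logarithmic_error measures something quite different, so the two can diverge sharply. If the three outputs are mutually exclusive classes with one-hot labels, the conventional pairing would be categorical_crossentropy; if the task is genuinely multi-label (several labels active at once), a sigmoid output with binary_crossentropy is the usual choice:
# Sketch assuming one-hot, mutually exclusive labels:
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Or, for true multi-label targets, replace the final layer with
# Dense(3, activation='sigmoid') and use loss='binary_crossentropy'.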

CNN ValueError when I try to fit my model

Here is my code:
from keras.models import Sequential
from keras import optimizers
from keras.layers import Conv1D, Dense, MaxPooling1D, Flatten, Activation
model = Sequential()
# my CNN layers
model.add(Conv1D(101, 101, strides=1, padding='same', dilation_rate=1, input_shape=(None, 120)))
model.add(Activation('relu'))
model.add(MaxPooling1D(pool_size=2, padding='same', strides=None))
model.add(Dense(2048))
model.add(Activation('relu'))
model.add(Dense(100))
model.add(Activation('sigmoid'))
model.compile(optimizer=optimizers.Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
model.fit(training_trainX_train, training_trainY_train, epochs=2, batch_size=100, verbose=1)
But I get this error: ValueError: Error when checking model input: expected conv1d_8_input to have 3 dimensions, but got array with shape (27660, 120)
Here are my training set's shapes:
training_trainX_train.shape = (27660, 120)
training_trainY_train.shape = (27660, 101)
Adding model.add(Flatten()) will resolve this:
model.add(Conv1D(101, 101, strides=1, padding='same', dilation_rate=1, input_shape=(None, 120)))
model.add(Activation('relu'))
model.add(MaxPooling1D(pool_size=2, padding='same', strides=None))
model.add(Flatten())
model.add(Dense(2048))
model.add(Activation('relu'))
model.add(Dense(100))
model.add(Activation('sigmoid'))
For more detail check https://github.com/keras-team/keras/issues/6351
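A hedged side note on the quoted ValueError: Conv1D expects 3-D input of shape (batch, steps, channels), and the error complains about a 2-D array, so the training data itself may also need an explicit channel axis (this assumes each sample is a flat vector of 120 values):
training_trainX_train = training_trainX_train.reshape(-1, 120, 1)
model.add(Conv1D(101, 101, strides=1, padding='same', dilation_rate=1, input_shape=(120, 1)))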
