Can't plot learning curve of Keras model - Python

This is my neural network in Keras. The model compiles and trains fine, but when I try to plot the learning curve from history, only an empty window appears.
model = Sequential()
model.add(Dense(64, input_dim=30,
                activity_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dropout(0.5))
model.add(Dense(16,
                activity_regularizer=regularizers.l2(0.01)))
model.add(BatchNormalization())
model.add(LeakyReLU())
model.add(Dense(2))
model.add(Activation('softmax'))

opt = Nadam(lr=0.001)
reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.9, patience=25, min_lr=0.000001, verbose=1)
checkpointer = ModelCheckpoint(filepath="test.hdf5", verbose=1, save_best_only=False)

model.compile(optimizer=opt,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
history = model.fit(X_train, Y_train,
                    nb_epoch=1,
                    batch_size=128,
                    verbose=1,
                    validation_data=(X_test, Y_test),
                    callbacks=[reduce_lr, checkpointer],
                    shuffle=True)
plt.plot(history.history['acc'])
When I print history.history['acc'], it's just one number, not a list.
I'd be happy if you could help.

Try increasing the number of epochs. With nb_epoch=1, history.history['acc'] holds a single value, and plt.plot draws lines between points, so a single point produces an apparently empty plot. Train for more epochs, or plot the single point with a marker (e.g. plt.plot(..., marker='o')).
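For reference, a minimal sketch of the plotting step once there are several epochs of history (assuming the same model, data, and callbacks as in the question; note that 'acc'/'val_acc' are the metric keys in older Keras, while newer versions use 'accuracy'/'val_accuracy'):

import matplotlib.pyplot as plt

history = model.fit(X_train, Y_train,
                    nb_epoch=50,  # 'epochs=50' in Keras 2+; several points instead of one
                    batch_size=128,
                    verbose=1,
                    validation_data=(X_test, Y_test),
                    callbacks=[reduce_lr, checkpointer],
                    shuffle=True)

# Check history.history.keys() if these keys raise a KeyError.
plt.plot(history.history['acc'], marker='o', label='train')
plt.plot(history.history['val_acc'], marker='o', label='validation')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()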

Keras EarlyStopping callback stops training after one epoch

My dataset is a network traffic dataset for binary classification. It has 25 features, and I have normalized it.
My model:
verbose = 1
epoch_number = 1000
batch_size = 32
n_outputs = 1

model = Sequential()
model.add(Conv1D(filters=200, kernel_size=4, strides=3, activation='relu', input_shape=(25, 1)))
model.add(Dropout(0.05))
model.add(BatchNormalization())
model.add(Conv1D(filters=200, kernel_size=5, strides=1, activation='relu', input_shape=(25, 1)))
model.add(Dropout(0.05))
model.add(BatchNormalization())
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.05))
model.add(Flatten())
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.05))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.05))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.05))
model.add(Dense(n_outputs, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['acc', f1_m, precision_m, recall_m])

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1)

# fit network
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epoch_number, batch_size=batch_size, verbose=1, callbacks=[es])

# evaluate model
loss, accuracy, f1_score, precision, recall = model.evaluate(X_test, y_test, batch_size=batch_size, verbose=0)
print(loss, accuracy, f1_score, precision, recall)
My model stops after one epoch when I add the Keras EarlyStopping callback, even though the loss decreases every epoch when I remove it.
If you had posted your training logs from a run without early stopping, it would have been easier to diagnose. Still, let's look at the possibilities. You have set up EarlyStopping as follows:
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1)
That means the callback runs with the default parameters, equivalent to:
tf.keras.callbacks.EarlyStopping(
    monitor="val_loss",
    min_delta=0,
    patience=0,
    verbose=1,
    mode="min",
    baseline=None,
    restore_best_weights=False,
)
Here patience=0, mode='min', min_delta=0, and monitor='val_loss'.
This simply means that if your validation loss does not decrease from one epoch to the next, training stops.
In other words, if the validation loss is equal to or greater than in the previous epoch, the callback fires.
I would recommend increasing the patience parameter.
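For example, a minimal sketch of a more forgiving configuration (the values are illustrative, not tuned for this dataset):

from tensorflow.keras.callbacks import EarlyStopping

es = EarlyStopping(
    monitor='val_loss',
    mode='min',
    patience=20,                # tolerate 20 epochs without improvement
    min_delta=1e-4,             # ignore changes smaller than this
    restore_best_weights=True,  # roll back to the best weights when stopping
    verbose=1,
)

model.fit(X_train, y_train, validation_data=(X_test, y_test),
          epochs=epoch_number, batch_size=batch_size, verbose=1, callbacks=[es])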

How can I fix abnormal predictions?

The difference between the last known value and the next predicted value is very large.
Here is my model:
model = Sequential()
model.add(LSTM(256, input_shape=(None, 1), return_sequences=True))
model.add(Dropout(0.3))
model.add(LSTM(256, input_shape=(None, 1)))
model.add(Dropout(0.3))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.summary()
model.fit(x_train, y_train, epochs=10, batch_size=32, verbose=2, validation_data=(x_val, y_val))
I want to predict the next value of the time series, so I used last_X = y_test[-1] and predicted from it, but the gap between y_test[-1] and the predicted value is too large.
Am I doing something wrong, or is there an error in my code? I do not know why this happened.
Instead of predicting on x, you're predicting on y.
Shouldn't it be last_X = x_test[-1] instead of last_X = y_test[-1]?
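A minimal sketch of that fix (assuming x_test has shape (samples, timesteps, 1), which is what the model expects):

last_X = x_test[-1:]            # slice, not index, to keep the batch dimension: (1, timesteps, 1)
next_y = model.predict(last_X)  # shape (1, 1)
print(next_y[0, 0])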

Hyperparameter optimization with grid search in Keras and flow_from_directory

I tried to optimize the hyperparameters of my Keras CNN for image classification, using grid search from sklearn. I got past the fundamental difficulty of producing x and y from Keras's flow_from_directory, but it still doesn't work.
The last line raises:
ValueError: dropout is not a legal parameter
def grid_model(optimizer='adam',
               kernel_initializer='random_uniform',
               dropout=0.2,
               loss='categorical_crossentropy'):
    model = Sequential()
    model.add(Conv2D(6, (5, 5), activation="relu", padding="same",
                     input_shape=(img_width, img_height, 3)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(dropout))
    model.add(Conv2D(16, (5, 5), activation="relu"))
    model.add(MaxPooling2D((2, 2)))
    model.add(Dropout(dropout))
    model.add(Flatten())
    model.add(Dense(120, activation='relu', kernel_initializer=kernel_initializer))
    model.add(Dropout(dropout))
    model.add(Dense(84, activation='relu', kernel_initializer=kernel_initializer))
    model.add(Dropout(dropout))
    model.add(Dense(10, activation='softmax'))
    model.compile(loss=loss,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model

train_generator = ImageDataGenerator(rescale=1/255)
validation_generator = ImageDataGenerator(rescale=1/255)

# Retrieve images and their classes for train and validation sets
train_flow = train_generator.flow_from_directory(directory=train_data_dir,
                                                 batch_size=batch_size,
                                                 target_size=(img_height, img_width))
validation_flow = validation_generator.flow_from_directory(directory=validation_data_dir,
                                                           batch_size=batch_size,
                                                           target_size=(img_height, img_width),
                                                           shuffle=False)

clf = KerasClassifier(build_fn=grid_model(), epochs=epochs, verbose=0)

param_grid = {
    'clf__optimizer': ['adam', 'Nadam'],
    'clf__epochs': [100, 200],
    'clf__dropout': [0.1, 0.2, 0.5],
    'clf__kernel_initializer': ['normal', 'uniform'],
    'clf__loss': ['categorical_crossentropy',
                  'sparse_categorical_crossentropy',
                  'kullback_leibler_divergence']
}

pipeline = Pipeline([('clf', clf)])
(X_train, Y_train) = train_flow.next()
grid = GridSearchCV(pipeline, cv=2, param_grid=param_grid)
grid.fit(X_train, Y_train)
The problem is in this line:
clf = KerasClassifier(build_fn=grid_model(), epochs=epochs, verbose=0)
Change it to:
clf = KerasClassifier(build_fn=grid_model, epochs=epochs, verbose=0)
grid_model should not be invoked; a reference to it should be passed, so that KerasClassifier can call it later with the grid parameters.
Also, 'sparse_categorical_crossentropy' (which expects integer labels) cannot be grid-searched together with 'categorical_crossentropy' (which expects one-hot labels), because the two require incompatible target shapes for the same model output.
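For illustration, a minimal sketch of the corrected setup with those two fixes applied (the grid values are trimmed for brevity):

clf = KerasClassifier(build_fn=grid_model, epochs=epochs, verbose=0)  # reference, not a call

param_grid = {
    'clf__optimizer': ['adam', 'Nadam'],
    'clf__dropout': [0.1, 0.2, 0.5],
    'clf__kernel_initializer': ['normal', 'uniform'],
    # 'sparse_categorical_crossentropy' dropped: it expects integer labels,
    # while flow_from_directory yields one-hot targets here.
    'clf__loss': ['categorical_crossentropy', 'kullback_leibler_divergence'],
}

pipeline = Pipeline([('clf', clf)])
grid = GridSearchCV(pipeline, cv=2, param_grid=param_grid)
grid.fit(X_train, Y_train)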

When printing the model summary for a CNN model, the input layer is a garbage integer. Also, the trained model has 96% error

I am trying to create a model that recognizes static gestures using a CNN. I have 26 gestures and 2400 images across all gestures. However, the model summary shows a garbage input layer, and the trained model has 96% error.
I am pretty new to this, so I have no idea about most of these things. I have tried changing a few things, to no avail.
# this is my model
def cnn_model():
    num_of_classes = get_num_of_classes()
    model = Sequential()
    model.add(Conv2D(16, (2, 2), input_shape=(image_x, image_y, 1), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
    model.add(Conv2D(32, (5, 5), activation='relu'))
    model.add(MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding='same'))
    model.add(Conv2D(64, (5, 5), activation='relu'))
    #model.add(MaxPooling2D(pool_size=(5, 5), strides=(5, 5), padding='same'))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(num_of_classes, activation='softmax'))
    sgd = optimizers.SGD(lr=1e-2)
    model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
    filepath = "cnn_model_keras2.h5"
    checkpoint1 = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
    #checkpoint2 = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    callbacks_list = [checkpoint1]
    from keras.utils import plot_model
    plot_model(model, to_file='model.png', show_shapes=True)
    return model, callbacks_list
# training and testing
model, callbacks_list = cnn_model()
model.summary()
model.fit(train_images, train_labels, validation_data=(test_images, test_labels), epochs=20, batch_size=500, callbacks=callbacks_list)
scores = model.evaluate(test_images, test_labels, verbose=0)
I expect at most 20% loss, but the loss is 96%. Also:
expected model summary: https://imgur.com/66Yggvh
what I got: https://imgur.com/AzpgvFN
This is my first question on any forum so please bear with me.
I think you are hitting this known bug: "keras plot_model inserts strange first input row containing long integer number" #11376.
It looks like an open issue in Keras, so check your version.
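If it helps, a quick way to check the installed version against the ones discussed in that issue:

import keras
print(keras.__version__)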

How to save pipelined estimator in Keras?

I am using scikit-learn in Python, where I pipelined a KerasClassifier with StandardScaler().
The code is:
def create_baseline():
    model = Sequential()
    model.add(Dense(11, input_dim=11, kernel_initializer='normal', activation='relu'))
    model.add(Dense(7, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

classifier = KerasClassifier(build_fn=create_baseline, nb_epoch=150, batch_size=5)
kfold = StratifiedKFold(n_splits=2, shuffle=True, random_state=seed)

estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp', classifier))
pipeline = Pipeline(estimators)

results = cross_val_score(pipeline, X, Y, cv=kfold, verbose=1, fit_params={'mlp__callbacks': [tbCallBack]})
print("Result: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100))
How can I save the cross-validated model? Considering that I never fit the classifier directly, I need to save the fitted result and then load it later to make predictions.
