I'm trying to improve my validation accuracy, which is currently very low. I have tried changing the batch size and the number of images used for validation and training, and I added extra dense layers, but none of that has worked. The dataset I'm using had not yet been split into training and validation sets, so I did that myself using partitioning. The sample counts are given below; I have tried increasing VALIDATION_SAMPLES, but when I do, my cluster keeps crashing.
TRAINING_SAMPLES = 10000
VALIDATION_SAMPLES = 2000
TEST_SAMPLES = 2000
IMG_WIDTH = 178
IMG_HEIGHT = 218
BATCH_SIZE = 32
NUM_EPOCHS = 20
def generate_df(partition, attr, num_samples):
    df_ = df_par_attr[(df_par_attr['partition'] == partition)
                      & (df_par_attr[attr] == 0)].sample(int(num_samples/2))
    df_ = pd.concat([df_,
                     df_par_attr[(df_par_attr['partition'] == partition)
                                 & (df_par_attr[attr] == 1)].sample(int(num_samples/2))])

    # for Training and Validation
    if partition != 2:
        x_ = np.array([load_reshape_img(images_folder + fname) for fname in df_.index])
        x_ = x_.reshape(x_.shape[0], 218, 178, 3)
        y_ = np_utils.to_categorical(df_[attr], 2)
    # for Test
    else:
        x_ = []
        y_ = []
        for index, target in df_.iterrows():
            im = cv2.imread(images_folder + index)
            im = cv2.resize(cv2.cvtColor(im, cv2.COLOR_BGR2RGB), (IMG_WIDTH, IMG_HEIGHT)).astype(np.float32) / 255.0
            im = np.expand_dims(im, axis=0)
            x_.append(im)
            y_.append(target[attr])

    return x_, y_
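For context on the cluster crashes: generate_df materializes every image in memory as float32, so the footprint grows linearly with num_samples. A quick back-of-the-envelope check, using only the constants defined above:

import numpy as np

# Approximate RAM needed to hold the loaded arrays in float32:
# IMG_HEIGHT x IMG_WIDTH x 3 floats at 4 bytes each, per image.
bytes_per_image = IMG_HEIGHT * IMG_WIDTH * 3 * np.dtype(np.float32).itemsize
for n in (TRAINING_SAMPLES, VALIDATION_SAMPLES):
    print(f"{n} images ~ {n * bytes_per_image / 1e9:.2f} GB")
# ~4.7 GB for the 10000 training images and ~0.93 GB for validation, before
# any augmentation copies, which is consistent with memory pressure growing
# as VALIDATION_SAMPLES increases.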
My training model is built after the partitioning, as you can see below.
# Train data
x_train, y_train = generate_df(0, 'Male', TRAINING_SAMPLES)
# Train - Data Preparation - Data Augmentation with generators
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    rotation_range=30,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
)
train_datagen.fit(x_train)

train_generator = train_datagen.flow(
    x_train, y_train,
    batch_size=BATCH_SIZE,
)
The same goes for the validation data:
# Validation Data
x_valid, y_valid = generate_df(1, 'Male', VALIDATION_SAMPLES)
# Validation - Data Preparation - Data Augmentation with generators
valid_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
)
valid_datagen.fit(x_valid)

validation_generator = valid_datagen.flow(
    x_valid, y_valid,
)
I tried playing around with the layers, but I was told that it wouldn't really affect the val_accuracy:
x = inc_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(1024, activation="relu")(x)
x = Dropout(0.5)(x)
x = Dense(256, activation="relu")(x)
predictions = Dense(2, activation="softmax")(x)
I tried using the 'adam' optimizer, but it made no difference compared to SGD.
model_.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
               loss='categorical_crossentropy',
               metrics=['accuracy'])

hist = model_.fit_generator(train_generator,
                            validation_data=(x_valid, y_valid),
                            steps_per_epoch=TRAINING_SAMPLES // BATCH_SIZE,
                            epochs=NUM_EPOCHS,
                            callbacks=[checkpointer],
                            verbose=1)
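One inconsistency worth noting in the call above: the training batches pass through preprocess_input (via train_datagen), but validation_data=(x_valid, y_valid) feeds the model raw arrays, so training and validation inputs end up on different scales. A minimal sketch of a consistent setup, reusing the validation_generator already built above (assuming the intent is to validate on the same preprocessing):

# validation_steps caps how many validation batches are drawn each epoch
hist = model_.fit_generator(train_generator,
                            validation_data=validation_generator,
                            validation_steps=VALIDATION_SAMPLES // BATCH_SIZE,
                            steps_per_epoch=TRAINING_SAMPLES // BATCH_SIZE,
                            epochs=NUM_EPOCHS,
                            callbacks=[checkpointer],
                            verbose=1)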
Whoever told you that modifying the model won't affect validation accuracy is, in most cases, dead wrong. The problem with your model is that it is not deep enough to extract the features of the images. Below is the code I have used on hundreds of models; it has proved very accurate with respect to achieving low training and validation loss and avoiding overfitting.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Activation,Dropout,Conv2D, MaxPooling2D,BatchNormalization, Flatten
from tensorflow.keras.optimizers import Adam, Adamax
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras import regularizers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model, load_model
def make_model(img_size, class_count, lr=.001, trainable=True):
    img_shape = (img_size[0], img_size[1], 3)
    model_name = 'EfficientNetB3'
    base_model = tf.keras.applications.efficientnet.EfficientNetB3(include_top=False, weights="imagenet",
                                                                   input_shape=img_shape, pooling='max')
    base_model.trainable = trainable
    x = base_model.output
    x = keras.layers.BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001)(x)
    x = Dense(256, kernel_regularizer=regularizers.l2(l=0.016), activity_regularizer=regularizers.l1(0.006),
              bias_regularizer=regularizers.l1(0.006), activation='relu')(x)
    x = Dropout(rate=.45, seed=123)(x)
    output = Dense(class_count, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=output)
    model.compile(Adamax(learning_rate=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    return model, base_model  # return the base_model so the callback can control its training state
TRAINING_SAMPLES = 10000
VALIDATION_SAMPLES = 2000
TEST_SAMPLES = 2000
IMG_WIDTH = 178
IMG_HEIGHT = 218
BATCH_SIZE = 32
NUM_EPOCHS = 20
img_size=(IMG_HEIGHT,IMG_WIDTH)
class_count=2
model, base_model=make_model(img_size, class_count, lr=.001, trainable=True)
I also recommend that you use two Keras callbacks. One, ReduceLROnPlateau, adjusts the learning rate; the other, EarlyStopping, stops training early and keeps the weights from the epoch with the lowest validation loss. Both are documented in the Keras callbacks API.
My recommended code for these callbacks is shown below
rlronp = tf.keras.callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=2, verbose=1)
estop = tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=4, verbose=1, restore_best_weights=True)
callbacks = [rlronp, estop]
Put the above code before calling model.fit, and in model.fit set the parameter callbacks=callbacks.
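For concreteness, a minimal sketch of wiring these callbacks into training; train_generator and validation_generator are assumed to be the generators defined in the question:

history = model.fit(train_generator,
                    validation_data=validation_generator,
                    epochs=NUM_EPOCHS,
                    callbacks=callbacks,  # [rlronp, estop] from above
                    verbose=1)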
Related
I am using the VGG19 pre-trained model with ImageNet weights to do transfer learning on 4 classes with Keras. However, I do not know if there really is a difference between these 4 classes, and I'd like to find out. The goal is to discover whether these classes make sense or whether there is no real difference between these image classes.
These classes are made up of abstract paintings from the same individual.
I tried different models with different hyperparameters (Adam/SGD, learning rate, dropout, L2 regularization, FC layer size, batch size, unfreezing, and also weighted classes, as the data is a little unbalanced).
batch_size = 32
unfreeze = 17
dropout = 0.2
fc = 256
lr = 1e-4
l2_reg = 0.1
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    horizontal_flip=True,
    vertical_flip=True,
    fill_mode='nearest'
)
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)

train_generator = train_datagen.flow_from_directory(
    'C:/Users/train',
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    'C:/Users/test',
    target_size=(224, 224),
    batch_size=batch_size,
    class_mode='categorical')
# (imports assumed by the snippet below)
from tensorflow.keras.applications import VGG19
from tensorflow.keras.layers import Flatten, GlobalMaxPooling2D, Dense, Activation, BatchNormalization, Dropout
from tensorflow.keras import regularizers, optimizers, layers, Model
from sklearn.utils import class_weight
import numpy as np

base_model = VGG19(
    weights="imagenet",
    input_shape=(224, 224, 3),
    include_top=False,
)

last_layer = base_model.get_layer('block5_pool')
last_output = last_layer.output
x = Flatten()(last_output)  # note: this result is discarded by the next line
x = GlobalMaxPooling2D()(last_output)
x = Dense(fc)(x)
x = Activation('relu')(x)
x = BatchNormalization()(x)
x = Dropout(dropout)(x)
x = Dense(fc, activation='relu', kernel_regularizer=regularizers.l2(l2=l2_reg))(x)
x = layers.Dense(4, activation='softmax')(x)
model = Model(base_model.input, x)

for layer in model.layers:
    layer.trainable = False
for layer in model.layers[unfreeze:]:
    layer.trainable = True

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.SGD(learning_rate=lr),
              metrics=['accuracy'])

class_weights = class_weight.compute_class_weight('balanced',
                                                  np.unique(train_generator.classes),
                                                  train_generator.classes)
class_weights_dict = dict(enumerate(class_weights))

# note: class_weights_dict is computed but never passed to model.fit below;
# add class_weight=class_weights_dict there for the weighting to take effect
history = model.fit(train_generator, epochs=epochs, validation_data=validation_generator,
                    validation_steps=392 // batch_size,
                    steps_per_epoch=907 // batch_size)
plot_model_history(history)
I also did feature extraction at every layer and fed the extracted features to an SVM (one per layer); the accuracy of these SVMs was about 40%, which is higher than this model (30 to 33%). So I may be wrong, but I think this model could achieve higher accuracy.
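For reference, a minimal sketch of that feature-extraction-plus-SVM workflow; the choice of 'block4_pool' and the generator names are illustrative assumptions, not necessarily what was actually run:

import numpy as np
from sklearn.svm import SVC
from tensorflow.keras import Model

# Truncate the network at an intermediate layer and use its output as features.
feat_model = Model(base_model.input, base_model.get_layer('block4_pool').output)

def extract_features(generator, steps):
    feats, labels = [], []
    for _ in range(steps):
        x_batch, y_batch = next(generator)
        f = feat_model.predict(x_batch, verbose=0)
        feats.append(f.reshape(len(f), -1))  # flatten the spatial dimensions
        labels.append(np.argmax(y_batch, axis=1))
    return np.concatenate(feats), np.concatenate(labels)

X_tr, y_tr = extract_features(train_generator, steps=907 // batch_size)
X_va, y_va = extract_features(validation_generator, steps=392 // batch_size)

svm = SVC(kernel='linear').fit(X_tr, y_tr)
print("SVM validation accuracy:", svm.score(X_va, y_va))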
I have a few questions about my model.
First, is my code correct, or am I doing something wrong?
If the validation accuracy for a 4-class classification task is ~30% (assuming the data are balanced or weighted), how likely is it that other hyperparameters could improve it to something significantly better?
What else can I try to get better accuracy?
When and how can I conclude that these classes do not make sense ?
Hi everyone!
I have a problem with my model.
I am training a CNN with transfer learning using the MobileNet base model.
My dataset is made up of 3 classes "paper, scissors, rock" (8751 images, and all class are perfectly balanced) and I use it to create a hand gesture recognition model for the "paper, scissors, rock" game.
In the training phase with Keras I get excellent results in the final epochs, on both the training set and the test set (accuracy, precision, and AUC all around 0.98).
But when I evaluate on the validation set, these metrics come out very low.
I think this could be due to overfitting and that I should do some tuning on my model. In fact, I used augmentation to increase the number of images in my dataset, and then I tried modifying the MobileNet base model by adding layers.
But things are not getting better... Can you help me? I'm going crazy.
This is my model training code:
import matplotlib.pyplot as plt
import tensorflow
from tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras import Sequential
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import keras
from keras.metrics import Precision, Recall
from collections import Counter
IMAGE_SIZE = (224, 224)
IMG_SHAPE = IMAGE_SIZE + (3,)
TRAIN_DATASET_DIR = "/content/PAPER_SCISSOR_ROCK/TRAIN"
TEST_DATASET_DIR = "/content/PAPER_SCISSOR_ROCK/TEST"
NUM_CLASSES = 3
BATCH_SIZE = 16
EPOCHS = 40
FC_LAYERS = [512, 512, 256, 256]
DROPOUT = 0.4
LEARNING_RATE = 0.0001
train_datagen = ImageDataGenerator(
    vertical_flip=True,
    validation_split=0.20,
    rescale=1. / 255,
    fill_mode='wrap',
    rotation_range=45,
    brightness_range=[0.2, 1.0]
    # brightness_range=[1, 2],
    # preprocessing_function=keras.applications.mobilenet.preprocess_input
)

# ONLY FOR TEST: SPLIT INTO VALIDATION AND TEST IMAGES
# (TO CALCULATE PRECISION AND THE CONFUSION MATRIX AFTERWARDS)
test_datagen = ImageDataGenerator(
    rescale=1./255,
    validation_split=0.3
)

train_generator = train_datagen.flow_from_directory(
    TRAIN_DATASET_DIR,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode="categorical",
    shuffle=True
)

val_generator = test_datagen.flow_from_directory(
    TEST_DATASET_DIR,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode="categorical",
    subset='training',
    shuffle=True
)

test_generator = test_datagen.flow_from_directory(
    TEST_DATASET_DIR,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode="categorical",
    subset="validation",
    shuffle=True
)
def build_finetune_model(base_model, dropout, fc_layers, num_classes):
    # Freeze all base-model layers so their weights are not updated during training.
    for layer in base_model.layers:
        layer.trainable = False

    # THE NEW PART SUGGESTED: unfreeze the last 30 layers,
    # but keep the BatchNormalization layers frozen.
    for layer in base_model.layers[-30:]:
        layer.trainable = True
    for layer in base_model.layers:
        if "BatchNormalization" in layer.__class__.__name__:
            layer.trainable = False

    x = base_model.output
    x = Flatten()(x)
    for fc in fc_layers:
        print(fc)
        x = Dense(fc, activation='relu')(x)
        x = Dropout(dropout)(x)

    predictions = Dense(num_classes, activation='softmax')(x)
    finetune_model = Model(inputs=base_model.input, outputs=predictions)
    return finetune_model
mobielNetV2 = tensorflow.keras.applications.MobileNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')

finetune_model = build_finetune_model(mobielNetV2, dropout=DROPOUT, fc_layers=FC_LAYERS, num_classes=NUM_CLASSES)

finetune_model.compile(tensorflow.keras.optimizers.Adam(learning_rate=LEARNING_RATE), loss='categorical_crossentropy', metrics=['accuracy', 'AUC', Precision(), Recall()])

# EarlyStopping: stop training in any epoch where the model is seen to overfit.
es = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0, mode='auto')

# note: len(train_generator) is already the number of batches, so dividing by
# BATCH_SIZE again means each epoch sees only a fraction of the training data;
# also, validation_steps refers to test_generator while validation_data is val_generator
r = finetune_model.fit_generator(train_generator, validation_data=val_generator, epochs=EPOCHS, steps_per_epoch=len(train_generator)/BATCH_SIZE,
                                 validation_steps=len(test_generator), callbacks=[es])

print("\nSAVE THE MODEL")
finetune_model.save(f"/content/drive/My Drive/Computer_Vision/Models/MobileNet_ScissorPaperRock_{EPOCHS}_epochs.h5")
EDITED
This is the code showing how I calculate the precision, recall, and F1-score of the validation set:
import numpy as np
from sklearn.metrics import classification_report

# test_steps_per_epoch = np.math.ceil(val_generator.samples / val_generator.batch_size)
# print(test_steps_per_epoch)

# note: val_generator was created with shuffle=True, so the order of these
# predictions does not match val_generator.classes below; the generator
# should use shuffle=False for this report to be meaningful
predictions = finetune_model.predict(val_generator)

# Get the most likely class
predicted_classes = np.argmax(predictions, axis=1)
print(val_generator, len(val_generator))

# Get ground-truth classes and class labels
true_classes = val_generator.classes
# print(true_classes)
class_labels = list(val_generator.class_indices.keys())
# print(class_labels)

# Use scikit-learn to get statistics
report = classification_report(true_classes, predicted_classes, target_names=class_labels)
print(report)
Since you are fine-tuning a MobileNetV2 model, it is a good idea to update the weights of the last few layers. MobileNetV2 is trained to classify 1000 different classes, but your domain contains only 3 classes with similar features. The first few layers generally capture general features, while the last few layers are less general; those will affect your model the most, since your domain is much smaller. I'd suggest that you allow the last 20%-30% of MobileNetV2's layers to update their weights.
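A minimal sketch of that suggestion, unfreezing roughly the last 30% of the base model while keeping the BatchNormalization layers frozen (the 30% cutoff is an assumption within the suggested 20%-30% range):

import tensorflow as tf

base_model = tf.keras.applications.MobileNetV2(input_shape=(224, 224, 3),
                                               include_top=False, weights='imagenet')

n_unfreeze = int(len(base_model.layers) * 0.3)  # last ~30% of the layers
for layer in base_model.layers[:-n_unfreeze]:
    layer.trainable = False
for layer in base_model.layers[-n_unfreeze:]:
    # keep BatchNorm frozen so its moving statistics are not disturbed
    layer.trainable = not isinstance(layer, tf.keras.layers.BatchNormalization)

# Recompile after changing trainable flags, ideally with a low learning rate,
# so fine-tuning does not destroy the pretrained weights.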
I created a model for mask detection through transfer learning of a MobileNet CNN, for a multiclass problem: NoMask, Mask, UncorrectMask.
Below is the code:
import matplotlib.pyplot as plt
import tensorflow
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import ImageDataGenerator
IMAGE_SIZE = (224, 224)
IMG_SHAPE = IMAGE_SIZE + (3,)
DATASET_DIR = "./DATASET/Mask_Detection/"
BATCH_SIZE = 32
EPOCHS = 15
datagen = ImageDataGenerator(
    validation_split=0.2,
    rescale=1./255,  # to process the data faster
    brightness_range=[1, 2]
)

train_generator = datagen.flow_from_directory(
    DATASET_DIR,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode="categorical",
    subset="training"
)

test_generator = datagen.flow_from_directory(
    DATASET_DIR,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode="categorical",
    subset="validation"
)
mobielNetV2 = tensorflow.keras.applications.MobileNetV2(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')

for layer in mobielNetV2.layers:
    layer.trainable = False

x = Flatten()(mobielNetV2.output)
prediction = Dense(3, activation='softmax')(x)

model = Model(inputs=mobielNetV2.input, outputs=prediction)
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

r = model.fit(train_generator, validation_data=test_generator, epochs=EPOCHS, steps_per_epoch=len(train_generator), validation_steps=len(test_generator))

model.save("MobileNet.h5")
I have a problem: I would like to calculate the precision, recall, F1-score, and confusion matrix for my model, but I can't figure out how to do it. Can someone help me?
I was able to easily calculate the accuracy and log loss.
See this tutorial. Basically, you should get your model's predictions on your test set by
y_pred_logits = model.predict(test_generator)
y_pred = tf.math.argmax(y_pred_logits, axis=1)  # argmax over the class axis
The link above also has an example of displaying the confusion matrix.
Precision, recall and F1 could be computed with sklearn as in https://stackoverflow.com/a/9092866/11664800.
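Putting those pieces together, a minimal sketch; it assumes test_generator is re-created with shuffle=False so that the prediction order lines up with test_generator.classes:

import numpy as np
from sklearn.metrics import classification_report, confusion_matrix

y_pred_probs = model.predict(test_generator)
y_pred = np.argmax(y_pred_probs, axis=1)   # most likely class per sample
y_true = test_generator.classes            # ground truth, in generator order

print(confusion_matrix(y_true, y_pred))
print(classification_report(y_true, y_pred,
                            target_names=list(test_generator.class_indices.keys())))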
I'm using the pre-trained VGG16 model for a 100-class classification problem. The dataset is Tiny ImageNet; each class has 500 images, and I randomly chose 100 classes from Tiny ImageNet, with 400 images per class for training and 100 for validation. I therefore changed the input shape of VGG16 to the 32x32 size.
The results always look like overfitting: training accuracy is high, but val_acc stays stuck at almost 40%.
I have used dropout, L2 regularization, data augmentation..., but val_acc is still stuck at almost 40%.
What can I do about the overfitting, or how can I correct my code?
Thanks
img_width, img_height = 32, 32
epochs = 50
learning_rate = 1e-4
steps_per_epoch = 2500
train_path='./training_set_100A/'
valid_path='./testing_set_100A/'
test_path='./testing_set_100A/'
class_num = 100
train_batches = ImageDataGenerator(rescale=1. / 255,
                                   rotation_range=20, zoom_range=0.15,
                                   width_shift_range=0.2, height_shift_range=0.2,
                                   shear_range=0.15,
                                   horizontal_flip=True, fill_mode="nearest"
                                   ).flow_from_directory(
    train_path, target_size=(img_width, img_height),
    batch_size=32, shuffle=True)

valid_batches = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
    valid_path, target_size=(img_width, img_height),
    batch_size=10, shuffle=False)

test_batches = ImageDataGenerator(rescale=1. / 255).flow_from_directory(
    test_path, target_size=(img_width, img_height),
    batch_size=10, shuffle=False)
seqmodel = Sequential()  # note: this is never used below

VGG16Model = VGG16(weights='imagenet', include_top=False)

input = Input(shape=(img_width, img_height, 3), name='image_intput')
output_vgg16_conv = VGG16Model(input)

x = Flatten()(output_vgg16_conv)
x = Dense(4096, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(4096, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(class_num, activation='softmax')(x)

funcmodel = Model([input], [x])
funcmodel.summary()

funcmodel.compile(optimizer=SGD(lr=learning_rate, momentum=0.9),
                  loss='categorical_crossentropy', metrics=['accuracy'])

train_history = funcmodel.fit_generator(train_batches,
                                        steps_per_epoch=steps_per_epoch, validation_data=valid_batches,
                                        validation_steps=1000, epochs=epochs, verbose=1)
It seems you followed examples implementing this from other sites, but your training samples are far too few to train the two new Dense layers of 4096 units each.
You have to either lower the size of your layers or add a lot more samples, e.g. 20,000 instead of 500.
1) 50 epochs is too many; try running fewer epochs.
2) Check your validation accuracy at every epoch.
3) VGG is too deep for your small (32 x 32) image data. Try building your own network with fewer parameters, or try LeNet; a sketch follows below.
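A minimal sketch of such a LeNet-style network for 32x32 inputs; the layer sizes are illustrative assumptions, not a prescription:

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout

# Far fewer parameters than VGG16 plus two 4096-unit Dense layers,
# so it is much harder to overfit 400 images per class.
small_model = Sequential([
    Conv2D(32, (5, 5), activation='relu', input_shape=(32, 32, 3)),
    MaxPooling2D((2, 2)),
    Conv2D(64, (5, 5), activation='relu'),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(256, activation='relu'),
    Dropout(0.5),
    Dense(100, activation='softmax'),  # class_num = 100
])
small_model.compile(optimizer='adam', loss='categorical_crossentropy',
                    metrics=['accuracy'])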
I am using ResNet50 transfer learning on the Oxford-IIIT Pet Dataset to classify 37 breeds of cats and dogs. The idea is to follow the fastai implementation closely using Keras code. However, while I managed to get training accuracy as high as 90%, I can't get my val_accuracy above a random guess (1/37, or ~3% val_acc).
Any idea how Keras computes the validation accuracy, and how can I improve it? Or is there something wrong with my preprocessing steps? Thanks a lot.
To build my validation set, I use sklearn's StratifiedShuffleSplit to get a balanced split.
# Create a dataframe with labels and filenames
annotations = pd.read_csv("annotation/list.txt", header=None, delim_whitespace=True)
annotations.drop([1, 2, 3], axis=1, inplace=True)
annotations.columns = ["filenames"]

# Create the label column: strip underscores and digits from the filenames
trans = str.maketrans("_0123456789", " " * 11)  # both arguments must have equal length
annotations["labels"] = annotations["filenames"].str.translate(trans).str.strip()
annotations["filenames"] = annotations["filenames"] + ".jpg"

# Creating a validation set
from sklearn.model_selection import StratifiedShuffleSplit

df_array = annotations.to_numpy(copy=True)
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2)
valid_idx = [test for _, test in sss.split(df_array[:, 0], df_array[:, 1])]
validation = annotations.iloc[valid_idx[0]]
annotations.drop(valid_idx[0], inplace=True)
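To sanity-check that the split really is balanced, the class proportions of the two frames can be compared; for a stratified split they should be nearly identical (a small verification sketch using the frames above):

print(annotations["labels"].value_counts(normalize=True).head())
print(validation["labels"].value_counts(normalize=True).head())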
Then I construct my generators and train my model.
import os
import numpy as np
from tensorflow.keras.preprocessing.image import ImageDataGenerator

bs = 64

def normalize(x):
    # ImageNet channel statistics (these expect pixel values already in [0, 1])
    imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape(1, 1, 3)
    imagenet_sd = np.array([0.229, 0.224, 0.225]).reshape(1, 1, 3)
    return (x - imagenet_mean) / imagenet_sd

# note: ImageDataGenerator applies preprocessing_function before rescale,
# so normalize receives raw 0-255 pixels here rather than [0, 1] values
train_datagen = ImageDataGenerator(rescale=1/255.,
                                   horizontal_flip=True,
                                   rotation_range=10,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   brightness_range=(0.9, 1.1),
                                   shear_range=0.1,
                                   preprocessing_function=normalize)

train_generator = train_datagen.flow_from_dataframe(dataframe=annotations,
                                                    directory=os.getcwd(),
                                                    x_col="filenames",
                                                    y_col="labels",
                                                    target_size=(224, 224),
                                                    batch_size=bs,
                                                    )

val_datagen = ImageDataGenerator(rescale=1/255.,
                                 preprocessing_function=normalize)

validation_generator = val_datagen.flow_from_dataframe(dataframe=validation,
                                                       directory=os.getcwd(),
                                                       x_col="filenames",
                                                       y_col="labels",
                                                       target_size=(224, 224),
                                                       batch_size=bs,
                                                       )
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras import optimizers
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Flatten, BatchNormalization, Dropout

base_model = ResNet50(include_top=False, weights="imagenet")

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Flatten()(x)  # note: a no-op here, since global pooling already yields a flat vector
x = BatchNormalization(epsilon=1e-05, momentum=0.1)(x)
x = Dropout(0.25)(x)
x = Dense(512, activation="relu")(x)
x = BatchNormalization(epsilon=1e-05, momentum=0.1)(x)
x = Dropout(0.5)(x)
predictions = Dense(37, activation="softmax")(x)
model = Model(inputs=base_model.input, outputs=predictions)

for layer in base_model.layers:
    layer.trainable = False

lr = 0.001
opti = optimizers.Adam(lr=lr, decay=lr/50)

model.compile(optimizer=opti,
              loss="categorical_crossentropy",
              metrics=["accuracy"])

model.fit_generator(train_generator,
                    epochs=10,
                    validation_data=validation_generator)

for layer in base_model.layers:
    layer.trainable = True

# note: changes to layer.trainable only take effect after the model is
# compiled again; without a recompile this second fit still trains the
# frozen configuration
model.fit_generator(train_generator,
                    epochs=10,
                    validation_data=validation_generator)
After the first 10 epochs, before unfreezing the layers:
loss = 0.2189
acc = 0.9255
val_loss = 5.5082
val_acc = 0.0401