How can I use transfer learning for a Keras regression problem? - python

I am trying to build a CNN using transfer learning and fine-tuning. The task is to build a CNN with Keras, given a dataset of images (photos of houses) and a CSV file (photo names and prices), and to train the CNN with these inputs. But I have a problem that I cannot fix.
This is my code:
import pandas as pd
from google.colab import drive
from sklearn.model_selection import train_test_split
from keras import applications
from keras import optimizers
from keras import backend
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, load_model
from keras.layers import GlobalAveragePooling2D, Dense, Flatten
from matplotlib import pyplot
drive.mount('/content/gdrive')
!unzip -n '/content/gdrive/My Drive/HOUSEPRICES.zip' >> /dev/null
data_path = 'HOUSEPRICES/'
imgs_path = data_path + "images/"
labels_path = data_path + "prices.csv"
labels = pd.read_csv(labels_path, dtype={"prices": "float64"})
seed = 0
train_data, test_data = train_test_split(labels, test_size=0.25, random_state=seed)
dev_data, test_data = train_test_split(test_data, test_size=0.5, random_state=seed)
train_data = train_data.reset_index(drop=True)
dev_data = dev_data.reset_index(drop=True)
test_data = test_data.reset_index(drop=True)
datagen = ImageDataGenerator(rescale=1./255)
img_width = 320
img_height = 240
x_col = 'image_name'
y_col = 'prices'
batch_size = 64
train_dataset = datagen.flow_from_dataframe(dataframe=train_data, directory=imgs_path, x_col=x_col, y_col=y_col, has_ext=True,
class_mode="input", target_size=(img_width,img_height), batch_size=batch_size)
dev_dataset = datagen.flow_from_dataframe(dataframe=dev_data, directory=imgs_path, x_col=x_col, y_col=y_col, has_ext=True,
class_mode="input",target_size=(img_width,img_height), batch_size=batch_size)
test_dataset = datagen.flow_from_dataframe(dataframe=test_data, directory=imgs_path, x_col=x_col, y_col=y_col, has_ext=True,
class_mode="input", target_size=(img_width,img_height), batch_size=batch_size)
base_model = applications.InceptionV3(weights='imagenet', include_top=False, input_shape=(img_width,img_height,3))
for layer in base_model.layers:
    layer.trainable = False
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
predictions = Dense(1, activation='linear')(x)
model = Model(inputs=[base_model.input], outputs=[predictions])
model.summary()
model.compile(loss='mse',
optimizer=optimizers.adam(lr=1e-5),
metrics=['mse'])
model.fit_generator(train_dataset,
epochs=20,
verbose=2,
steps_per_epoch=len(train_data)/batch_size,
validation_data=dev_dataset,
validation_steps=len(dev_data)/batch_size)
test_loss, test_mse = model.evaluate_generator(test_dataset, steps=len(test_data)/batch_size, verbose=1)
And I get this error:
ValueError: Input 0 is incompatible with layer flatten_9: expected
min_ndim=3, found ndim=2
What is the problem with my code? Perhaps I am not building the dataset (images + numerical prices) properly? Or is there a problem with the model architecture? How can I fix the code?

Flatten() converts a higher-dimensional tensor into a 2-dimensional one. If you already have a 2-dimensional tensor, then you don't need Flatten().
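A minimal sketch (shapes assumed for illustration, not from the original post) of why the Flatten is redundant here: GlobalAveragePooling2D already returns a 2-D tensor, so a Flatten placed after it has nothing left to flatten and raises exactly the "expected min_ndim=3, found ndim=2" error from the question.
from keras.layers import Input, GlobalAveragePooling2D

feature_map = Input(shape=(8, 10, 2048))        # e.g. a CNN feature map (H, W, C)
pooled = GlobalAveragePooling2D()(feature_map)  # shape (None, 2048): already 2-D
dense_ready = pooled                            # can be fed straight into a Dense layer
# Flatten()(pooled) would fail, because Flatten expects an input of at least 3 dimensions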

import pandas as pd
from google.colab import drive
from sklearn.model_selection import train_test_split
from keras import applications
from keras import optimizers
from keras import backend
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, load_model
from keras.layers import GlobalAveragePooling2D, Dense, Dropout
from matplotlib import pyplot
drive.mount('/content/gdrive')
!unzip -n '/content/gdrive/My Drive/HOUSEPRICES.zip' >> /dev/null
data_path = 'HOUSEPRICES/'
imgs_path = data_path + "images/"
labels_path = data_path + "prices.csv"
labels = pd.read_csv(labels_path, dtype={"prices": "float64"})
seed = 0
train_data, test_data = train_test_split(labels, test_size=0.25, random_state=seed)
dev_data, test_data = train_test_split(test_data, test_size=0.5, random_state=seed)
train_data = train_data.reset_index(drop=True)
dev_data = dev_data.reset_index(drop=True)
test_data = test_data.reset_index(drop=True)
datagen = ImageDataGenerator(rescale=1./255)
img_width = 320
img_height = 240
x_col = 'image_name'
y_col = 'prices'
batch_size = 64
train_dataset = datagen.flow_from_dataframe(dataframe=train_data, directory=imgs_path, x_col=x_col, y_col=y_col, has_ext=True,
class_mode="other", target_size=(img_width,img_height), batch_size=batch_size)
dev_dataset = datagen.flow_from_dataframe(dataframe=dev_data, directory=imgs_path, x_col=x_col, y_col=y_col, has_ext=True,
class_mode="other",target_size=(img_width,img_height), batch_size=batch_size)
test_dataset = datagen.flow_from_dataframe(dataframe=test_data, directory=imgs_path, x_col=x_col, y_col=y_col, has_ext=True,
class_mode="other", target_size=(img_width,img_height), batch_size=batch_size)
base_model = applications.InceptionV3(weights='imagenet', include_top=False, input_shape=(img_width,img_height,3))
for layer in base_model.layers:
    layer.trainable = False
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(256, activation='relu')(x)
x = Dropout(0.4)(x)
x = Dense(256, activation='relu')(x)
predictions = Dense(1, activation='linear')(x)
model = Model(inputs=[base_model.input], outputs=[predictions])
model.summary()
model.compile(loss='mse',
optimizer=optimizers.adam(lr=1e-5),
metrics=['mse'])
model.fit_generator(train_dataset,
epochs=20,
verbose=2,
steps_per_epoch=len(train_data)/batch_size,
validation_data=dev_dataset,
validation_steps=len(dev_data)/batch_size)
test_loss, test_mse = model.evaluate_generator(test_dataset, steps=len(test_data)/batch_size, verbose=1)

GlobalAveragePooling2D does pooling over the spatial data. The output shape is (batch_size, channels). So, this can be directly fed to a Dense layer without the need for a Flatten. To fix the code, remove this line:
x = Flatten()(x)
Refer to this link for more examples of how to fine-tune your network:
https://keras.io/applications/
class_mode="input" is for auto encoders; that is why there was an error about the target not having the same shape as input.
class_mode = 'other' works because y_col is defined.
https://keras.io/preprocessing/image/#flow_from_dataframe
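As a quick sanity check (a hypothetical snippet, not part of the original answer), you can pull one batch from the generator and confirm that the targets are now raw float prices rather than class labels:
x_batch, y_batch = next(train_dataset)
print(x_batch.shape)  # (batch_size, *target_size, 3)
print(y_batch.shape)  # typically (batch_size,) of float prices, as expected for regression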

Related

Accuracy fixed at 50% keras

I am new to TensorFlow and Keras. I am trying to follow a Keras tutorial (https://www.youtube.com/watch?v=qFJeN9V1ZsI, min. 38:40) and everything seems to work, but as soon as I run the fit, the accuracy stays almost fixed at 50% and I can't understand why. Can someone help me?
Here is the code:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy
import numpy as np
from random import randint
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler
train_labels = []
train_samples = []
for i in range(50):
    random_younger = randint(13,64)
    train_samples.append(random_younger)
    train_labels.append(1)
    random_older = randint(65,100)
    train_samples.append(random_older)
    train_labels.append(0)
for i in range(950):
    random_younger = randint(13,64)
    train_samples.append(random_younger)
    train_labels.append(0)
    random_older = randint(65,100)
    train_samples.append(random_older)
    train_labels.append(1)
train_label = np.array(train_labels)
train_samples = np.array(train_samples)
train_labels, train_labels = shuffle(train_labels, train_labels)
scaler = MinMaxScaler(feature_range=(0,1))
scaled_train_samples = scaler.fit_transform(train_samples.reshape(-1,1))
scaled_train_samples = np.array(scaled_train_samples)
model = Sequential([
    Dense(units=16, input_shape=(1,), activation='relu'),
    Dense(units=32, activation='relu'),
    Dense(units=2, activation='softmax')
])
#model.summary()
train_labels = np.array(train_labels)
scaled_train_samples = np.array(scaled_train_samples)
model.compile(optimizer = Adam(learning_rate=0.01), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x=scaled_train_samples, y=train_labels, batch_size=10, epochs=30, shuffle=True, verbose =2)
input()
You have the code
train_labels, train_labels = shuffle(train_labels, train_labels)
which shuffles the labels but not the train samples. I suspect you want
train_labels, train_samples = shuffle(train_labels, train_samples)
which shuffles the labels and the samples together. I am also curious why, for the first 50 samples, you label younger as 1 and older as 0, and then for the next 950 samples
the labels are reversed?
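A minimal sketch of the suggested fix (variable names taken from the question; random_state is illustrative):
import numpy as np
from sklearn.utils import shuffle

train_samples = np.array(train_samples)
train_labels = np.array(train_labels)
# One call shuffles both arrays with the same permutation, keeping each sample paired with its label.
train_samples, train_labels = shuffle(train_samples, train_labels, random_state=0)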

Keras: one-hot for labels in `image_dataset_from_directory`

I am trying to do a binary image classification using efficientNet. The following is my code.
import tensorflow as tf  # needed for tf.keras.Model and tf.keras.optimizers.Adam below
import matplotlib.pyplot as plt
from tensorflow.keras.applications import EfficientNetB0
from tensorflow.keras.preprocessing import image_dataset_from_directory
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from tensorflow.data import AUTOTUNE
DATA_DIR = "img/"
IMG_SIZE = 224
NUM_CLASSES = 2
EPOCH = 50
def train_val_split(DATA_DIR, IMG_SIZE):
    val_data = image_dataset_from_directory(
        DATA_DIR,
        labels="inferred",
        label_mode="binary",
        color_mode="rgb",
        batch_size=32,
        image_size=(IMG_SIZE, IMG_SIZE),
        validation_split=0.2,
        subset="training",
        seed=1
    )
    train_data = image_dataset_from_directory(
        DATA_DIR,
        labels="inferred",
        label_mode="binary",
        color_mode="rgb",
        batch_size=32,
        image_size=(IMG_SIZE, IMG_SIZE),
        validation_split=0.2,
        subset="validation",
        seed=1
    )
    train_data = train_data.cache().prefetch(buffer_size=AUTOTUNE)
    val_data = val_data.cache().prefetch(buffer_size=AUTOTUNE)
    return train_data, val_data
def model_arch(NUM_CLASSES, IMG_SIZE):
    """efficientnet transfer learning"""
    inputs = layers.Input(shape=(IMG_SIZE, IMG_SIZE, 3))
    img_augmentation = Sequential(
        [
            layers.RandomRotation(factor=0.15),
            layers.RandomTranslation(height_factor=0.1, width_factor=0.1),
            layers.RandomFlip(),
            layers.RandomContrast(factor=0.1),
        ],
        name="img_augmentation",
    )
    x = img_augmentation(inputs)
    # model = EfficientNetB0(include_top=False, input_tensor=x, weights="imagenet")
    model = EfficientNetB0(include_top=False, input_tensor=x, weights='model/efficientnetb0_notop.h5')
    # Freeze the pretrained weights
    model.trainable = False
    # Rebuild top
    x = layers.GlobalAveragePooling2D(name="avg_pool")(model.output)
    x = layers.BatchNormalization()(x)
    top_dropout_rate = 0.2
    x = layers.Dropout(top_dropout_rate, name="top_dropout")(x)
    outputs = layers.Dense(NUM_CLASSES, activation="softmax", name="pred")(x)
    # Compile
    model = tf.keras.Model(inputs, outputs, name="EfficientNet")
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
    model.compile(
        optimizer=optimizer,
        loss="binary_crossentropy",
        metrics=["accuracy"]
    )
    return model
if __name__ == "__main__":
    train_data, val_data = train_val_split(DATA_DIR, IMG_SIZE)
    model = model_arch(NUM_CLASSES, IMG_SIZE)
    hist = model.fit(train_data,
                     epochs=EPOCH,
                     validation_data=val_data,
                     verbose=1)
However, I encountered the following error.
ValueError: logits and labels must have the same shape ((None, 2) vs (None, 1))
I found out that this is because the labels loaded by image_dataset_from_directory are not one-hot encoded.
print(train_data)
<PrefetchDataset shapes: ((None, 224, 224, 3), (None, 1)), types: (tf.float32, tf.float32)>
How can I tweak the code for train_data and val_data so that it can fit into the model without issues?
Thanks.
Managed to figure out the answer!
import tensorflow as tf
# one-hot encoding
train_data = train_data.map(lambda x, y: (x, tf.one_hot(y, depth=NUM_CLASSES)))
val_data = val_data.map(lambda x, y: (x, tf.one_hot(y, depth=NUM_CLASSES)))
You need to change label_mode to a better option.
In your case, I think you want label_mode='categorical'.
As @Dr. Snoopy said, the information is here: https://www.tensorflow.org/api_docs/python/tf/keras/utils/image_dataset_from_directory
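For reference, a hedged sketch of that change (arguments copied from the question where possible; the validation split arguments are omitted for brevity):
from tensorflow.keras.preprocessing import image_dataset_from_directory

train_data = image_dataset_from_directory(
    DATA_DIR,
    labels="inferred",
    label_mode="categorical",        # yields one-hot labels of shape (None, NUM_CLASSES)
    image_size=(IMG_SIZE, IMG_SIZE),
    batch_size=32,
)
# One-hot labels with a 2-unit softmax head typically pair with loss="categorical_crossentropy".
# Alternatively, keep label_mode="binary" and use a single sigmoid unit with binary_crossentropy.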

Tensorflow keras: Problem with loading the weights of the optimizer

I have trained the base model to a good accuracy, and now I want to load these weights and use them for a model with a few additional layers, and later for hyperparameter tuning.
First I construct this new model:
input_tensor = Input(shape=train_generator.image_shape)
base_model = applications.ResNet152(weights='imagenet', include_top=False, input_tensor=input_tensor)
for layer in base_model.layers[:]:
    layer.trainable = False
x = Flatten()(base_model.output)
x = Dense(1024, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01),
kernel_initializer=tf.keras.initializers.HeNormal(), kernel_constraint=tf.keras.constraints.UnitNorm(axis=0))(x)
x = LeakyReLU()(x)
x = BatchNormalization()(x)
x = Dropout(rate=0.1)(x)
x = Dense(512, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01),
kernel_initializer=tf.keras.initializers.HeNormal(), kernel_constraint=tf.keras.constraints.UnitNorm(axis=0))(x)
x = LeakyReLU()(x)
x = BatchNormalization()(x)
predictions = Dense(num_classes, activation= 'softmax')(x)
model = Model(inputs = base_model.input, outputs = predictions)
Then I compile it, because that is necessary at this stage: I have to run model.fit with dummy input before I load the weights. (I think; I have tried putting these code blocks in many different orders to make it work, but I have failed each time.)
opt = tfa.optimizers.LazyAdam(lr=0.000074)
model.compile(
loss='sparse_categorical_crossentropy',
optimizer=opt,
metrics=['accuracy']
)
dummy_input = tf.random.uniform([32, 224, 224, 3])
dummy_label = tf.random.uniform([32,])
hist = model.fit(dummy_input, dummy_label)
Then I load the weights for the base model:
base_model.load_weights('/content/drive/MyDrive/MODELS_SAVED/model_RESNET152/model_weights2.h5', by_name=True)
Then I load the weights for the optimizer:
import pickle
with open("/content/drive/MyDrive/weight_values2optimizer.pkl", "rb") as f:
weights = pickle.load(f)
opt = model.optimizer.set_weights(weights)
This results in the following error:
ValueError: You called `set_weights(weights)` on optimizer LazyAdam
with a weight list of length 1245,
but the optimizer was expecting 13 weights.
Provided weights: [63504, array([[[[ 0.00000000e+00, -5.74126025e-04...
Anyone have ideas on how to solve this?
If you have a solution with Adam instead of LazyAdam, that is fine too. (I have no idea whether that would make a difference.)
Edit:
I have tried many new things over the last couple of days, but nothing is working. Here is the entire code as it stands right now. It includes both the part where I am saving and the part where I am loading.
import tarfile
my_tar2 = tarfile.open('test.tgz')
my_tar2.extractall('test') # specify which folder to extract to
my_tar2.close()
import zipfile
with zipfile.ZipFile("/content/tot_train_bremoved2.zip", 'r') as zip_ref:
    zip_ref.extractall("/content/train/")
import pandas as pd
train_info = pd.read_csv("/content/drive/MyDrive/train_info.csv")
test_info = pd.read_csv("/content/drive/MyDrive/test_info.csv")
train_folder = "/content/train"
test_folder = "/content/test/test"
import tensorflow as tf
import tensorflow.keras as keras
from keras.layers import Input, Lambda, Dense, Flatten, BatchNormalization, Dropout, PReLU, GlobalAveragePooling2D, LeakyReLU, MaxPooling2D
from keras.models import Model
from tensorflow.keras.applications.resnet_v2 import ResNet152V2, preprocess_input
from keras import applications
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.losses import sparse_categorical_crossentropy
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping, TensorBoard
import tensorflow_addons as tfa
from sklearn.metrics import confusion_matrix
import numpy as np
import matplotlib.pyplot as plt
num_classes = 423
epochs = 20
batch_size = 32
img_height = 224
img_width = 224
IMAGE_SIZE = [img_height, img_width]
_train_generator = ImageDataGenerator(
rotation_range=180,
zoom_range=0.2,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.3,
horizontal_flip=True,
vertical_flip=True,
preprocessing_function=preprocess_input)
_val_generator = ImageDataGenerator(
preprocessing_function=preprocess_input)
train_generator = _train_generator.flow_from_dataframe(dataframe = train_info,
directory = train_folder, x_col = "filename",
y_col = "artist", seed = 42,
batch_size = batch_size, shuffle = True,
class_mode="sparse", target_size = IMAGE_SIZE)
valid_generator = _val_generator.flow_from_dataframe(dataframe = test_info,
directory = test_folder, x_col = "filename",
y_col = "artist", seed = 42,
batch_size = batch_size, shuffle = True,
class_mode="sparse", target_size = IMAGE_SIZE)
def get_uncompiled_model():
    input_tensor = Input(shape=train_generator.image_shape)
    base_model = applications.ResNet152(weights='imagenet', include_top=False, input_tensor=input_tensor)
    for layer in base_model.layers[:]:
        layer.trainable = True
    x = Flatten()(base_model.output)
    predictions = Dense(num_classes, activation= 'softmax')(x)
    model = Model(inputs = base_model.input, outputs = predictions)
    return model
opt = keras.optimizers.Adam(lr=0.000074)
def get_compiled_model():
    model = get_uncompiled_model()
    model.compile(
        loss='sparse_categorical_crossentropy',
        optimizer=opt,
        metrics=['accuracy']
    )
    return model
earlyStopping = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=2, verbose=1, min_delta=1e-4, mode='min')
model = get_compiled_model()
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
model.fit(
train_generator,
validation_data=valid_generator,
epochs=epochs,
verbose = 1,
steps_per_epoch=len_train // batch_size,
validation_steps=len_test // batch_size,
callbacks=[earlyStopping, reduce_lr]
)
import keras.backend as K
import pickle
model.save_weights('/content/drive/MyDrive/MODELS_SAVED/model_RESNET152/model_weights5.h5')
symbolic_weights = getattr(model.optimizer, 'weights')
weight_values = K.batch_get_value(symbolic_weights)
with open('/content/drive/MyDrive/MODELS_SAVED/optimizer3.pkl', 'wb') as f:
    pickle.dump(weight_values, f)
#Here i am building the new model and its from here i am having problems
input_tensor = Input(shape=train_generator.image_shape)
base_model = applications.ResNet152(weights='imagenet', include_top=False, input_tensor=input_tensor)
for layer in base_model.layers[:]:
    layer.trainable = False
x = Flatten()(base_model.output)
x = Dense(512, kernel_regularizer=tf.keras.regularizers.L2(l2=0.01),
kernel_initializer=tf.keras.initializers.HeNormal(),
kernel_constraint=tf.keras.constraints.UnitNorm(axis=0))(x)
x = LeakyReLU()(x)
x = BatchNormalization()(x)
predictions = Dense(num_classes, activation= 'softmax')(x)
model = Model(inputs = base_model.input, outputs = predictions)
model.compile(
loss='sparse_categorical_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
base_model.load_weights('/content/drive/MyDrive/MODELS_SAVED/model_RESNET152/model_weights5.h5', by_name=True)
with open('/content/drive/MyDrive/MODELS_SAVED/optimizer3.pkl', 'rb') as f:
    weight_values = pickle.load(f)
model.optimizer.set_weights(weight_values)
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
epochs = 2
model.fit(
train_generator,
validation_data=valid_generator,
epochs=epochs,
steps_per_epoch=len_train // batch_size,
validation_steps=len_test // batch_size,
verbose = 1,
callbacks=[earlyStopping, reduce_lr]
)
Now I am getting the following error when running this code block (which, in the complete code above, comes right before the model.fit):
with open('/content/drive/MyDrive/MODELS_SAVED/optimizer3.pkl', 'rb') as f:
    weight_values = pickle.load(f)
model.optimizer.set_weights(weight_values)
ValueError: You called `set_weights(weights)` on optimizer Adam with a weight list of length 1245, but the optimizer was expecting 13 weights. Provided weights: [11907, array([[[[ 0.00000000e+00, -8.27514916e-04...
All I am trying to do is save the weights for the model and the optimizer, and then build a new model where I add a few layers and load the weights from the base of the model as well as the weights from the optimizer.
The two models have different architectures, so the weights of one can't be loaded into the other, irrespective of the fact that they inherit the same base model. I think it is a simple case of fine-tuning a model (your saved model, in this case).
What you should do is change the way you create the new model: rather than loading the original ResNet model as the base model with include_top=False, try loading the saved model and implementing your own top. This can be done as:
for layer in saved_model.layers[:]:
    layer.trainable = False
x = Flatten()(saved_model.layers[-2].output)
Here the key thing is saved_model.layers[-2].output, which is the output of the second-to-last layer.
Hope it helps; if not, please clarify your doubts or let me know what I missed.
Please refer to the following notebook:
https://colab.research.google.com/drive/1j_zLqG1zUMi6UYPdc6gtmkJvHuawL4Sk?usp=sharing
{save,load}_weights on a model includes the weights of the optimizer. That would be the preferred way to initialise the optimizer weights.
You can copy the optimizer from one model to another.
The reason you are getting the error above is that the optimizer doesn't allocate its weights until training starts; if you really want to do it manually, just trigger model.fit() for 1 epoch on 1 data point and then set the optimizer weights manually.
You can replace
base_model.load_weights('/content/drive/MyDrive/MODELS_SAVED/model_RESNET152/model_weights5.h5', by_name=True)
with open('/content/drive/MyDrive/MODELS_SAVED/optimizer3.pkl', 'rb') as f:
    weight_values = pickle.load(f)
model.optimizer.set_weights(weight_values)
with:
base_model.load_weights('/content/drive/MyDrive/MODELS_SAVED/model_RESNET152/model_weights5.h5', by_name=True)
model.optimizer = base_model.optimizer
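If you do want to restore the pickled optimizer state, here is a minimal sketch of the "train one step first" idea mentioned above (input shapes assumed from the question; this can only succeed if the new model has the same trainable variables as the one the optimizer state was saved from):
import numpy as np
import pickle

dummy_x = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
dummy_y = np.zeros((1,), dtype="int32")           # one sparse label
model.fit(dummy_x, dummy_y, epochs=1, verbose=0)  # forces the optimizer to build its slot variables

with open('/content/drive/MyDrive/MODELS_SAVED/optimizer3.pkl', 'rb') as f:
    weight_values = pickle.load(f)
model.optimizer.set_weights(weight_values)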
After saving the first model's weights with model.save_weights('name.h5'), you should build a second model exactly like the first one; let's call it model2. Then load the weights you saved before into it with model2.load_weights('name.h5'). Use model.summary() to see the names and number of the first model's layers. For each layer, you need to define a variable and put that layer's weights (and also biases) into it with the get_weights() method. Here is an example:
x1 = model2.layers[1].get_weights()
Here, I put the weights and biases of the first layer (which in my model was a convolution layer) into the variable x1.
x1[0] is a list of the weights of layer #1.
x1[1] is a list of the biases of layer #1.
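A short hedged sketch of that per-layer copy (layer index 1 is just an example; the two layers must have matching shapes):
x1 = model.layers[1].get_weights()   # [kernel, bias] of layer #1 in the first model
model2.layers[1].set_weights(x1)     # write them into the corresponding layer of model2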

How to predict an input image using a trained model in Keras?

I trained a model to classify images from 9 classes and saved it using model.save(). Here is the code I used:
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.layers import Dense, Dropout
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.preprocessing.image import ImageDataGenerator, image
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Define some constant needed throughout the script
N_CLASSES = 9
EPOCHS = 2
PATIENCE = 5
TRAIN_PATH= '/Datasets/Train/'
VALID_PATH = '/Datasets/Test/'
MODEL_CHECK_WEIGHT_NAME = 'resnet_monki_v1_chk.h5'
# Define model to be used we freeze the pre trained resnet model weight, and add few layer on top of it to utilize our custom dataset
K.set_learning_phase(0)
model = ResNet50(input_shape=(224,224,3),include_top=False, weights='imagenet', pooling='avg')
K.set_learning_phase(1)
x = model.output
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
output = Dense(N_CLASSES, activation='softmax', name='custom_output')(x)
custom_resnet = Model(inputs=model.input, outputs = output)
for layer in model.layers:
    layer.trainable = False
custom_resnet.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
custom_resnet.summary()
# 4. Load dataset to be used
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
traingen = datagen.flow_from_directory(TRAIN_PATH, target_size=(224,224), batch_size=32, class_mode='categorical')
validgen = datagen.flow_from_directory(VALID_PATH, target_size=(224,224), batch_size=32, class_mode='categorical', shuffle=False)
# 5. Train Model we use ModelCheckpoint to save the best model based on validation accuracy
es_callback = EarlyStopping(monitor='val_acc', patience=PATIENCE, mode='max')
mc_callback = ModelCheckpoint(filepath=MODEL_CHECK_WEIGHT_NAME, monitor='val_acc', save_best_only=True, mode='max')
train_history = custom_resnet.fit_generator(traingen, steps_per_epoch=len(traingen), epochs= EPOCHS, validation_data=traingen, validation_steps=len(validgen), verbose=2, callbacks=[es_callback, mc_callback])
model.save('custom_resnet.h5')
It successfully trained. To load and test this model on new images, I used the below code:
from keras.models import load_model
import cv2
import numpy as np
class_names = ['A', 'B', 'C', 'D', 'E','F', 'G', 'H', 'R']
model = load_model('custom_resnet.h5')
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
img = cv2.imread('/path to image/4.jpg')
img = cv2.resize(img,(224,224))
img = np.reshape(img,[1,224,224,3])
classes = np.argmax(model.predict(img), axis = -1)
print(classes)
It outputs:
[1915]
Why doesn't it give out the actual value of the class, and why is the index so big? I only have 9 classes!
Thanks
You have saved the original ResNet base instead of your custom model.
You did model.save('custom_resnet.h5'),
but model is the base network: model = ResNet50(input_shape=(224,224,3), include_top=False, weights='imagenet', pooling='avg').
You need to save the custom model with custom_resnet.save('custom_resnet.h5').
That's why, when you call predict, you're getting (1, 2048)-shaped features, not actual predictions.
Updated code:
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.layers import Dense, Dropout
from keras.models import Model
from keras.optimizers import Adam, SGD
from keras.preprocessing.image import ImageDataGenerator, image
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Define some constant needed throughout the script
N_CLASSES = 9
EPOCHS = 2
PATIENCE = 5
TRAIN_PATH= '/Datasets/Train/'
VALID_PATH = '/Datasets/Test/'
MODEL_CHECK_WEIGHT_NAME = 'resnet_monki_v1_chk.h5'
# Define model to be used we freeze the pre trained resnet model weight, and add few layer on top of it to utilize our custom dataset
K.set_learning_phase(0)
model = ResNet50(input_shape=(224,224,3),include_top=False, weights='imagenet', pooling='avg')
K.set_learning_phase(1)
x = model.output
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
output = Dense(N_CLASSES, activation='softmax', name='custom_output')(x)
custom_resnet = Model(inputs=model.input, outputs = output)
for layer in model.layers:
    layer.trainable = False
custom_resnet.compile(Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
custom_resnet.summary()
# 4. Load dataset to be used
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
traingen = datagen.flow_from_directory(TRAIN_PATH, target_size=(224,224), batch_size=32, class_mode='categorical')
validgen = datagen.flow_from_directory(VALID_PATH, target_size=(224,224), batch_size=32, class_mode='categorical', shuffle=False)
# 5. Train Model we use ModelCheckpoint to save the best model based on validation accuracy
es_callback = EarlyStopping(monitor='val_acc', patience=PATIENCE, mode='max')
mc_callback = ModelCheckpoint(filepath=MODEL_CHECK_WEIGHT_NAME, monitor='val_acc', save_best_only=True, mode='max')
train_history = custom_resnet.fit_generator(traingen, steps_per_epoch=len(traingen), epochs= EPOCHS, validation_data=traingen, validation_steps=len(validgen), verbose=2, callbacks=[es_callback, mc_callback])
custom_resnet.save('custom_resnet.h5')
Inference code:
from keras.models import load_model
import cv2
import numpy as np
class_names = ['A', 'B', 'C', 'D', 'E','F', 'G', 'H', 'R']
model = load_model('custom_resnet.h5')
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
img = cv2.imread('/path to image/4.jpg')
img = cv2.resize(img,(224,224))
img = np.reshape(img,[1,224,224,3])
classes = np.argmax(model.predict(img), axis = -1)
print(classes)
Use
np.argmax(model.predict(img)[0], axis=-1)
i.e., read from index zero of the model.predict output.
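A hedged sketch of turning that index into a label (note that the index follows the training generator's class order in traingen.class_indices, which may not match a hand-written class_names list):
preds = model.predict(img)            # shape (1, 9) once the custom model, not the base, is saved and loaded
idx = int(np.argmax(preds[0]))
print(idx, class_names[idx])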

Keras model with high accuracy but low val_acc

I am using ResNet50 transfer learning on the Oxford-IIIT Pet Dataset to classify 37 breeds of cats and dogs. The idea is to follow the fastai implementation closely using Keras code. However, while I get a training accuracy as high as 90%, I can't seem to raise my val_accuracy above a random guess (1/37, or ~3% val_acc).
Any idea how Keras computes the validation accuracy and how I can improve it? Or is there something wrong with my preprocessing steps? Thanks a lot.
To build my validation set, I use sklearn's StratifiedShuffleSplit to get a balanced split.
# Create dataframe with labels and filenames
annotations = pd.read_csv("annotation/list.txt",header=None,delim_whitespace=True)
annotations.drop([1,2,3],axis=1, inplace=True)
annotations.columns = ["filenames"]
# Create label columns
trans = str.maketrans("_0123456789"," ")
annotations["labels"] = annotations["filenames"].str.translate(trans).str.strip()
annotations["filenames"] = annotations["filenames"] +".jpg"
# Creating a validation set
from sklearn.model_selection import StratifiedShuffleSplit
df_array = annotations.to_numpy(copy=True)
sss = StratifiedShuffleSplit(n_splits = 1, test_size=0.2)
valid_idx = [test for _,test in sss.split(df_array[:,0],df_array[:,1])]
validation = annotations.iloc[valid_idx[0]]
annotations.drop(valid_idx[0], inplace=True)
Then, constructing my generator and training my model.
from tensorflow.keras.preprocessing.image import ImageDataGenerator
bs = 64
def normalize(x):
    imagenet_mean = np.array([0.485, 0.456, 0.406]).reshape(1,1,3)
    imagenet_sd = np.array([0.229, 0.224, 0.225]).reshape(1,1,3)
    return (x - imagenet_mean)/imagenet_sd
train_datagen = ImageDataGenerator(rescale=1/255.,
horizontal_flip = True,
rotation_range=10,
width_shift_range = 0.1,
height_shift_range =0.1,
brightness_range =(0.9,1.1),
shear_range =0.1,
preprocessing_function=normalize)
train_generator = train_datagen.flow_from_dataframe(dataframe=annotations,
directory =os.getcwd(),
x_col="filenames",
y_col="labels",
target_size = (224,224),
batch_size = bs,
)
val_datagen = ImageDataGenerator(rescale=1/255.,
preprocessing_function=normalize)
validation_generator = val_datagen.flow_from_dataframe(dataframe=validation,
directory =os.getcwd(),
x_col="filenames",
y_col="labels",
target_size = (224,224),
batch_size=bs,
)
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras import optimizers
from tensorflow.keras import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Flatten, BatchNormalization, Dropout
base_model = ResNet50(include_top=False,weights="imagenet")
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Flatten()(x)
x = BatchNormalization(epsilon=1e-05,momentum=0.1)(x)
x = Dropout(0.25)(x)
x = Dense(512,activation="relu")(x)
x = BatchNormalization(epsilon=1e-05,momentum=0.1)(x)
x = Dropout(0.5)(x)
predictions = Dense(37,activation="softmax")(x)
model = Model(inputs=base_model.input,outputs=predictions)
for layer in base_model.layers:
    layer.trainable = False
lr= 0.001
opti = optimizers.Adam(lr=lr, decay=lr/50)
model.compile(optimizer=opti,
loss="categorical_crossentropy",
metrics=["accuracy"])
model.fit_generator(train_generator,
epochs=10,
validation_data = validation_generator)
for layer in base_model.layers:
    layer.trainable = True
model.fit_generator(train_generator,
epochs=10,
validation_data = validation_generator)
After the 10 epochs before unfreezing my layers, I get:
loss = 0.2189
acc = 0.9255
val_loss = 5.5082
val_acc = 0.0401
