How to train on imbalanced data with Keras? - python

I am trying to experiment on the ISIC 2019 data as a newbie. First, I downloaded the training data and split it into three parts (train, test, and validation), and every dataset folder contains two subfolders, benign and malignant. In short, I moved all categories into the benign folders except the melanoma category; the melanoma images are inside the malignant folders. After the split, the data is imbalanced: the training set has 16596 benign images and 3629 malignant images. I trained on this data and could not get a good result for the malignant class; my precision for malignant was about 0.18. I used ResNet50 to train my model, and I would like to ask how I can train my model without data augmentation or oversampling. I am also trying a decaying learning rate at the moment, and it does not seem to give a good result either.
import os
import math
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input
from tensorflow.keras.layers import Dense, GlobalMaxPooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import roc_curve
train_examples = 20225
test_examples = 2551
validation_examples = 2555
img_height = img_width = 224
channel = 3
batch_size = 32
base_model = ResNet50(weights = 'imagenet' , include_top = False, input_shape = (img_height, img_width, channel))
x = base_model.output
x = GlobalMaxPooling2D()(x)
x = Dense(1, activation= 'sigmoid')(x)
model = Model(inputs=base_model.input, outputs=x)
model.summary()
train_datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.10,
    height_shift_range=0.10,
    zoom_range=0.10,
    horizontal_flip=True,
    preprocessing_function=preprocess_input,
    fill_mode='nearest',
)
validation_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
train_gen = train_datagen.flow_from_directory(
    "dataset/train/",
    target_size=(img_height, img_width),
    batch_size=batch_size,
    color_mode="rgb",
    class_mode="binary",
    shuffle=True,
    seed=123,
)
validation_gen = validation_datagen.flow_from_directory(
    "dataset/validation/",
    target_size=(img_height, img_width),
    batch_size=batch_size,
    color_mode="rgb",
    class_mode="binary",
    shuffle=True,
    seed=123,
)
test_gen = test_datagen.flow_from_directory(
    "dataset/test/",
    target_size=(img_height, img_width),
    batch_size=batch_size,
    color_mode="rgb",
    class_mode="binary",
    shuffle=True,
    seed=123,
)
METRICS = [
    keras.metrics.Precision(name="precision"),
    keras.metrics.Recall(name="recall"),
    keras.metrics.AUC(name="auc"),
]
model.compile(
    optimizer=Adam(learning_rate=3e-4),
    loss=keras.losses.BinaryCrossentropy(from_logits=False),
    metrics=METRICS,
)
history = model.fit(
    train_gen,
    epochs=50,
    verbose=1,
    validation_data=validation_gen,
    callbacks=[keras.callbacks.ModelCheckpoint("isic_binary_model")],
)
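One way to handle the imbalance without augmentation or oversampling is the class_weight argument of model.fit, which scales the loss per class. A minimal sketch using the counts above (the 0/1 mapping should be checked against train_gen.class_indices):
# Rough inverse-frequency class weights from the training counts above
# (16596 benign, 3629 malignant); the minority class gets the larger weight.
n_benign, n_malignant = 16596, 3629
total = n_benign + n_malignant
class_weight = {
    0: total / (2.0 * n_benign),       # benign (majority class)
    1: total / (2.0 * n_malignant),    # malignant (minority class)
}
history = model.fit(
    train_gen,
    epochs=50,
    validation_data=validation_gen,
    callbacks=[keras.callbacks.ModelCheckpoint("isic_binary_model")],
    class_weight=class_weight,
)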

Related

Keras ImageDataGenerator not handling symlink files

I am trying to train a DenseNet121 model on chest X-ray images using tensorflow.keras, with ImageDataGenerator for augmentation. I have directories of symlinks to the images that I believe are set up in the correct format for ImageDataGenerator:
Train
    Normal
    Abnormal
Val
    Normal
    Abnormal
However, when I call model.fit(), it throws FileNotFoundError: [Errno 2] No such file or directory: '.\\Train\\Normal\\00017275_014.png', which is a symlink file. Passing follow_links=True to .flow_from_directory() did not solve the problem, and calling os.path.islink() with that path returns True.
In addition, the generators report:
Found 84090 images belonging to 2 classes.
Found 28030 images belonging to 2 classes.
Any suggestions? Code below:
from tensorflow.keras.applications.densenet import preprocess_input
from tensorflow.keras import Model,layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.metrics import binary_accuracy
from tensorflow.keras.losses import binary_crossentropy
batch_size = 64
train_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    brightness_range=[0.75, 1.25],
    horizontal_flip=True,
)
train_generator = train_datagen.flow_from_directory(
    directory='.\\Train',
    color_mode='rgb',
    classes=['Normal', 'Abnormal'],
    class_mode='binary',
    batch_size=batch_size,
    target_size=(224, 224),
    follow_links=True,
)
val_datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
)
val_generator = val_datagen.flow_from_directory(
    directory='.\\Val',
    color_mode='rgb',
    class_mode='binary',
    classes=['Normal', 'Abnormal'],
    batch_size=batch_size,
    target_size=(224, 224),
    follow_links=True,
)
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
model_name = "Imagenet DenseNet121 on NIH full dataset 375 locked brightness flip.h5"
callback_checkpoint = [
    EarlyStopping(monitor='val_loss', patience=10, verbose=1),
    ModelCheckpoint(
        model_name,
        verbose=1,
        monitor='val_loss',
        save_best_only=True,
    ),
]
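# NOTE: the DenseNet121 model itself is built elsewhere in the script and is not shown in the question.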
model.compile(
    optimizer=Adam(),
    # optimizer=SGD(learning_rate=0.001, momentum=0.9, decay=0.0001),
    loss='binary_crossentropy',
    metrics=['binary_accuracy'],
)
history = model.fit(
    train_generator,
    steps_per_epoch=1250,
    epochs=50,
    validation_data=val_generator,
    validation_steps=437,
    callbacks=callback_checkpoint,  # already a list, so no extra brackets needed
)
>>> os.path.islink(os.path.join(os.getcwd(), "Train", "Normal", "00017275_014.png"))
True
At least for pathlib.Path, the combined notation with a leading dot and double backslashes is not valid, and I guess this is the problem here as well. Try using forward slashes. Instead of directory = ".\\Val", try
directory = "./Val"
or simply
directory = "Val"

How do I solve this problem during transfer learning with ResNet-50?

I am training ResNet-50 to classify 9 classes, using the following transfer-learning code to train the model.
Train and test loss and accuracy seem fine, but when I test the network against new images I see lots of mistakes.
I feel like the model is not learning well. Could you please let me know what is wrong in my approach, and how I can solve this problem?
NUM_CLASSES = 9
CHANNELS = 3
IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'categorical_crossentropy'
LOSS_METRICS = ['accuracy']
NUM_EPOCHS = 100
EARLY_STOP_PATIENCE = 3
STEPS_PER_EPOCH_TRAINING = 10
STEPS_PER_EPOCH_VALIDATION = 10
BATCH_SIZE_TRAINING = 100
BATCH_SIZE_VALIDATION = 100
BATCH_SIZE_TESTING = 1
from tensorflow.python.keras.applications import ResNet50
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense
resnet_weights_path = '/path/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
model = Sequential()
model.add(ResNet50(include_top = False, pooling = RESNET50_POOLING_AVERAGE, weights = resnet_weights_path))
model.add(Dense(NUM_CLASSES, activation = DENSE_LAYER_ACTIVATION))
model.layers[0].trainable = False
from tensorflow.python.keras import optimizers
sgd = optimizers.SGD(lr = 0.01, decay = 1e-6, momentum = 0.9, nesterov = True)
model.compile(optimizer = sgd, loss = OBJECTIVE_FUNCTION, metrics = LOSS_METRICS)
model.summary()
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
image_size = IMAGE_RESIZE
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = data_generator.flow_from_directory(
    '/path train folder/train',
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='categorical')
validation_generator = data_generator.flow_from_directory(
    '/path test folder/test',
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_VALIDATION,
    class_mode='categorical')
(BATCH_SIZE_TRAINING, len(train_generator), BATCH_SIZE_VALIDATION, len(validation_generator))
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
cb_early_stopper = EarlyStopping(monitor = 'val_loss', patience = EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath = '/path/best.hdf5', monitor = 'val_loss', save_best_only = True, mode = 'auto')
fit_history = model.fit_generator(
    train_generator,
    steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
    epochs=NUM_EPOCHS,
    validation_data=validation_generator,
    validation_steps=STEPS_PER_EPOCH_VALIDATION,
    callbacks=[cb_checkpointer, cb_early_stopper]
)
model.load_weights("/path/best.hdf5")
model.save('transfer_resnet.h5')
print(fit_history.history.keys())
This can have many causes.
For one, it is possible that your data set is too small or not varied enough.
What you can try is to add a few more Dense layers in the top section, for example as sketched below.
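A minimal sketch of that idea, reusing the constants and weight path from the question (the layer width and dropout rate are just guesses):
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, Dropout
from tensorflow.python.keras.applications import ResNet50

model = Sequential()
model.add(ResNet50(include_top=False, pooling=RESNET50_POOLING_AVERAGE, weights=resnet_weights_path))
model.add(Dense(512, activation='relu'))    # extra hidden layer on top of the frozen base
model.add(Dropout(0.5))                     # some regularisation for the new head
model.add(Dense(NUM_CLASSES, activation=DENSE_LAYER_ACTIVATION))
model.layers[0].trainable = False           # keep the pretrained ResNet50 base frozen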

TypeError: flow() missing 1 required positional argument: 'x'

I tried to run this code but I'm still stuck.
In this code I use a pretrained ResNet50 network and try to extract deep features and predict my classes.
Please, if anyone has had this error, let me know how I can fix it?
Thanks
NUM_CLASSES = 2
CHANNELS = 3
IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'binary_crossentropy'
LOSS_METRICS = ['accuracy']
NUM_EPOCHS = 10
EARLY_STOP_PATIENCE = 3
STEPS_PER_EPOCH_TRAINING = 10
STEPS_PER_EPOCH_VALIDATION = 10
batch_size = 32
from keras.models import load_model
BATCH_SIZE_TRAINING = 100
BATCH_SIZE_VALIDATION = 100
image_size = IMAGE_RESIZE
WEIGHTS_PATH = "C:\\Users\\Desktop\\RESNET \\resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5"
model = Sequential()
train_data_dir = "C:\\Users\\Desktop\\RESNET"
model = ResNet50(include_top=True, weights='imagenet')
model.layers.pop()
model = Model(input=model.input,output=model.layers[-1].output)
model.summary()
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.01, momentum=0.9), metrics=['binary_accuracy'])
data_dir = "C:\\Users\\Desktop\\RESNET"
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
train_datagenerator = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0.2)
train_generator = train_datagenerator.flow_from_directory(
    train_data_dir,
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='categorical', shuffle=False, subset='training')  # set as training data
validation_generator = train_datagenerator.flow_from_directory(
    train_data_dir,  # same directory as training data
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='categorical', shuffle=False, subset='validation')  # set as validation data
generator = data_generator.flow(batch_size=batch_size)
batch_size = 32
X_train = np.zeros((len(train_generator.images_ids_in_subset),2048))
Y_train = np.zeros((len(train_generator.images_ids_in_subset),2))
nb_batches = int(len(train_generator.images_ids_in_subset) / batch_size) + 1
Let me know if you need any more details about this problem.
Thanks for your help
Delete this line:
generator = data_generator.flow(batch_size=batch_size)
It does nothing if your code ends there.
The flow method is for transforming data that is already in RAM, but your code doesn't have any.
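For reference, flow() expects images and labels already loaded as NumPy arrays, roughly like this (a sketch with made-up data):
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

x = np.random.rand(100, 224, 224, 3)           # 100 fake images already in RAM
y = np.random.randint(0, 2, size=(100,))       # fake binary labels

datagen = ImageDataGenerator(rescale=1./255, horizontal_flip=True)
generator = datagen.flow(x, y, batch_size=32)  # note the required positional argument x

x_batch, y_batch = next(generator)             # one augmented batch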

ValueError: Error when checking target: expected flatten_1 to have shape (2048,) but got array with shape (2,)

I'm trying to run this code, and I have this error:
ValueError: Error when checking target: expected flatten_4 to have shape (2048,) but got array with shape (2,)
NUM_CLASSES = 2
CHANNELS = 3
IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'categorical_crossentropy'
NUM_EPOCHS = 10
EARLY_STOP_PATIENCE = 3
STEPS_PER_EPOCH_TRAINING = 10
STEPS_PER_EPOCH_VALIDATION = 10
BATCH_SIZE_TRAINING = 100
BATCH_SIZE_VALIDATION = 100
BATCH_SIZE_TESTING = 1
resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
model = Sequential()
train_data_dir = "C:\\Users\\Desktop\\RESNET"
model = ResNet50(include_top=True, weights='imagenet')
model.layers.pop()
model = Model(input=model.input,output=model.layers[-1].output)
model.summary()
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.01, momentum=0.9), metrics= ['binary_accuracy'])
data_dir = "C:\\Users\\Desktop\\RESNET"
batch_size = 32
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
image_size = IMAGE_RESIZE
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
def append_ext(fn):
    return fn + ".jpg"
from os import listdir
from os.path import isfile, join
dir_path = os.path.dirname(os.path.realpath(__file__))
train_dir_path = dir_path + '\data'
onlyfiles = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
data_labels = [0, 1]
t = []
maxi = 25145
LieOffset = 15799
i = 0
while i < maxi:  # t = tuple
    if i <= LieOffset:
        t.append(label['Lie'])
    else:
        t.append(label['Truth'])
    i = i + 1
train_datagenerator = ImageDataGenerator(
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    validation_split=0.2)
train_generator = train_datagenerator.flow_from_directory(
    train_data_dir,
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='categorical', shuffle=False, subset='training')
validation_generator = train_datagenerator.flow_from_directory(
    train_data_dir,  # same directory as training data
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='categorical', shuffle=False, subset='validation')
(BATCH_SIZE_TRAINING, len(train_generator), BATCH_SIZE_VALIDATION, len(validation_generator))
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
cb_early_stopper = EarlyStopping(monitor = 'val_loss', patience = EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath = '../working/best.hdf5', monitor = 'val_loss', save_best_only = True, mode = 'auto')
from sklearn.model_selection import ParameterGrid  # sklearn.grid_search was removed; ParameterGrid now lives here
param_grid = {'epochs': [5, 10, 15], 'steps_per_epoch': [10, 20, 50]}
grid = ParameterGrid(param_grid)
# keep the run with the best val_loss as the final model
for params in grid:
    print(params)
    fit_history = model.fit_generator(
        train_generator,
        steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
        epochs=NUM_EPOCHS,
        validation_data=validation_generator,
        validation_steps=STEPS_PER_EPOCH_VALIDATION,
        callbacks=[cb_checkpointer, cb_early_stopper])
model.load_weights("../working/best.hdf5")
The error suggests that your model's output layer should have 2 nodes, whereas it has 2048, because you are using the output of the avg_pool layer of the ResNet50 model as your model output. So you can add a Dense layer with 2 nodes on top of the avg_pool layer to solve the problem.
model = ResNet50(include_top=True, weights='imagenet')
print(model.summary())
x = model.get_layer('avg_pool').output
predictions = Dense(2, activation='sigmoid')(x)
model = Model(input = model.input, output = predictions)
print(model.summary())
As I'm not quite sure what type of problem you are solving, I assumed multi-label (2) classification, since your data label shape is (2,).
However, if you are solving a binary classification problem, then you need to change your labels so that each is either 1 or 0. So change class_mode='categorical' to class_mode='binary' in both train_generator and validation_generator. In that case the model output layer should have 1 node:
predictions = Dense(1, activation='sigmoid')(x)

Shape mismatch between InceptionResNetV2 model & weights

I am using InceptionResNetV2 for image classification with its respective weights, but I get this error:
ValueError: You are trying to load a weight file containing 449 layers into a model with 448 layers.
img_ht = 96
img_wid = 96
img_chnl = 3
import tensorflow as tf
from tensorflow import keras
from keras_preprocessing.image import ImageDataGenerator
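# NOTE: train_datagen is defined elsewhere in the question's script (an ImageDataGenerator, presumably with validation_split set, since subset= is used below).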
train_generator = train_datagen.flow_from_directory(
    directory="../input/cassava-disease/train/train/",
    subset="training",
    batch_size=49,
    seed=42,
    shuffle=False,
    class_mode="categorical",
    target_size=(img_ht, img_wid))
valid_generator = train_datagen.flow_from_directory(
    directory="../input/cassava-disease/train/train/",
    subset="validation",
    batch_size=49,
    seed=42,
    shuffle=False,
    class_mode="categorical",
    target_size=(img_ht, img_wid))
from keras.applications import InceptionResNetV2 as InceptionResNetV2
base_model = keras.applications.InceptionResNetV2(
    input_shape=(img_ht, img_wid, 3),
    include_top=False,
    weights="../input/inception/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5")
base_model.trainable = False
print(base_model.summary())
Got the answer: it's because of the line include_top = False. With include_top = False the model is built without the classification top, so it has fewer layers than the full weight file, which is exactly the 449-vs-448 mismatch in the error. A sketch of the fix is below. (I'm quite new to Python and machine learning.)
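The fix, in sketch form (the local weight path below is an assumption): with include_top=False you need weights that match the headless model, i.e. the corresponding "_notop" weights file, or simply weights="imagenet" so Keras fetches the right file itself.
# Option 1: load the matching "no-top" weights file (path is hypothetical)
base_model = keras.applications.InceptionResNetV2(
    input_shape=(img_ht, img_wid, 3),
    include_top=False,
    weights="../input/inception/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5")

# Option 2: let Keras download the correct weights for this configuration
base_model = keras.applications.InceptionResNetV2(
    input_shape=(img_ht, img_wid, 3),
    include_top=False,
    weights="imagenet")
base_model.trainable = False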
