My image folder is set up as one main folder containing 130 subfolders, each with its own images:
folder_with_130_folders/
    folder1_class1/
        img_in_class1_folder.jpg
        img_in_class1_folder.jpg
        ...
    folder130_class130/
        img_in_class130_folder.jpg
        img_in_class130_folder.jpg
train_dataset = prod_images.flow_from_directory(directory, target_size=(225, 225), class_mode='categorical', subset='training', save_format='jpg')
validation_set = prod_images.flow_from_directory(directory, target_size=(225, 225), class_mode='categorical', subset='validation', save_format='jpg')
(x_train, y_train), (x_test, y_test) = train_dataset.next(), validation_set.next()
model = models.Sequential()
model.add(layers.Conv2D(filters=128, kernel_size=2, padding='same', activation='relu', input_shape=(225, 225, 3)))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(filters=16, kernel_size=2, padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(256, activation=None)) # No activation on final dense layer
model.add(layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=1))) # L2 normalize embeddings
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss=tfa.losses.TripletSemiHardLoss())
model_fit = model.fit(train_dataset, steps_per_epoch=4, epochs=20, verbose=1, validation_data=validation_set)
As stated in the docs regarding the tfa.losses.TripletSemiHardLoss:
We expect labels y_true to be provided as 1-D integer Tensor with
shape [batch_size] of multi-class integer labels. And embeddings
y_pred must be 2-D float Tensor of l2 normalized embedding vectors
You should, therefore, use sparse integer labels (sparse_categorical) instead of one-hot encoded labels (categorical). Here is a working example:
import tensorflow as tf
import tensorflow_addons as tfa
import pathlib
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)
batch_size = 32
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    label_mode='int',  # sparse integer labels
    subset="training",
    seed=123,
    image_size=(225, 225),
    batch_size=batch_size)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Conv2D(filters=128, kernel_size=2, padding='same', activation='relu', input_shape=(225, 225, 3)))
model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Conv2D(filters=64, kernel_size=2, padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Conv2D(filters=16, kernel_size=2, padding='same', activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(256, activation=None)) # No activation on final dense layer
model.add(tf.keras.layers.Lambda(lambda x: tf.math.l2_normalize(x, axis=-1))) # L2 normalize embeddings
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), loss=tfa.losses.TripletSemiHardLoss())
model_fit = model.fit(train_ds, epochs=5, verbose=1)
In your case, you have to set the parameter class_mode to 'sparse':
train_dataset = prod_images.flow_from_directory(directory, target_size=(225, 225), class_mode='sparse', subset='training', save_format='jpg')
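The validation subset is created the same way; a minimal sketch reusing your prod_images generator (which must have been constructed with a validation_split for the subset argument to work):
validation_set = prod_images.flow_from_directory(directory, target_size=(225, 225), class_mode='sparse', subset='validation', save_format='jpg')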
Update: fit_generator has been changed to fit.
The dataset contains 12,507 samples in total: 6,840 True and 7,056 False.
The dataset configuration and the model are the same in both cases.
Model A is:
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same', input_shape=(192, 112, 1)))
model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
.
.
.
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.summary()
model.compile(optimizer=tf.keras.optimizers.Adam(), loss=tf.keras.losses.categorical_crossentropy, metrics=['accuracy'])
history = model.fit(train_X, train_Y, epochs=15, batch_size=64, validation_split=0.2, verbose=2)
The accuracy when using fit is close to 100%.
Model B is:
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
train_gen = train_datagen.flow_from_directory(
    TRAIN_PATH,
    target_size=(192, 112),
    classes=['true', 'false'],
    class_mode='categorical',
    batch_size=64,
    color_mode='grayscale',
    shuffle=True)
val_gen = val_datagen.flow_from_directory(
    VAL_PATH,
    target_size=(192, 112),
    classes=['true', 'false'],
    class_mode='categorical',
    batch_size=64,
    color_mode='grayscale',
    shuffle=False)
test_gen = val_datagen.flow_from_directory(
    VAL_PATH,
    target_size=(192, 112),
    classes=['true', 'false'],
    class_mode='categorical',
    batch_size=64,
    color_mode='grayscale',
    shuffle=False)
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same', input_shape=(192, 112, 1)))
model.add(Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2, 2), padding='same'))
.
.
.
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.categorical_crossentropy,
metrics=['accuracy'])
model.summary()
history = model.fit(
    train_gen,
    validation_data=val_gen,
    epochs=15,
    steps_per_epoch=len(train_gen)//64,  # 64 is the batch_size
    validation_steps=len(val_gen)//64,
    verbose=2)
model.evaluate(test_gen,
               batch_size=64,
               verbose=2)
In this case, the accuracy is close to 50%.
Aren't model A and model B doing the same thing? Why do they produce different accuracies?
Update:
Here is how the data is loaded for model A:
true_Data_list = np.array(os.listdir(TRUE_DIR))
false_Data_list = np.array(os.listdir(FALSE_DIR))
# -------------------------------- Load True Set ----------------------------------------- #
for index in range(len(true_Data_list)):  # build the list of image files
    path_true = os.path.join(TRUE_DIR, true_Data_list[index])
    image_true = ImageOps.grayscale(Image.open(path_true))  # True image
    image_true = np.reshape(np.asarray(image_true), (192, 112, 1)).astype(np.float32)
    data_X.append([np.array(image_true)])
    data_Y.append([1, 0])
Loading the False set is repeated in the same way. Then I reshape and split:
data_X = np.reshape(data_X, (-1, 192, 112, 1)).astype(np.float32)
data_Y = np.reshape(data_Y, (-1, 2)).astype(np.int8)
train_X, test_X, train_Y, test_Y = train_test_split(data_X, data_Y, test_size=0.25, shuffle=True, random_state=625)
In the case of model B:
TRAIN_PATH = 'dataset/train'
VAL_PATH = 'dataset/val'
TEST_PATH = 'dataset/test'
Each of these paths is then used in the generators, e.g.
train_gen = train_datagen.flow_from_directory(TRAIN_PATH, ...
with each path containing true and false subfolders.
[Screenshot: verbose=2 training output for one epoch]
fit_generator is deprecated, although the two approaches should give roughly the same results. I think you have a typo:
train_batch_size = len(train_X) // 64
test_batch_size = len(test_X) // 64
These values are meant to be steps per epoch, but while fitting you used them as if they were batch sizes. Note that len() of a Keras generator already returns the number of batches, so dividing len(train_gen) by 64 again means only a small fraction of the data is seen in each epoch. I am not sure whether you augmented the data in both cases, but in the first approach you use a large batch size; the number of data points you see in an epoch differs between the two cases. The second approach seems more reliable, and you can use fit() with generators as well.
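Since len() of a Keras generator is already the number of batches, a minimal sketch of the corrected call (assuming your existing train_gen and val_gen) would be:
history = model.fit(
    train_gen,
    validation_data=val_gen,
    epochs=15,
    steps_per_epoch=len(train_gen),  # already counted in batches
    validation_steps=len(val_gen),
    verbose=2)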
I was working on a classification problem with an input of shape (700, 50, 34) (batch, steps, features).
def convLSTM_model(X_train, y_train, X_test, y_test, num_classes, loss,
                   batch_size=68, units=128, learning_rate=0.005,
                   epochs=20, dropout=0.2, recurrent_dropout=0.2):
    class myCallback(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs={}):
            if logs.get('acc') > 0.9:
                print("\nReached 90% accuracy so cancelling training!")
                self.model.stop_training = True

    callbacks = myCallback()
    model = tf.keras.models.Sequential()
    model.add(Masking(mask_value=0.0, input_shape=(None, X_train.shape[0], X_train.shape[1], X_train.shape[2])))
    model.add(ConvLSTM2D(filters=40, kernel_size=(3, 3), padding="same", return_sequences=True))
    model.add(BatchNormalization())
    model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True)))
    model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True)))
    model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout)))
    model.add(Dense(30, activation='relu'))
    model.add(Dense(10, activation='relu'))
    model.add(Dense(num_classes, activation='softmax'))
    adamopt = tf.keras.optimizers.Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
    model.compile(loss=loss,
                  optimizer=adamopt,
                  metrics=['accuracy'])
    history = model.fit(X_train, y_train,
                        batch_size=batch_size,
                        epochs=epochs,
                        validation_data=(X_test, y_test),
                        verbose=1,
                        callbacks=[callbacks])
    score, acc = model.evaluate(X_test, y_test,
                                batch_size=batch_size)
    yhat = model.predict(X_test)
    return history, yhat
Apparently, changing the input_shape and simply adding
model.add(ConvLSTM2D(filters=40, kernel_size=(3, 3), padding="same", return_sequences=True))
model.add(BatchNormalization())
does not work.
ValueError: Dimension 1 in both shapes must be equal, but are 708 and 501264. Shapes are [?,708,50,40] and [?,501264,2500,40]. for 'conv_lst_m2d/while/Select' (op: 'Select') with input shapes: [?,501264,2500,40], [?,708,50,40], [?,708,50,40].
How should I approach this?
Are there any suggestions for the number of filters?
Try this one; I believe the CNN layers should come before the masking layer.
model = tf.keras.models.Sequential()
model.add(Conv1D(filters=32, kernel_size=8, strides=1, activation="relu", padding="same", input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=16, kernel_size=8, strides=1, activation="relu", padding="same"))
model.add(MaxPooling1D(pool_size=2))
model.add(Masking(mask_value=0.0))
model.add(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True))
model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True)))
model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True)))
model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout)))
model.add(Dense(30, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
I tried to use your code. You will get better answers on Stack Overflow when you supply a minimal working example. I built some test data with the same shape you used.
When using a ConvLSTM2D layer I could reproduce your problem, but since your data simply doesn't fit a Conv2D layer, I substituted it with a plain LSTM layer.
If you want to convolve over the timesteps instead, I would suggest using a Conv1D layer first and LSTMs afterwards.
Minimum working example:
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import (Masking, ConvLSTM2D, Conv1D, LSTM,
                                     Bidirectional, Dense, BatchNormalization)
batch_size=68
units=128
learning_rate=0.005
epochs=20
dropout=0.2
recurrent_dropout=0.2
X_train = np.random.rand(700, 50, 34)
y_train = np.random.choice([0, 1], 700)
X_test = np.random.rand(100, 50, 34)
y_test = np.random.choice([0, 1], 100)
loss = tf.losses.binary_crossentropy
model = tf.keras.models.Sequential()
model.add(Masking(mask_value=0.0, input_shape=(X_train.shape[1], X_train.shape[2])))
# uncomment the line beneath for convolution
# model.add(Conv1D(filters=32, kernel_size=8, strides=1, activation="relu", padding="same"))
model.add(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True))
model.add(BatchNormalization())
model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True)))
model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True)))
model.add(Bidirectional(LSTM(units, dropout=dropout, recurrent_dropout=recurrent_dropout)))
model.add(Dense(30, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
adamopt = tf.keras.optimizers.Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-8)
model.compile(loss=loss,
optimizer=adamopt,
metrics=['accuracy'])
history = model.fit(X_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(X_test, y_test),
verbose=1)
score, acc = model.evaluate(X_test, y_test,
batch_size=batch_size)
yhat = model.predict(X_test)
I've built my model, but I don't know how to fit it. Could anyone give me some tips on using ImageDataGenerator with my model when working with images, or is it better to use another approach, such as the Dataset API?
import tensorflow as tf
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
# const
IMG_HEIGHT = 150
IMG_WIDTH = 150
BATCH = 32
EPOCHS = 5
train_dir = "data/images/train"
val_dir = "data/images/val"
# train image data generator
train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255,
    horizontal_flip=True,
    dtype=tf.float32
)
train_generator.flow_from_directory(
    directory=train_dir,
    target_size=(IMG_WIDTH, IMG_HEIGHT)
)
# validation image data generator
val_generator = tf.keras.preprocessing.image.ImageDataGenerator(
    rescale=1./255,
    horizontal_flip=False
)
val_generator.flow_from_directory(
    directory=val_dir,
    target_size=(IMG_WIDTH, IMG_HEIGHT)
)
# count train cats & dogs
train_cats_len = len(os.listdir(os.path.join(train_dir, "cats")))
train_dogs_len = len(os.listdir(os.path.join(train_dir, "dogs")))
train_len = train_cats_len + train_dogs_len
# count validation cats & dogs
val_cats_len = len(os.listdir(os.path.join(val_dir, "cats")))
val_dogs_len = len(os.listdir(os.path.join(val_dir, "dogs")))
val_len = val_cats_len + val_dogs_len
# build a model
model = tf.keras.Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(),
    Dropout(0.2),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(64, activation='relu'),
    Dense(2, activation='sigmoid')
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# fit?
# history = model.fit_generator(
# train_generator,
# steps_per_epoch=train_len // BATCH,
# epochs=EPOCHS,
# validation_data=val_generator,
# validation_steps=val_len // BATCH,
# verbose=True
# )
# raises error:
# ValueError: Failed to find data adapter that can handle input: <class 'tensorflow.python.keras.preprocessing.image.ImageDataGenerator'>, <class 'NoneType'>
My directory architecture:
data-
|-images-
|-train-
|-cats
|-dogs
|-val-
|-cats
|-dogs
PS: I found an article where the same method is used and everything seems to work, but not in my case.
Your problem is that you have the code
train_generator.flow_from_directory(
    directory=train_dir,
    target_size=(IMG_WIDTH, IMG_HEIGHT)
)
You need to change that to
train_generator = train_generator.flow_from_directory(
    directory=train_dir,
    target_size=(IMG_WIDTH, IMG_HEIGHT)
)
Do the same for the val_generator. In addition, the default class_mode for flow_from_directory is 'categorical', so in model.compile you should specify the loss as 'categorical_crossentropy', and the activation of your 2-node output layer should be 'softmax'. As an aside, I think your model may not perform very well, as it may be a bit too simple to capture the features of the data. I suggest adding several more convolutional layers with more filters. An example of a more complex model is shown below.
model = tf.keras.Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
    MaxPooling2D(),
    Conv2D(32, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(64, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Conv2D(128, 3, padding='same', activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(.3),
    Dense(64, activation='relu'),
    Dropout(.3),
    Dense(2, activation='softmax')
])
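# Compile with categorical_crossentropy to match class_mode='categorical'
# (compile call added for completeness; 'adam' is carried over from the
# question's own compile call)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])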
history = model.fit(train_generator,
                    validation_data=val_generator,
                    steps_per_epoch=100,
                    epochs=15,
                    validation_steps=50,
                    verbose=2)
You can follow the example in the Colab notebook.
I'm working on a multi-class image classification problem in Keras, using the dog-breeds dataset on Kaggle. My training accuracy for 12 breeds is 95%, yet my validation accuracy is only 50%. It looks like the model is overfitting, but I'm not sure what I would need to do to prevent it.
Here's my basic training setup
from keras.utils.np_utils import to_categorical
from keras.layers import Conv2D, Activation, MaxPooling2D
from keras import optimizers
from keras.layers.normalization import BatchNormalization
img_width, img_height = 224, 224
datagen_top = ImageDataGenerator(
    rotation_range=180,
    width_shift_range=0.2,
    height_shift_range=0.2,
    rescale=1./255,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
generator_top = datagen_top.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical',
    shuffle=False)
nb_train_samples = len(generator_top.filenames)
num_classes = len(generator_top.class_indices)
train_data = bottleneck_features_train
# get the class labels for the training data, in the original order
train_labels = generator_top.classes
# https://github.com/fchollet/keras/issues/3467
# convert the training labels to categorical vectors
train_labels = to_categorical(train_labels, num_classes=num_classes)
generator_top = datagen_top.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode=None,
    shuffle=False)
nb_validation_samples = len(generator_top.filenames)
validation_data = bottleneck_features_validation
validation_labels = generator_top.classes
validation_labels = to_categorical(
    validation_labels, num_classes=num_classes)
input_shape = train_data.shape[1:]
model = Sequential()
model.add(Flatten(input_shape=input_shape))
model.add(Dense(num_classes, activation='softmax'))
model.compile(optimizer=optimizers.RMSprop(lr=2e-4),
loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_data, train_labels,
epochs=epochs,
batch_size=batch_size,
callbacks=[],
validation_data=(validation_data, validation_labels))
model.save_weights(top_model_weights_path)
(eval_loss, eval_accuracy) = model.evaluate(
validation_data, validation_labels, batch_size=batch_size, verbose=1)
The notebook is on Colab:
https://colab.research.google.com/drive/13RzXpxE-yMEuMFPHnmBpzD1gFXWxVyXK
A single-layer network isn't going to fly for an image classification problem; it will never be able to generalize because there is no capacity to do so. Try expanding the network with a few more layers, and maybe try a CNN.
Example:
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
activation='relu',
input_shape=input_shape))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.RMSprop(),
metrics=['accuracy'])
This usually happens when you have too many layers and the resulting dimensionality (after striding and pooling) is lower than the minimum input size (convolutional kernel) of a subsequent layer.
What is the image size of the dog-breeds data?
Have you made sure that the reshaping works correctly?
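A quick way to check: each 2x2 pooling halves the spatial size, so you can trace how far the feature maps shrink before a kernel no longer fits. A minimal sketch, assuming the 224x224 inputs from the training setup above:
# Trace spatial size under repeated 2x2 max-pooling; a 3x3 'valid'
# convolution needs the size to stay >= 3
size = 224  # img_width / img_height from the question
for n_pools in range(1, 8):
    size //= 2
    print(f"after {n_pools} poolings: {size}x{size}")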
The code shown below is giving me a ValueError:
model = Sequential()
model.add(Conv2D(32, (8, 8),
                 padding='valid',
                 strides=1,
                 activation="relu",
                 input_shape=(256, 256, 3)))
The error I'm getting is:
ValueError: Layer conv2d_37 was called with an input that isn't a symbolic tensor. Received type: <class 'tensorflow.python.framework.ops.Tensor'>. Full input: [<tf.Tensor 'conv2d_37_input:0' shape=(?, 256, 256, 3) dtype=float32>]. All inputs to the layer should be tensors.
I'm using TensorFlow 1.2.1 and Keras 2.1.5, and I get this error while running the main script.
Please help with this.
The complete code is below:
def cnn_model(X_train, y_train, kernel_size, nb_filters, channels, nb_epoch, batch_size, nb_classes, nb_gpus):
    model = Sequential()
    model.add(Conv2D(nb_filters, (kernel_size[0], kernel_size[1]),
                     padding='valid',
                     strides=1,
                     input_shape=(img_rows, img_cols, channels),
                     activation="relu"))
    model.add(Conv2D(nb_filters, (kernel_size[0], kernel_size[1]), activation="relu"))
    model.add(Conv2D(nb_filters, (kernel_size[0], kernel_size[1]), activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    print("Model flattened out to: ", model.output_shape)
    model.add(Dense(128))
    model.add(Activation('sigmoid'))
    model.add(Dropout(0.25))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    model = multi_gpu_model(model, gpus=nb_gpus)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    stop = EarlyStopping(monitor='val_acc',
                         min_delta=0.001,
                         patience=2,
                         verbose=0,
                         mode='auto')
    tensor_board = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
    model.fit(X_train, y_train, batch_size=batch_size, epochs=nb_epoch,
              verbose=1,
              validation_split=0.2,
              class_weight='auto',
              callbacks=[stop, tensor_board])
    return model
Thank you