model.fit_generator : 'tuple' object cannot be interpreted as an integer

I was doing dog-vs-cat classification using deep learning. When I fit the model using fit_generator, I get the following error:
'tuple' object cannot be interpreted as an integer
I don't know where I'm going wrong. My full code is below.
I was following the tutorial at https://data-flair.training/blogs/cats-dogs-classification-deep-learning-project-beginners/ and my code is the same, but I am getting the error!
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, Activation, BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
model=Sequential()
model.add(keras.Input(shape=(128,128,3)))
model.add(layers.Conv2D(32, 3, activation="relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(layers.Conv2D(64,3,activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(layers.Conv2D(128,3,activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512,activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(2,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='rmsprop',metrics=['accuracy'])
train_df, validate_df = train_test_split(df, test_size = 0.2, random_state = 42)
train_df = train_df.reset_index(drop=True)
validate_df = validate_df.reset_index(drop=True)
from keras.callbacks import EarlyStopping,ReduceLROnPlateau
earlystop=EarlyStopping(patience=10)
learning_rate_reduction=ReduceLROnPlateau(monitor='val_acc',patience=2,verbose=1,factor=0.5,min_lr=0.00001)
callbacks=[earlystop,learning_rate_reduction]
train_datagen = ImageDataGenerator(rotation_range=15,
                                   rescale=1./255,
                                   shear_range=0.1,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1)
train_generator = train_datagen.flow_from_dataframe(train_df,
                                                    "/content/drive/MyDrive/Cat_Dog/dogs-vs-cats/train/train/",
                                                    x_col='filename',
                                                    y_col='category',
                                                    target_size=Image_Size,
                                                    class_mode='categorical',
                                                    batch_size=batch_size)
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_dataframe(validate_df,
                                                              "/content/drive/MyDrive/Cat_Dog/dogs-vs-cats/train/train/",
                                                              x_col='filename',
                                                              y_col='category',
                                                              target_size=Image_Size,
                                                              class_mode='categorical',
                                                              batch_size=batch_size)
test_datagen = ImageDataGenerator(rotation_range=15,
                                  rescale=1./255,
                                  shear_range=0.1,
                                  zoom_range=0.2,
                                  horizontal_flip=True,
                                  width_shift_range=0.1,
                                  height_shift_range=0.1)
test_generator = train_datagen.flow_from_dataframe(train_df,
                                                   "/content/drive/MyDrive/Cat_Dog/dogs-vs-cats/test1",
                                                   x_col='filename',
                                                   y_col='category',
                                                   target_size=Image_Size,
                                                   class_mode='categorical',
                                                   batch_size=batch_size)
df["category"] = df["category"].replace({0:'cat',1:'dog'})
train_df, validate_df = train_test_split(df, test_size=0.20, random_state=42)
train_df = train_df.reset_index(drop=True)
validate_df = validate_df.reset_index(drop=True)
total_train=train_df.shape[0]
total_validate=validate_df.shape[0]
batch_size=15
epochs=10
history = model.fit_generator(train_generator,
                              epochs=epochs,
                              validation_data=validation_generator,
                              validation_steps=total_validate//batch_size,
                              steps_per_epoch=total_train//batch_size,
                              callbacks=callbacks)

Not sure if this is the cause, but there is one issue I notice. Cat vs. dog is a binary classification problem, so the setup should be either
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
train_generator = train_datagen.flow_from_dataframe(...
    class_mode='binary'
)
validation_generator = validation_datagen.flow_from_dataframe(
    ...
    class_mode='binary',
    ...
)
test_generator = train_datagen.flow_from_dataframe(
    ...
    class_mode='binary'
)
or,
model.add(Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop', metrics=['accuracy'])
train_generator = train_datagen.flow_from_dataframe(...
    class_mode='categorical'
)
validation_generator = validation_datagen.flow_from_dataframe(
    ...
    class_mode='categorical',
    ...
)
test_generator = train_datagen.flow_from_dataframe(
    ...
    class_mode='categorical'
)

Related

Using datagen.flow_from_directory with image segmentation and number of classes

I used "flow_from_directory" but my "lose" is not decreasing. I notice When I run "fit_generator". Its says there is 1 classes, even though my mask have 3 classes.
My question is, do we need to indicate in the "datagen.flow_from_directory" how many number of classes? do yo see any mistake in the "datagen.flow_from_directory" call:
My directory structure as shown below:
My code is shown below:
inputs = tf.keras.layers.Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name="input_image")
model = tf.keras.applications.ResNet50(input_tensor=inputs, weights=None, include_top=True)
LR = 0.0001
optim = keras.optimizers.Adam(LR)
dice_loss_se2 = sm.losses.DiceLoss()  # sm: the segmentation_models package
mae = tf.keras.losses.MeanAbsoluteError()
metrics = [mae, sm.metrics.IOUScore(threshold=0.5), sm.metrics.FScore(threshold=0.5), dice_loss_se2]
model.compile(optimizer=optim, loss=dice_loss_se2, metrics=metrics)
image_datagen = ImageDataGenerator()
mask_datagen = ImageDataGenerator()
image_generator = image_datagen.flow_from_directory("/mydata/train/image",
                                                    target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                                    class_mode=None)
mask_generator = mask_datagen.flow_from_directory("/mydata/train/mask",
                                                  target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                                  class_mode=None)
train_generator = zip(image_generator, mask_generator)
train_steps = 1212//batch_size
#---------------------------
image_generator_val = image_datagen.flow_from_directory("/mydata/Validation/image",
                                                        target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                                        class_mode=None)
mask_generator_val = mask_datagen.flow_from_directory("/mydata/Validation/mask",
                                                      target_size=(IMAGE_SIZE, IMAGE_SIZE),
                                                      class_mode=None)
val_generator = zip(image_generator_val, mask_generator_val)
val_steps = 250//batch_size
history = model.fit_generator(train_generator,
                              validation_data=val_generator,
                              steps_per_epoch=train_steps,
                              validation_steps=val_steps,
                              epochs=epochs,
                              verbose=1)
Your problem is in your directory structure. What you want is a directory structure as shown below:
mydata
---- train
-------- image
------------ 1.jpg
------------ 2.jpg
-------- mask
------------ 1.png
------------ 2.png
You are only getting one class because the generator only sees the one class folder it is given. So just move your images as shown in the directory structure above.
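For reference, here is a minimal sketch of the paired generators against that layout (IMAGE_SIZE as in the question; the shared seed and the grayscale masks are extra assumptions, added so the shuffled image and mask batches stay aligned):
from keras.preprocessing.image import ImageDataGenerator

seed = 1  # same seed in both calls keeps images and masks in the same order
image_generator = ImageDataGenerator().flow_from_directory(
    "/mydata/train/image",  # with class_mode=None the folder still needs one subfolder of images
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    class_mode=None,
    seed=seed)
mask_generator = ImageDataGenerator().flow_from_directory(
    "/mydata/train/mask",
    target_size=(IMAGE_SIZE, IMAGE_SIZE),
    color_mode="grayscale",
    class_mode=None,
    seed=seed)
train_generator = zip(image_generator, mask_generator)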
You can also do it another way: specify a subset for training or validation, and point the generator at a folder whose directory structure looks like the one below.
F:\datasets\downloads\example\image
F:\datasets\downloads\example\image\Bee
F:\datasets\downloads\example\image\Shiny Jumbo
F:\datasets\downloads\example\image\Sleepy cat
...
def gen():
    train_generator = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
    train_generator = train_generator.flow_from_directory(
        directory,
        target_size=(150, 150),
        batch_size=32,
        class_mode='binary',  # None / categorical / binary
        subset='training')
    target = np.array([[i] for i in range(10)])
    return train_generator
train_generator = gen()
val_generator = train_generator
inputs = tf.keras.layers.Input(shape=(150, 150, 3), name="input_image")
model = tf.keras.applications.ResNet50(input_tensor=inputs, weights=None, include_top=True)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Optimizer
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
optimizer = tf.keras.optimizers.Nadam(
    learning_rate=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-07,
    name='Nadam')  # 0.00001
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Loss Fn
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# 1
# lossfn = tf.keras.losses.MeanSquaredLogarithmicError(reduction=tf.keras.losses.Reduction.AUTO, name='mean_squared_logarithmic_error')
# 2
lossfn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Model Summary
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
model.compile(optimizer=optimizer, loss=lossfn, metrics=['accuracy'])
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
: Training
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""
history = model.fit_generator(train_generator,
                              validation_data=val_generator,
                              steps_per_epoch=train_steps,
                              validation_steps=val_steps,
                              epochs=epochs,
                              verbose=1)
input('...')
None
Found 10 images belonging to 10 classes.

Tensorflow: `y` argument is not supported when using `keras.utils.Sequence` as input

I am creating a mask-detection model with 3 classes: "CorrectMask", "UncorrectMask", "NoMask". I am building my CNN, but I get the following error:
Traceback (most recent call last):
  File "/home/andrea/Scrivania/Biometrics/covid_mask_train.py", line 70, in <module>
    model.fit(train_generator, 25)
  File "/home/andrea/.local/lib/python3.9/site-packages/keras/utils/traceback_utils.py", line 67, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "/home/andrea/.local/lib/python3.9/site-packages/keras/engine/data_adapter.py", line 919, in __init__
    raise ValueError("`y` argument is not supported when using "
ValueError: `y` argument is not supported when using `keras.utils.Sequence` as input.
and this is my code to create my CNN:
datagen = ImageDataGenerator(
    validation_split=0.3,
    rescale=1./255,
    horizontal_flip=True,
    zoom_range=0.2,
    brightness_range=[1, 2]
)
train_generator = datagen.flow_from_directory(
    DATASET_DIR,
    target_size=DIM_IMG,
    batch_size=BATCH_SIZE,
    class_mode="binary",
    subset="training"
)
test_generator = datagen.flow_from_directory(
    DATASET_DIR,
    target_size=DIM_IMG,
    batch_size=BATCH_SIZE,
    class_mode="binary",
    subset="validation"
)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3,3), padding='same',activation='relu', input_shape=(224,224, 3)))
model.add(MaxPool2D(pool_size=(2,2), strides=2))
model.add(Dropout(0.5))
model.add(Conv2D(64, kernel_size=(3,3), padding='same',activation='relu', ))
model.add(MaxPool2D(pool_size=(2,2), strides=2))
model.add(Dropout(0.5))
model.add(Conv2D(128, kernel_size=(3,3), padding='same',activation='relu', ))
model.add(MaxPool2D(pool_size=(2,2), strides=2))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(256,activation='relu'))
model.add(Dense(128,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1,activation='softmax')) # I use softmax because I have more than two classes
model.summary()
model.compile(optimizer = "adam", loss = "binary_crossentropy", metrics = ["accuracy"])
model.fit(train_generator, EPOCHS)
metrics_train = model.evaluate(train_generator)
metrics_test = model.evaluate(test_generator)
print(f"TRAIN_SET: {metrics_train}")
print("--------------------------------------------")
print(f"TEST_SET: {metrics_test}")
# save the model
model.save("model_MaskDetect_25_epochs.h5")
print("Saved!")
I've read various things on Stack Overflow too, but I can't figure out how to apply them to my case. Can someone help me?
Change your fit call to set the epochs parameter explicitly:
model.fit(train_generator, epochs=EPOCHS)
What is happening is that fit is using EPOCHS as its second positional parameter, which is y, the argument the error is complaining about.
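For illustration, a short sketch of why the positional argument goes wrong (the signature is abridged from the Keras docs linked below):
# Model.fit's signature begins: fit(x=None, y=None, batch_size=None, epochs=1, ...)
# so a bare second positional argument binds to y, not epochs:
model.fit(train_generator, EPOCHS)           # EPOCHS lands in y -> ValueError
model.fit(train_generator, epochs=EPOCHS)    # what was intended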
Keras docs

Input error concatenating two CNN branches

I'm trying to implement a 3D facial recognition algorithm using CNNs with multiple classes. I have an image generator for rgb images, and an image generator for depth images (grayscale). As I have two distinct inputs, I made two different CNN models, one with shape=(height, width, 3) and another with shape=(height, width, 1). Independently I can fit the models with its respective image generator, but after concatenating the two branches and merging both image generators, I got this warning and error:
WARNING:tensorflow:Model was constructed with shape (None, 400, 400, 1) for input KerasTensor(type_spec=TensorSpec(shape=(None, 400, 400, 1), dtype=tf.float32, name='Depth_Input_input'), name='Depth_Input_input', description="created by layer 'Depth_Input_input'"), but it was called on an input with incompatible shape (None, None)
"ValueError: Input 0 of layer Depth_Input is incompatible with the layer: : expected min_ndim=4, found ndim=2. Full shape received: (None, None)"
What can I do to solve this? Thanks.
Here is my code:
height=400
width=400
shape=(height,width)
# ########################### RGB ############################
model_rgb = tf.keras.models.Sequential()
model_rgb.add(Conv2D(filters=16, kernel_size=3, activation='relu', name="RGB_Input", input_shape=(height,width, 3)))
model_rgb.add(MaxPooling2D(pool_size=2))
model_rgb.add(Dropout(0.3))
model_rgb.add(Conv2D(filters=32, kernel_size=3, activation='relu'))
model_rgb.add(MaxPooling2D(pool_size=2))
model_rgb.add(Conv2D(filters=32, kernel_size=3, activation='relu'))
model_rgb.add(MaxPooling2D(pool_size=2))
model_rgb.add(Conv2D(filters=64, kernel_size=3, activation='relu'))
model_rgb.add(MaxPooling2D(pool_size=2))
model_rgb.add(Conv2D(filters=64, kernel_size=3, activation='relu'))
model_rgb.add(MaxPooling2D(pool_size=2))
#model_rgb.add(Dropout(0.2))
model_rgb.add(Conv2D(filters=128, kernel_size=3, activation='relu'))
model_rgb.add(MaxPooling2D(pool_size=2))
#model_rgb.add(Dropout(0.2))
model_rgb.add(Flatten())
model_rgb.add(Dense(units=512, activation='relu'))
model_rgb.add(Dropout(0.3))
model_rgb.add(Dense(units=128, activation='relu'))
model_rgb.add(Dropout(0.3))
# ########################### DEPTH ###########################
model_depth = tf.keras.models.Sequential()
model_depth.add(Conv2D(filters=16, kernel_size=3, activation='relu', name="Depth_Input", input_shape=(height, width, 1)))
model_depth.add(MaxPooling2D(pool_size=2))
model_depth.add(Dropout(0.3))
model_depth.add(Conv2D(filters=16, kernel_size=3, activation='relu'))
model_depth.add(MaxPooling2D(pool_size=2))
model_depth.add(Conv2D(filters=32, kernel_size=3, activation='relu'))
model_depth.add(MaxPooling2D(pool_size=2))
model_depth.add(Conv2D(filters=32, kernel_size=3, activation='relu'))
model_depth.add(MaxPooling2D(pool_size=2))
model_depth.add(Conv2D(filters=64, kernel_size=3, activation='relu'))
model_depth.add(MaxPooling2D(pool_size=2))
model_depth.add(Conv2D(filters=64, kernel_size=3, activation='relu'))
model_depth.add(MaxPooling2D(pool_size=2))
model_depth.add(Flatten())
model_depth.add(Dense(units=512, activation='relu'))
model_depth.add(Dropout(0.3))
model_depth.add(Dense(units=128, activation='relu'))
model_depth.add(Dropout(0.3))
#### Concatenating branches ####
merge = Concatenate()([model_rgb.output, model_depth.output])
merged_out = Dense(units=16, activation='relu')(merge)
merged_out = Dense(units=2, activation='softmax')(merged_out)
merged_model = Model([model_rgb.input, model_depth.input], merged_out)
merged_model.compile(optimizer=Adam(learning_rate=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
history_merged = merged_model.fit(gen_flow,
                                  epochs=70,
                                  shuffle=True)
Here is the code for the generators:
train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=20,
                                   width_shift_range=0.4,
                                   height_shift_range=0.4,
                                   shear_range=0.4,
                                   zoom_range=0.4,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
# ########################### RGB ###########################
print("RGB Generators: \n")
train_generator_rgb = train_datagen.flow_from_directory(directory=train_data_rgb,
                                                        target_size=shape,
                                                        class_mode='categorical',
                                                        batch_size=16)
val_generator_rgb = val_datagen.flow_from_directory(directory=val_data_rgb,
                                                    target_size=shape,
                                                    class_mode='categorical',
                                                    batch_size=12)
# ########################### --- ###########################
# ########################### DEPTH ###########################
print("\n\nDepth Generators: \n")
train_generator_depth = train_datagen.flow_from_directory(directory=train_data_depth,
                                                          target_size=shape,
                                                          color_mode="grayscale",
                                                          class_mode='categorical',
                                                          batch_size=16)
val_generator_depth = val_datagen.flow_from_directory(directory=val_data_depth,
                                                      target_size=shape,
                                                      color_mode="grayscale",
                                                      class_mode='categorical',
                                                      batch_size=12)
# ########################### ----- ###########################
def gen_flow_for_two_inputs(X1, X2):
    while True:
        X1i = train_generator_rgb.next()
        X2i = train_generator_depth.next()
        yield [X1i[0], X2i[1]], X1i[1]
# Create generator
gen_flow = gen_flow_for_two_inputs(train_data_rgb, train_data_depth)
Plotted model of the merged branches:
From comments
The problem was with the union of the generators in gen_flow_for_two_inputs(X1, X2): the depth branch was being fed X2i[1] (that generator's labels) instead of X2i[0] (its images). The correct form is yield [X1i[0], X2i[0]], X1i[1] instead of yield [X1i[0], X2i[1]], X1i[1] (paraphrased from sergio_baixo).
Working code for the generators
train_datagen = ImageDataGenerator(rescale=1./255,
                                   rotation_range=20,
                                   width_shift_range=0.4,
                                   height_shift_range=0.4,
                                   shear_range=0.4,
                                   zoom_range=0.4,
                                   horizontal_flip=True,
                                   fill_mode='nearest')
val_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
# ########################### RGB ###########################
print("RGB Generators: \n")
train_generator_rgb = train_datagen.flow_from_directory(directory=train_data_rgb,
                                                        target_size=shape,
                                                        class_mode='categorical',
                                                        batch_size=16)
val_generator_rgb = val_datagen.flow_from_directory(directory=val_data_rgb,
                                                    target_size=shape,
                                                    class_mode='categorical',
                                                    batch_size=12)
# ########################### --- ###########################
# ########################### DEPTH ###########################
print("\n\nDepth Generators: \n")
train_generator_depth = train_datagen.flow_from_directory(directory=train_data_depth,
                                                          target_size=shape,
                                                          color_mode="grayscale",
                                                          class_mode='categorical',
                                                          batch_size=16)
val_generator_depth = val_datagen.flow_from_directory(directory=val_data_depth,
                                                      target_size=shape,
                                                      color_mode="grayscale",
                                                      class_mode='categorical',
                                                      batch_size=12)
# ########################### ----- ###########################
def gen_flow_for_two_inputs(X1, X2):
    while True:
        X1i = train_generator_rgb.next()
        X2i = train_generator_depth.next()
        yield [X1i[0], X2i[0]], X1i[1]
# Create generator
gen_flow = gen_flow_for_two_inputs(train_data_rgb, train_data_depth)
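One further hedged note: gen_flow is an endless Python generator, so fit also needs steps_per_epoch to know where an epoch ends. A sketch, assuming the training batch size of 16 used in the generators above:
history_merged = merged_model.fit(
    gen_flow,
    steps_per_epoch=train_generator_rgb.samples // 16,  # whole batches per epoch
    epochs=70)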

Shape error for LSTM layer in my image classification model

Hello, I am trying to classify grayscale images (224x224) using an LSTM, but I keep getting shape errors.
my train datagenerator looks like this:
def train_datagenerator(train_batchsize):
    train_datagen = ImageDataGenerator(
        rescale=1/255.0,
        rotation_range=20,
        zoom_range=0.05,
        width_shift_range=0.05,
        height_shift_range=0.05,
        shear_range=0.05,
        horizontal_flip=True,
        fill_mode="nearest")
    train_generator = train_datagen.flow_from_directory(train_dir,
                                                        target_size=(image_size, image_size),
                                                        batch_size=train_batchsize,
                                                        class_mode='categorical')
    return train_generator
this is my code for my model:
def LSTM_model():
    model = Sequential()
    model.add(LSTM(512, input_shape=(224, 224)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('relu'))
    model.add(Dense(50))
    model.add(Activation('sigmoid'))
    model.add(Dense(3))
    model.add(Activation('softmax'))
    model.build()
    model.summary()
    return model
This is my training code:
def train(model):
    train_generator = train_datagenerator(train_batchsize)
    model.compile(loss='categorical_crossentropy',
                  # optimizer='sgd',
                  optimizer='adam',
                  metrics=['acc'])
    train_start = time.clock()
    print('Started training...')
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=train_generator.samples / train_generator.batch_size,
                                  epochs=epochs,
                                  verbose=1)
    train_finish = time.clock()
    train_time = train_finish - train_start
    print('Training completed in {0:.3f} minutes!'.format(train_time / 60))
    print('Saving the trained model...')
    model.save('/content/drive/My Drive/Project/trained_models/rnn_model.h5')
    print("Saved trained model in 'trained_models/ folder'!")
    return model, history
I get this error: Input 0 of layer lstm_5 is incompatible with the layer: expected ndim=3, found ndim=2. Full shape received: [None, 150528]
Please help
I'm not sure, but can you try this:
model.add(LSTM(512, return_sequences=True, input_shape=(224, 224)))
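A further hedged sketch (untested against the asker's data): flow_from_directory yields batches shaped (batch, 224, 224, channels), while the LSTM expects (timesteps, features). With color_mode='grayscale' set in the generator, the channel axis can be dropped with a Reshape layer so each image row becomes one timestep:
from keras.models import Sequential
from keras.layers import Reshape, LSTM, Dense

model = Sequential()
# (224, 224, 1) grayscale input -> (224, 224): 224 timesteps of 224 features
model.add(Reshape((224, 224), input_shape=(224, 224, 1)))
model.add(LSTM(512))
model.add(Dense(1024, activation='relu'))
model.add(Dense(3, activation='softmax'))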

How do I interpret results from Keras predict_generator?

I am performing binary classification of data. I am using predict_generator to obtain the classification results. The input to predict_generator is 44 examples: 22 positive, 22 negative. The following is the output obtained:
[9.98187363e-01 1.81267178e-03]
[5.02341951e-04 9.99497652e-01]
[8.41189444e-01 1.58810586e-01]
[7.26610771e-04 9.99273360e-01]
[9.96826649e-01 3.17337317e-03]
[8.83008718e-01 1.16991334e-01]
[3.84130690e-04 9.99615788e-01]
[8.65039527e-01 1.34960532e-01]
[1.78014021e-03 9.98219788e-01]
[9.96107757e-01 3.89222591e-03]
[6.16264821e-04 9.99383688e-01]
[2.98170745e-03 9.97018337e-01]
[9.92357790e-01 7.64221745e-03]
[9.93237853e-01 6.76209433e-03]
[9.98248339e-01 1.75163767e-03]
[1.17816392e-03 9.98821795e-01]
[9.84322488e-01 1.56775210e-02]
[3.11790430e-03 9.96882081e-01]
[4.62388212e-04 9.99537587e-01]
[1.42699364e-03 9.98572946e-01]
[9.43281949e-01 5.67180961e-02]
[9.98008907e-01 1.99115812e-03]
[4.12312744e-04 9.99587715e-01]
[9.29474115e-01 7.05258474e-02]
[3.37766513e-04 9.99662280e-01]
[1.75693433e-03 9.98243093e-01]
[9.92154586e-04 9.99007881e-01]
[1.87152205e-03 9.98128474e-01]
[9.20654461e-02 9.07934546e-01]
[9.95722532e-01 4.27750358e-03]
[9.96877313e-01 3.12273414e-03]
[9.87601459e-01 1.23985587e-02]
[1.11398198e-01 8.88601840e-01]
[1.48968585e-02 9.85103130e-01]
[6.73048152e-03 9.93269503e-01]
[1.65761902e-03 9.98342395e-01]
[9.94634032e-01 5.36595425e-03]
[5.00697970e-01 4.99302000e-01]
[1.65578525e-03 9.98344183e-01]
[9.68859911e-01 3.11401486e-02]
CODE:
from keras.applications import Xception
from keras.models import Model
from keras.layers import Dense, Input, Dropout
from keras.optimizers import Nadam
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
img_height = 299
img_width = 299
no_of_frames = 15
channels = 3
no_of_epochs = 50
batch_size_value = 60
cnn_base = Xception(input_shape=(img_width, img_height, channels),
                    weights="imagenet", include_top=False, pooling='avg')
cnn_base.trainable = False
hidden_layer_1 = Dense(activation="relu", units=1024)(cnn_base.output)
drop_layer = Dropout(0.2)(hidden_layer_1)
hidden_layer_2 = Dense(activation="relu", units=512)(drop_layer)
outputs = Dense(2, activation="softmax")(hidden_layer_2)
model = Model(cnn_base.input, outputs)
nadam_optimizer = Nadam(lr=0.0001, beta_1=0.9, beta_2=0.999,
                        epsilon=1e-08, schedule_decay=0.004)
model.compile(loss="categorical_crossentropy",
              optimizer=nadam_optimizer, metrics=["accuracy"])
# for data augmentation
train_datagen = ImageDataGenerator(zoom_range=.1, rotation_range=8,
                                   width_shift_range=.2, height_shift_range=.2)
train_generator = train_datagen.flow_from_directory(
    '/home/Train',  # this is the target directory
    target_size=(img_width, img_height),
    batch_size=batch_size_value,
    class_mode="categorical")
validation_generator = ImageDataGenerator().flow_from_directory(
    '/home/Val',
    target_size=(img_width, img_height),
    batch_size=batch_size_value,
    class_mode="categorical")
history = model.fit_generator(
    train_generator,
    validation_data=validation_generator,
    verbose=1,
    epochs=no_of_epochs,
    steps_per_epoch=17,
    validation_steps=7)
Test_generator = ImageDataGenerator().flow_from_directory(
    '/home/Test',
    target_size=(img_width, img_height),
    batch_size=batch_size_value,
    class_mode="categorical")
Prob_val = model.predict_generator(Test_generator)
print(Prob_val)
I assume they are probabilities, but the output contains only 40 rows. How do they correspond to the 44 input examples?
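One hedged note on reading such output (a sketch, not a definitive answer): each row is the softmax over the two classes, so the pair sums to 1. To map rows back to files, the test generator is usually built with shuffle=False so that row order matches test_generator.filenames:
import numpy as np

test_generator = ImageDataGenerator().flow_from_directory(
    '/home/Test',
    target_size=(img_width, img_height),
    batch_size=batch_size_value,
    class_mode="categorical",
    shuffle=False)  # keep prediction rows aligned with test_generator.filenames

Prob_val = model.predict_generator(test_generator)
pred = np.argmax(Prob_val, axis=1)  # index of the larger probability in each row
for fname, p in zip(test_generator.filenames, pred):
    print(fname, p)  # class indices map back via test_generator.class_indices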
