I am trying to figure out why I get a NumPy array of smaller length than expected when I run my model. I have a total of 2000 images, divided into 2 categories of 1000 each. When I pass these images through my VGG16 model for feature extraction, it gives me back features for only 1998 images. Following is my code.
VGG16 model
def load_vgg16(weights_path='vgg16.h5'):
    if K.image_data_format() == 'channels_first':
        inputShape = (3, 256, 256)
    else:
        inputShape = (256, 256, 3)
    model = Sequential()
    model.add(ZeroPadding2D((1, 1), input_shape=inputShape))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, (3, 3), activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    if weights_path:
        model.load_weights(weights_path, by_name=True)
    return model
Feature extraction through bottleneck features:
def save_bottleneck_features(location):
    batch_size = 16
    datagen = ImageDataGenerator(rescale=1./255)
    model = load_vgg16()
    generator = datagen.flow_from_directory(location + '/training-data/',
                                            target_size=(image_width, image_height),
                                            batch_size=16,
                                            class_mode=None,
                                            shuffle=False)
    bottleneck_features_train = model.predict_generator(
        generator, nb_training_samples / batch_size)
    np.save(open(location + '/bottleneck_features_train.npy', 'wb'), bottleneck_features_train)
    # repeat with the validation data
    generator = datagen.flow_from_directory(location + '/validation-data/',
                                            target_size=(image_width, image_height),
                                            batch_size=16,
                                            class_mode=None,
                                            shuffle=False)
    bottleneck_features_validation = model.predict_generator(
        generator, nb_validation_samples / batch_size)
    np.save(open(location + '/bottleneck_features_validation.npy', 'wb'), bottleneck_features_validation)
Running the above functions:
top_model_weights_path = '/top_model_weights.h5'
location = 'newdata'
nb_epoch = 50
# Counting the images present in each class folder of the training folder
training_samples = [len(os.listdir('newdata/training-data/' + i))
                    for i in sorted(os.listdir('newdata/training-data/'))]
nb_training_samples = sum(training_samples)
# Counting the images present in the damaged and whole car folders of the validation folder
validation_samples = [len(os.listdir('newdata/validation-data/' + i))
                      for i in sorted(os.listdir('newdata/validation-data/'))]
nb_validation_samples = sum(validation_samples)
print(training_samples)
print(validation_samples)
save_bottleneck_features('newdata')
When I run the above code, I expect the total length of the array bottleneck_features_train to be 2000, but I get only 1998. Similarly, the total length of bottleneck_features_validation comes out to 1840, but I expect 1836 (927 + 909).
The output for training_samples and validation_samples is below.
[1000, 1000]
[927, 909]
I think I am passing the wrong number of steps to model.predict_generator().
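If that is the culprit, a minimal sketch of the fix I have in mind (an assumption on my part, not a verified solution) is to round the step count up to a whole number inside save_bottleneck_features. In Python 3, / is float division, so nb_training_samples / batch_size passes a non-integer step count; rounding up with math.ceil should make the generator emit every sample exactly once, since flow_from_directory yields a smaller final batch when the sample count is not a multiple of the batch size.
import math

# Integer step counts that cover every sample exactly once (sketch)
steps_train = int(math.ceil(nb_training_samples / float(batch_size)))
steps_validation = int(math.ceil(nb_validation_samples / float(batch_size)))

bottleneck_features_train = model.predict_generator(generator, steps_train)
When I try to label these images using this code: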
training_data = np.load(open(location + '/bottleneck_features_train.npy', 'rb'))
training_label = np.array([0] * training_samples[0] +
                          [1] * training_samples[1])
validation_data = np.load(open(location + '/bottleneck_features_validation.npy', 'rb'))
validation_label = np.array([0] * validation_samples[0] +
                            [1] * validation_samples[1])
and then try to fit the model, I receive an error that the training data and training labels are not of the same length.
How can I fix this problem and start fitting my model? Thanks for any help. Cheers!
Related
I'm using TensorFlow Keras for training my model. My images have shape (224, 224, 3) for training my image classification model. I am using VGG-Face weights, which is why I cannot change the convolutional layers.
This is my code:
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten, Input, Dropout, Conv2D, MaxPooling2D, Activation, \
    BatchNormalization, ZeroPadding2D, Convolution2D
from tensorflow.keras.applications.resnet_v2 import ResNet50V2
from tensorflow.keras.applications.resnet import ResNet50
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import *
from tensorflow.keras.optimizers import Adam
from keras.models import model_from_json
from sklearn.model_selection import train_test_split
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
folder = "images"
train = pd.read_csv("face_attribute_train.csv")
train_d, val_d = train_test_split(train, test_size=0.1, random_state=2)
train_datagen = ImageDataGenerator(rescale=1. / 255.,
                                   rotation_range=40,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   fill_mode='nearest',
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255.)
columns = ['Thick_Lips_Nose']
trainGen = train_datagen.flow_from_dataframe(
    train_d, folder, x_col='image_id', y_col=columns, class_mode='raw', target_size=(224, 224), batch_size=128,
    color_mode='rgb', shuffle=True)
print(trainGen.image_shape)
valGen = test_datagen.flow_from_dataframe(
    val_d, folder, x_col='image_id', y_col=columns, class_mode='raw', target_size=(224, 224), batch_size=128,
    color_mode='rgb', shuffle=True)
model = tf.keras.models.Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(Convolution2D(4096, (7, 7), activation='relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(4096, (1, 1), activation='relu'))
model.add(Dropout(0.5))
model.add(Convolution2D(2622, (1, 1)))
model.add(Flatten())
model.add(Activation('softmax'))
model.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.0001), metrics=['accuracy'])
model.load_weights("vgg_face_weights.h5")
x = Flatten()(model.layers[-3].output)
x = Dense(1748, activation='relu')(x)
x = Dropout(0.3)(x)
x = Dense(874)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dense(256)(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
outputPred = Dense(1, activation='sigmoid')(x)
model2 = Model(inputs=model.layers[0].input, outputs=outputPred)
model2.compile(loss='binary_crossentropy', optimizer=Adam(learning_rate=0.0001), metrics=['accuracy'])
model.save("Model1/")
model2.fit(trainGen, epochs=2, validation_data=valGen)
model2.save("Model2/")
sub_df = pd.read_csv('sampleSubmission.csv')
datagen = ImageDataGenerator(rescale=1./255.)
testGen = datagen.flow_from_dataframe(
    sub_df, folder, x_col='image_id', y_col=None, class_mode=None, target_size=(224, 224), batch_size=128,
    color_mode='rgb', shuffle=False)
preds = []
preds = model.predict(testGen)
sub_df[['Thick_Lips_Nose']] = preds
sub_df.to_csv("Submission.csv", index=False)
sub_df
I am getting this error at line 105; it is raised when fitting the model:
tensorflow/core/kernels/data/generator_dataset_op.cc:107] Error occurred when finalizing GeneratorDataset iterator:
Failed precondition: Python interpreter state is not initialized. The process may be terminated.
[[{{node PyFunc}}]]
I'm trying to train a basic CNN on an image dataset that contains faces of celebrities, with a class assigned to each person. Given that there are about 10,000 classes, I used sparse_categorical_crossentropy rather than one-hot encoding the classes. However, as soon as the network starts training, the loss is stuck at one number, and after several batches it goes to NaN. I tried different scalings of the images and a smaller network, but with no luck. Any clues on what might be causing the NaN?
Function that generates batches:
def Generator(data, label, batch_size):
    url = "../input/celeba-dataset/img_align_celeba/img_align_celeba/"
    INPUT_SHAPE = (109, 109)
    i = 0
    while True:
        image_batch = []
        label_batch = []
        for b in range(batch_size):
            if i == len(data):
                i = 0
                data, label = shuffle(data, label)
            sample = data[i]
            label_batch.append(label[i])
            i += 1
            image = cv2.resize(cv2.imread(url + sample), INPUT_SHAPE)
            image_batch.append(image.astype(float) / 255)
        yield (np.array(image_batch), np.array(label_batch))
The model:
class CNN():
    def __init__(self, train, val, y_train, y_val, batch_size):
        ## Load the batch generators
        self.train_batch_gen = Generator(train, y_train, batch_size)
        self.val_batch_gen = Generator(val, y_val, batch_size)
        self.input_shape = (109, 109, 3)
        self.num_classes = len(np.unique(y_train))
        self.len_train = len(train)
        self.len_val = len(val)
        self.batch_size = batch_size
        self.model = self.buildModel()

    def buildModel(self):
        model = models.Sequential()
        model.add(layers.Conv2D(32, (3, 3), activation='relu', padding="same", input_shape=self.input_shape))
        model.add(layers.Conv2D(64, (3, 3), activation='relu', padding="same", input_shape=self.input_shape))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(64, (3, 3), activation='relu', padding="same"))
        model.add(layers.Conv2D(128, (3, 3), activation='relu', padding="same"))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(96, (3, 3), activation='relu', padding="same"))
        model.add(layers.Conv2D(192, (3, 3), activation='relu', padding="same"))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(128, (3, 3), activation='relu', padding="same"))
        model.add(layers.Conv2D(256, (3, 3), activation='relu', padding="same"))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(160, (3, 3), activation='relu', padding="same"))
        model.add(layers.Conv2D(320, (3, 3), activation='relu', padding="same"))
        model.add(layers.AveragePooling2D(pool_size=(4, 4)))
        model.add(layers.Flatten())
        model.add(layers.Dense(128, activation='tanh'))
        model.add(layers.Dropout(rate=0.1))
        model.add(layers.Dense(self.num_classes, activation="softmax"))  # classification (output) layer
        opt = tf.keras.optimizers.Adam(learning_rate=0.00001)
        model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
        return model

    def trainModel(self, epochs):
        self.model.fit_generator(generator=self.train_batch_gen,
                                 steps_per_epoch=int(self.len_train // self.batch_size),
                                 epochs=epochs,
                                 validation_data=self.val_batch_gen,
                                 validation_steps=int(self.len_val // self.batch_size))
In my case, I used sparse_categorical_crossentropy with labels numbered [1, 2, 3] (3 classes), and it produced NaNs from the start.
When I changed the labels from [1, 2, 3] to [0, 1, 2], the problem disappeared.
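For illustration, a minimal sketch of that remapping (the array values are mine): sparse_categorical_crossentropy expects integer labels in the range [0, num_classes), so a label equal to num_classes indexes one past the last softmax unit and corrupts the loss.
import numpy as np

labels = np.array([1, 2, 3, 2, 1])  # 1-based labels that trigger the NaNs
labels = labels - 1                 # shift to [0, 1, 2]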
Not sure why you are seeing those NaNs. I suspect it has something to do with the tanh activation on your dense layer; I would replace it with relu. I also suggest using more neurons in this dense layer, because 128 is probably small for a 10,000-way output.
If I were you, I would also try a pre-trained model and/or Siamese networks.
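As a sketch of that suggestion (the 1024 width is just an illustrative choice, not a tuned value), the head of buildModel could become:
        model.add(layers.Flatten())
        model.add(layers.Dense(1024, activation='relu'))  # relu instead of tanh, wider layer
        model.add(layers.Dropout(rate=0.1))
        model.add(layers.Dense(self.num_classes, activation="softmax"))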
This looks like an exploding-gradients problem. I would recommend that you check how the weights and gradients are varying. See this: https://github.com/keras-team/keras/issues/2226
Check https://www.dlology.com/blog/how-to-deal-with-vanishingexploding-gradients-in-keras/ for how to spot the exploding-gradient problem and solutions to it. Also try Xavier initialization in your dense layers to prevent exploding gradients.
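For example (a sketch; kernel_initializer is standard Keras API, and glorot_uniform is the Xavier variant, which is also the Keras default for Dense), a dense layer with explicit Xavier initialization looks like:
        model.add(layers.Dense(128, activation='relu',
                               kernel_initializer='glorot_uniform'))  # Xavier/Glorot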
I'm using model.fit_generator and it gives me an error that the input size does not match the expected size. I reshaped the images using image_datagen.flow_from_directory with target_size=(224, 224); I cannot set it to (1, 224, 224) or it gives me another error.
I am not sure how to check my input size when using train_generator = image_datagen.flow_from_directory(target_size=(224, 224)).
train_generator = image_datagen.flow_from_directory(
    'C:/output/train/',
    class_mode="categorical",
    seed=seed,
    batch_size=batch_size,
    target_size=(input_size, input_size),
    color_mode='grayscale',
    shuffle=True)
valid_generator = image_datagen.flow_from_directory(
    'C:/output/valid/',
    class_mode="categorical",
    seed=seed,
    batch_size=batch_size,
    target_size=(input_size, input_size),
    color_mode='grayscale',
    shuffle=True)
# https://github.com/keras-team/keras/blob/master/keras/callbacks.py
class MyCheckPoint(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        loss = logs["loss"]
        val_loss = logs["val_loss"]
        fileName = "model.%02d_%0.5f_%0.5f.h5" % (epoch, loss, val_loss)
        self.model.save(fileName)
#weight_saver = MyCheckPoint()
model = models.getVGGModel(num_classes)
#model = models.getStandardModel(input_size)
model.compile(optimizer=Adam(lr=1e-5, decay=1e-8), loss=keras.losses.categorical_crossentropy)
#model.load_weights("weights.26-1.48.h5")
weight_saver = ModelCheckpoint('weights.{epoch:02d}-{val_loss:.2f}.h5',save_best_only=True, save_weights_only=True)
hist = model.fit_generator(train_generator, validation_data=valid_generator, validation_steps=80, steps_per_epoch=400, epochs=200, callbacks=[weight_saver])
def getVGGModel(num_classes):
    model = Sequential()
    model.add(Dense(32, input_shape=(1, 224, 224)))
    # Reshape((784,), input_shape=(1, 224, 224))
    model.add(Conv2D(64, (3, 3), activation='relu', strides=(1, 1), padding='same', input_shape=(1, 224, 224), data_format="channels_first"))
    model.add(Conv2D(64, (3, 3), activation='relu', strides=(1, 1), padding='same', data_format='channels_first'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), data_format='channels_first'))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same', data_format='channels_first'))
    model.add(Conv2D(128, (3, 3), activation='relu', padding='same', data_format='channels_first'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', data_format='channels_first'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same', data_format='channels_first'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same', data_format='channels_first'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', data_format='channels_first'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same', data_format='channels_first'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same', data_format='channels_first'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', data_format='channels_first'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same', data_format='channels_first'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same', data_format='channels_first'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='same', data_format='channels_first'))
    model.add(Flatten())
    model.add(Dense(4096, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation="softmax"))
    return model
The problem comes from the input of your model:
model.add(Dense(32, input_shape=(1, 224, 224)))
By default, target_size=(224, 224) (with color_mode='grayscale') will give you a tensor of shape (224, 224, 1), with the channel last.
But you are specifying a channels-first input for your model.
Just change your input to:
model.add(Dense(32, input_shape=(224, 224, 1)))
And of course, with that you no longer need to specify the data_format for all your layers; Keras uses channels_last by default.
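If you want to see for yourself what the generator produces (a quick check of mine, using the standard iterator API), pull one batch or inspect the iterator's image_shape attribute:
x_batch, y_batch = next(train_generator)
print(x_batch.shape)                # e.g. (batch_size, 224, 224, 1): channels last
print(train_generator.image_shape)  # (224, 224, 1) for grayscale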
Quick disclaimer: I'm pretty new to Keras, machine learning, and programming in general.
I'm trying to create a basic autoencoder for (currently) a single image. While it seems to run just fine, the output is just a white image. Here's what I've got:
img_height, img_width = 128, 128
input_img = '4.jpg'
output_img = '5.jpg'
# load image
x = load_img(input_img)
x = img_to_array(x) # array with shape (128, 128, 3)
x = x.reshape((1,) + x.shape) # array with shape (1, 128, 128, 3)
# define input shape
input_shape = (img_height, img_width, 3)
model = Sequential()
# encoding
model.add(Conv2D(128, (3, 3), activation='relu', input_shape=input_shape,
                 padding='same'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
# decoding
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D(size=(2,2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(UpSampling2D(size=(2,2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(Conv2D(3, (3, 3), activation='sigmoid', padding='same'))
model.compile(loss='binary_crossentropy', optimizer='adam')
print(model.summary())
checkpoint = ModelCheckpoint("autoencoder-loss-{loss:.4f}.hdf5", monitor='loss', verbose=0, save_best_only=True, mode='min')
model.fit(x, x, epochs=10, batch_size=1, verbose=1, callbacks=[checkpoint])
y = model.predict(x)
y = y[0, :, :, :]
y = array_to_img(y)
save_img(output_img, y)
I've looked at a handful of tutorials for reference, but I still can't figure out what my issue is.
Any guidance/suggestions/help would be greatly appreciated.
Thanks!
This solved the problem. The code was just missing:
x = x.astype('float32') / 255.
astype is a built-in NumPy method that converts the values contained in the array to floats.
This lets us work with decimal values: RGB values are stored as 8-bit integers, so we divide the values in the array by 255 (2^8 - 1) to represent each colour as a decimal value between 0.0 and 1.0.
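For clarity, here is where the fix slots into the original loading code (a sketch reusing the question's own variables):
x = load_img(input_img)
x = img_to_array(x)             # array with shape (128, 128, 3), values 0-255
x = x.astype('float32') / 255.  # the missing step: scale to [0.0, 1.0]
x = x.reshape((1,) + x.shape)   # array with shape (1, 128, 128, 3)
Without the scaling, the sigmoid output layer (range [0, 1]) is trained against targets as large as 255, so the network saturates at 1.0 everywhere, which would explain the all-white image.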
I have built a model with Keras using pre-trained VGG16:
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# load the weights of the VGG16 networks
# (trained on ImageNet, won the ILSVRC competition in 2014)
# note: when there is a complete match between your model definition
# and your weight savefile, you can simply call model.load_weights(filename)
assert os.path.exists(vgg_model), 'Model weights not found (see "weights_path" variable in script).'
f = h5py.File(vgg_model)
for k in range(f.attrs['nb_layers']):
    if k >= len(model.layers):
        # we don't look at the last (fully-connected) layers in the savefile
        break
    g = f['layer_{}'.format(k)]
    weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
    model.layers[k].set_weights(weights)
f.close()
print('Model loaded.')
# build a classifier model to put on top of the convolutional model
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(2, activation='softmax'))
but while running the model with the fit function, it was throwing an exception:
expected sequential_2 to have shape (None, 2) but got array with shape (32, 1)
What was the issue here? (Note: I was using flow_from_directory to train my model.)
Your problem lies in flow_from_directory: you should change to class_mode="categorical". Moreover, your setup for binary classification is not usual. You should either change your final layer to:
top_model.add(Dense(1, activation='sigmoid'))
and then keep loss="binary_crossentropy" and class_mode="binary" in your generator, or (in the second case) keep:
top_model.add(Dense(2, activation='softmax'))
and set loss="categorical_crossentropy" and class_mode="categorical" in your generator.
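A sketch of the second option's generator call (the directory path and sizes below are placeholders of mine, not from the question):
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255)
generator = datagen.flow_from_directory(
    'data/train/',               # placeholder path
    target_size=(224, 224),      # placeholder size
    batch_size=32,
    class_mode='categorical')    # yields one-hot (batch, 2) labels matching the 2-way softmax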
The problem is with your training labels.
It's hard to give an exact answer because you don't show us the type of labels you have or the compilation you made.
I can go ahead and guess that you are compiling with either binary_crossentropy or categorical_crossentropy.
If I'm guessing correctly, let's call your labels 'Y' and prepare them for training with the following code:
from keras.utils import np_utils
Y = np_utils.to_categorical(Y)
A tip: when you are doing binary classification (two classes), you can make your last dense layer output 1 unit instead of 2. In your labels, choose 0 for one class and 1 for the other. This way you can avoid the problem you are dealing with now.
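A sketch of that tip (assuming the rest of top_model stays as in the question):
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))  # 1 unit + 0/1 labels avoids the (None, 2) shape mismatch
with loss='binary_crossentropy' when compiling and class_mode='binary' in the generator.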