I wrote code for a vision transformer to classify mammograms as benign or malignant. After training for 30 epochs, however, the model predicts only one class (benign). All final predictions for the test images fall in the range 0.47 to 0.49.
The code:
datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale = 1./255,
samplewise_center = True,
samplewise_std_normalization = True,
validation_split = 0.1,
rotation_range=180,
shear_range=15,
zoom_range=0.2,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
vertical_flip=True,
fill_mode='reflect')
train_gen = datagen.flow_from_dataframe(dataframe = DF_TRAIN,
directory = TRAIN_PATH,
x_col = 'image_file_path',
y_col = 'pathology',
subset = 'training',
batch_size = BATCH_SIZE,
# seed = 1,
color_mode = 'rgb',
shuffle = True,
class_mode = 'binary',
target_size = (IMAGE_SIZE, IMAGE_SIZE))
valid_gen = datagen.flow_from_dataframe(dataframe = DF_TRAIN,
directory = TRAIN_PATH,
x_col = 'image_file_path',
y_col = 'pathology',
subset = 'validation',
batch_size = BATCH_SIZE,
# seed = 1,
color_mode = 'rgb',
shuffle = False,
class_mode = 'binary',
target_size = (IMAGE_SIZE, IMAGE_SIZE))
test_gen = datagen.flow_from_dataframe(dataframe = DF_TEST,
directory = TEST_PATH,
x_col = 'image_file_path',
y_col = 'pathology',
# subset = 'validation',
batch_size = BATCH_SIZE,
# seed = 1,
color_mode = 'rgb',
shuffle = False,
class_mode = 'binary',
target_size = (IMAGE_SIZE, IMAGE_SIZE))
def mlp(x, hidden_units, dropout_rate):
for units in hidden_units:
x = layers.Dense(units, activation=tf.nn.gelu)(x)
x = layers.Dropout(dropout_rate)(x)
return x
class Patches(layers.Layer):
def __init__(self, patch_size):
super(Patches, self).__init__()
self.patch_size = patch_size
def call(self, images):
batch_size = tf.shape(images)[0]
patches = tf.image.extract_patches(
images=images,
sizes=[1, self.patch_size, self.patch_size, 1],
strides=[1, self.patch_size, self.patch_size, 1],
rates=[1, 1, 1, 1],
padding="VALID",
)
patch_dims = patches.shape[-1]
patches = tf.reshape(patches, [batch_size, -1, patch_dims])
return patches
def get_config(self):
config = super().get_config().copy()
config.update({
'patch_size': self.patch_size,
})
return config
class PatchEncoder(layers.Layer):
def __init__(self, num_patches, projection_dim):
    super(PatchEncoder, self).__init__()
    self.num_patches = num_patches
    self.projection_dim = projection_dim  # kept so get_config can serialize it
    self.projection = layers.Dense(units=projection_dim)
    self.position_embedding = layers.Embedding(
        input_dim=num_patches, output_dim=projection_dim
    )
def call(self, patch):
positions = tf.range(start=0, limit=self.num_patches, delta=1)
encoded = self.projection(patch) + self.position_embedding(positions)
return encoded
def get_config(self):
    config = super().get_config().copy()
    config.update({
        'num_patches': self.num_patches,
        'projection_dim': self.projection_dim,  # serialize constructor args, not layer objects
    })
    return config
def create_vit_classifier():
inputs = layers.Input(shape=input_shape)
# Create patches.
patches = Patches(patch_size)(inputs)
# Encode patches.
encoded_patches = PatchEncoder(num_patches, projection_dim)(patches)
# Create multiple layers of the Transformer block.
for _ in range(transformer_layers):
# Layer normalization 1.
x1 = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
# Create a multi-head attention layer.
attention_output = layers.MultiHeadAttention(num_heads=num_heads, key_dim=projection_dim, dropout=0.1)(x1, x1)
# Skip connection 1.
x2 = layers.Add()([attention_output, encoded_patches])
# Layer normalization 2.
x3 = layers.LayerNormalization(epsilon=1e-6)(x2)
# MLP.
x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)
# Skip connection 2.
encoded_patches = layers.Add()([x3, x2])
# Create a [batch_size, projection_dim] tensor.
representation = layers.LayerNormalization(epsilon=1e-6)(encoded_patches)
representation = layers.Flatten()(representation)
representation = layers.Dropout(0.5)(representation)
# Add MLP.
features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.5)
# Classify outputs.
logits = layers.Dense(num_classes, activation="sigmoid")(features)
# Create the Keras model.
model = tf.keras.Model(inputs=inputs, outputs=logits)
return model
def run_experiment(model):
optimizer = tfa.optimizers.AdamW(learning_rate=learning_rate, weight_decay=weight_decay)
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
STEP_SIZE_TRAIN = train_gen.n // train_gen.batch_size
STEP_SIZE_VALID = valid_gen.n // valid_gen.batch_size
print(STEP_SIZE_TRAIN, STEP_SIZE_VALID)
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy',
factor=0.2,
patience=2,
verbose=1,
min_delta=1e-4,
min_lr=1e-6,
mode='max')
checkpointer = tf.keras.callbacks.ModelCheckpoint(filepath='./model_3.hdf5',
monitor='val_accuracy',
verbose=1,
save_best_only=True,
save_weights_only=True,
mode='max')
callbacks = [reduce_lr, checkpointer]
history = model.fit(x=train_gen,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_gen,
validation_steps=STEP_SIZE_VALID,
epochs=EPOCHS,
callbacks=callbacks,
verbose=1)
model.save(f'{save_path}/model_3.h5')
return history
vit_classifier = create_vit_classifier()
history = run_experiment(vit_classifier)
vit_classifier.load_weights(f'{save_path}/model_3.h5')
A = vit_classifier.predict(test_gen, steps = test_gen.n // test_gen.batch_size + 1)
predicted_classes = np.where(A > 0.5, 1, 0)
true_classes = test_gen.classes
class_labels = list(test_gen.class_indices.keys())
results = pd.DataFrame(list(zip(test_gen.filenames, true_classes, predicted_classes)),
columns =['Image name', 'True class', 'Predicted class'])
results = results.replace({"True class": classes_dict})
results = results.replace({"Predicted class": classes_dict})
prob = pd.DataFrame(A, columns =['Predicted probability'])
result_df = pd.concat([results, prob], axis=1)
result_df['Predicted probability'] = pd.Series(["{0:.1f}".format(val * 100) for val in result_df['Predicted probability']], index=result_df.index)
results_csv = f'{save_path}/results_3.csv'
with open(results_csv, mode='w') as f:
result_df.to_csv(f)
Confusion matrix:
[[428 0]
[276 0]]
Please help me figure out how to rectify this problem.
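One thing worth checking: create_vit_classifier ends with layers.Dense(num_classes, activation="sigmoid"), while run_experiment compiles BinaryCrossentropy(from_logits=True). The loss then treats already-squashed probabilities as raw logits, which compresses gradients and can leave every prediction near 0.5. A minimal sketch of the two self-consistent pairings (reusing features, optimizer, and model from the code above, and assuming a single output unit for this binary task):

# Option A: sigmoid output + from_logits=False (the loss receives probabilities)
logits = layers.Dense(1, activation="sigmoid")(features)
model.compile(optimizer=optimizer,
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
              metrics=['accuracy'])

# Option B: linear output + from_logits=True (the loss receives raw logits)
logits = layers.Dense(1)(features)  # no activation
model.compile(optimizer=optimizer,
              loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              metrics=['accuracy'])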
The output Dense layer has 101 units, but this error pops up. The error could be related to train_dataset and test_dataset or to model compilation. Help me out with this!
Also, if I want to sample the dataset and then assign the samples to training and testing, how do I do it? I'm new to TensorFlow and Python, and the syntax is confusing.
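For the sampling sub-question, a minimal sketch of one reproducible file-level train/test split, reusing directory, IMG_SIZE, and BATCH_SIZE from the question (illustrative only; the question's original code follows):

import os
import numpy as np
import tensorflow as tf

# Collect file paths and integer labels once (assumes one sub-folder per class).
paths, labels = [], []
for idx, name in enumerate(sorted(os.listdir(directory))):
    class_dir = os.path.join(directory, name)
    for fname in os.listdir(class_dir):
        paths.append(os.path.join(class_dir, fname))
        labels.append(idx)
paths, labels = np.array(paths), np.array(labels)

rng = np.random.default_rng(42)      # fixed seed -> reproducible split
order = rng.permutation(len(paths))
split = int(0.8 * len(paths))        # 80% train / 20% test

def make_ds(idx):
    def load(path, label):
        img = tf.io.decode_jpeg(tf.io.read_file(path), channels=3)
        return tf.image.resize(img, IMG_SIZE), label
    ds = tf.data.Dataset.from_tensor_slices((paths[idx], labels[idx]))
    return ds.map(load).batch(BATCH_SIZE)

train_dataset = make_ds(order[:split])
test_dataset = make_ds(order[split:])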
BATCH_SIZE = 32
IMG_SIZE = (224, 224)
directory = "/Food/Food 101/images"
train_dataset = image_dataset_from_directory(directory,
shuffle=True,
batch_size=BATCH_SIZE,
label_mode = "int",
image_size=IMG_SIZE,
validation_split=0.2,
subset='training',
seed=42)
validation_dataset = image_dataset_from_directory(directory,
shuffle=True,
batch_size=BATCH_SIZE,
label_mode = "int",
image_size=IMG_SIZE,
validation_split=0.2,
subset='validation',
seed=42)
class_names = train_dataset.class_names
plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
AUTOTUNE = tf.data.experimental.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
def data_augmenter():
data_augmentation = tf.keras.Sequential()
data_augmentation.add(tf.keras.layers.experimental.preprocessing.RandomFlip("horizontal"))
data_augmentation.add(tf.keras.layers.experimental.preprocessing.RandomRotation(0.2))
return data_augmentation
data_augmentation = data_augmenter()
for image, _ in train_dataset.take(1):
plt.figure(figsize=(10, 10))
first_image = image[0]
for i in range(9):
ax = plt.subplot(3, 3, i + 1)
augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
plt.imshow(augmented_image[0] / 255)
plt.axis('off')
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
IMG_SHAPE = IMG_SIZE + (3,)
IMG_SHAPE
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=True,
weights='imagenet')
base_model.summary()
nb_layers = len(base_model.layers)
print(base_model.layers[nb_layers - 2].name)
print(base_model.layers[nb_layers - 1].name)
image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print(feature_batch.shape)
label_batch
base_model.trainable = False
image_var = tf.Variable(image_batch)
pred = base_model(image_var)
tf.keras.applications.mobilenet_v2.decode_predictions(pred.numpy(), top=2)
image_shape=IMG_SIZE
def FC_model(image_shape=IMG_SIZE, data_augmentation=data_augmenter()):
input_shape = image_shape + (3,)
base_model = tf.keras.applications.MobileNetV2(input_shape=input_shape,
include_top=False,
weights='imagenet')
base_model.trainable = False
inputs = tf.keras.Input(shape = image_shape + (3, ))
x = data_augmentation(inputs)
x = preprocess_input(x)
x = base_model(x, training=False)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
prediction_layer = tf.keras.layers.Dense(101)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
return model
model2 = FC_model(IMG_SIZE, data_augmentation)
model2.summary()
base_learning_rate = 0.001
model2.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits = True),
metrics=['accuracy'])
initial_epochs = 10
history = model2.fit(train_dataset, validation_data=validation_dataset, epochs=initial_epochs)
The problem is in how you are loading the data:
train_dataset = image_dataset_from_directory(directory,
shuffle=True,
batch_size=BATCH_SIZE,
label_mode = "int",
image_size=IMG_SIZE,
validation_split=0.2,
subset='training',
seed=42)
In particular, label_mode="int" means that your target variable is encoded as an integer (e.g., 1 if cat, 2 if dog, 3 if tree). You want to change it to label_mode="categorical".
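A sketch of that change; note the loss must then match, since label_mode="categorical" yields one-hot targets, which pair with CategoricalCrossentropy rather than the SparseCategoricalCrossentropy used above:

train_dataset = image_dataset_from_directory(directory,
                                             shuffle=True,
                                             batch_size=BATCH_SIZE,
                                             label_mode="categorical",  # one-hot targets
                                             image_size=IMG_SIZE,
                                             validation_split=0.2,
                                             subset='training',
                                             seed=42)

model2.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
               loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
               metrics=['accuracy'])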
This is the error message I get:
assertion failed: [10 250 250] [11]
[[node ssim_loss/SSIM/Assert_2/Assert (defined at D:\Documents\neural_network\image_upscaler\nn2.py:33) ]]
[Op:__inference_train_function_7302]
Function call stack: train_function
Here's my code:
def ssim_loss(y_true, y_pred):
y_pred = tf.cast(y_pred, tf.float32)
y_true = tf.cast(y_true, tf.float32)
return tf.reduce_mean(tf.image.ssim(y_true, y_pred, 2.0))
def relu_bn(inputs):
relu = ReLU()(inputs)
bn = BatchNormalization()(relu)
return bn
def build_model_shi(upscale_factor=2, channels=1):
conv_args = {
"activation": "relu",
"kernel_initializer": "Orthogonal",
"padding": "same",
}
inputs = keras.Input(shape=(None, None, channels))
x = layers.Conv2D(128, 5, **conv_args)(inputs)
x = layers.Conv2D(128, 3, **conv_args)(x)
x = layers.Conv2D(32, 3, **conv_args)(x)
x = layers.Conv2D(channels * (upscale_factor ** 2), 3, **conv_args)(x)
outputs = tf.nn.depth_to_space(x, upscale_factor)
model = keras.Model(inputs, outputs)
#loss_fn = keras.losses.MeanSquaredError()
loss_fn = ssim_loss
optimizer = keras.optimizers.Adam(learning_rate=0.001)
model.compile(optimizer=optimizer, loss=loss_fn,)
return model
def load_data(PATH):
start = time()
xte = np.load(PATH + "xtef.npy")
print("xte loaded")
xtr = np.load(PATH + "xtrf.npy")
print("xtr loaded")
ytr = np.load(PATH + "ytrf.npy")
print("ytr loaded")
yte = np.load(PATH + "ytef.npy")
print("yte loaded")
end = time()
print("loaded in",end - start,"seconds")
xtr = np.array(xtr,"float16")
ytr = np.array(ytr,"float16")
xte = np.array(xte,"float16")
yte = np.array(yte,"float16")
return xtr,xte,ytr,yte
'''print("loading dataset")
X = np.load(PATH + "X.npy")
Y = np.load(PATH + "Y.npy")
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.05, random_state=42)
return X_train, X_test, y_train, y_test'''
def train(PATH,BATCH_SIZE,EPOCHS):
xtr, xte, ytr, yte = load_data(PATH)
checkpoint = ModelCheckpoint(PATH + "model.hdf5", monitor='val_accuracy', verbose=1, save_best_only=True, mode='max') #checkpointing it
callbacks_list = [checkpoint,EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)]
model = build_model_shi()
history = model.fit(xtr, ytr, epochs=EPOCHS, batch_size=BATCH_SIZE,validation_data=(xte,yte),callbacks=callbacks_list) #train time
results = model.predict(xte)
np.save(PATH + "model_op",results)
np.save(PATH + "model_ip",xte)
np.save(PATH + "model_gt",yte)
del xtr, xte, ytr, yte
train(PATH,BATCH_SIZE,EPOCHS)
The model works just fine when I use MSE as the loss function, but it gives me this error when I use my own ssim_loss.
Any help is appreciated.
I figured it out!
It turns out the tensors passed to tf.image.ssim needed reshaping: it expects images with an explicit channel dimension, i.e. shape [batch, height, width, channels], and mine were coming in as [batch, height, width].
def ssim_loss(y_true, y_pred):
    y_pred = tf.reshape(y_pred, [-1, 250, 250, 1])
    y_true = tf.reshape(y_true, [-1, 250, 250, 1])
    return 1 - tf.reduce_mean(tf.image.ssim(y_true, y_pred, max_val=255))
In my case the image size is 250x250x1. Note the working version also returns 1 - mean(SSIM), so that minimizing the loss maximizes similarity, and uses max_val=255 to match the 0-255 pixel range.
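A quick sanity check (a sketch using dummy data shaped like the [10, 250, 250] batch from the assertion message):

import numpy as np
import tensorflow as tf

y = tf.constant(np.random.rand(10, 250, 250).astype("float32") * 255)
print(ssim_loss(y, y).numpy())  # identical images give SSIM = 1, so the loss is ~0.0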
I'm trying to run this code in Colab. Interestingly, I was running the same code in Colab a few days ago, but now it won't work; the code also works in a Kaggle kernel. I tried changing the TensorFlow version, but each version gives a different error. Why do you think I can't run this code? This is the Colab notebook if you need more info.
Thanks in advance!
class DisasterDetector:
def __init__(self, tokenizer, bert_layer, max_len =30, lr = 0.0001,
epochs = 15, batch_size = 32, dtype = tf.int32 ,
activation = 'sigmoid', optimizer = 'SGD',
beta_1=0.9, beta_2=0.999, epsilon=1e-07,
metrics = 'accuracy', loss = 'binary_crossentropy'):
self.lr = lr
self.epochs = epochs
self.max_len = max_len
self.batch_size = batch_size
self.tokenizer = tokenizer
self.bert_layer = bert_layer
self.models = []
self.activation = activation
self.optimizer = optimizer
self.dtype = dtype
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon =epsilon
self.metrics = metrics
self.loss = loss
def encode(self, texts):
all_tokens = []
masks = []
segments = []
for text in texts:
tokenized = self.tokenizer.convert_tokens_to_ids(['[CLS]'] + self.tokenizer.tokenize(text) + ['[SEP]'])
len_zeros = self.max_len - len(tokenized)
padded = tokenized + [0] * len_zeros
mask = [1] * len(tokenized) + [0] * len_zeros
segment = [0] * self.max_len
all_tokens.append(padded)
masks.append(mask)
segments.append(segment)
print(len(all_tokens[0]))
return np.array(all_tokens), np.array(masks), np.array(segments)
def make_model(self):
input_word_ids = Input(shape = (self.max_len, ), dtype=tf.int32,
name = 'input_word_ids')
input_mask = Input(shape = (self.max_len, ), dtype=tf.int32,
name = 'input_mask')
segment_ids = Input(shape = (self.max_len, ), dtype=tf.int32,
name = 'segment_ids')
# pooled_output: (batch, hidden) summary vector; sequence_output: (batch, max_len, hidden) per-token vectors
pooled_output, sequence_output = self.bert_layer([input_word_ids,
input_mask,
segment_ids])
clf_output = sequence_output[:, 0, :]
out = tf.keras.layers.Dense(1, activation = self.activation)(clf_output)
#out = tf.keras.layers.Dense(1, activation = 'sigmoid', input_shape = (clf_output,) )(clf_output)
model = Model(inputs = [input_word_ids, input_mask, segment_ids],
outputs = out)
if self.optimizer == 'SGD':  # '==' for string comparison, not 'is'
    optimizer = SGD(learning_rate=self.lr)
elif self.optimizer == 'Adam':
    optimizer = Adam(learning_rate=self.lr, beta_1=self.beta_1,
                     beta_2=self.beta_2, epsilon=self.epsilon)
model.compile(loss=self.loss, optimizer=optimizer,  # pass the constructed optimizer, not the string
              metrics=[self.metrics])
return model
def train(self, x, k = 3):
kfold = StratifiedKFold(n_splits = k, shuffle = True)
for fold, (train_idx, val_idx) in enumerate(kfold.split(x['cleaned_text'], x['target'])):
print('fold: ', fold)
x_trn = self.encode(x.loc[train_idx, 'cleaned_text'])
x_val = self.encode(x.loc[val_idx, 'cleaned_text'])
y_trn = np.array(x.loc[train_idx, 'target'], dtype = np.uint8)
y_val = np.array(x.loc[val_idx, 'target'], dtype = np.uint8)
print('the data type of y train: ', type(y_trn))
print('x_val shape', x_val[0].shape)
print('x_trn shape', x_trn[0].shape)
model = self.make_model()
print('model made.')
model.fit(x_trn, tf.convert_to_tensor(y_trn),
validation_data = (x_val, tf.convert_to_tensor(y_val)),
batch_size=self.batch_size, epochs = self.epochs)
self.models.append(model)
After calling the train method of the class, I get an error.
classifier = DisasterDetector(tokenizer = tokenizer, bert_layer = bert_layer, max_len = max_len, lr = 0.0001,
epochs = 10, activation = 'sigmoid',
batch_size = 32,optimizer = 'SGD',
beta_1=0.9, beta_2=0.999, epsilon=1e-07)
classifier.train(train_cleaned)
and here is the error:
ValueError Traceback (most recent call last)
<ipython-input-10-106c756f2e47> in <module>()
----> 1 classifier.train(train_cleaned)
8 frames
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/constant_op.py in convert_to_eager_tensor(value, ctx, dtype)
96 dtype = dtypes.as_dtype(dtype).as_datatype_enum
97 ctx.ensure_initialized()
---> 98 return ops.EagerTensor(value, ctx.device_name, dtype)
99
100
ValueError: Failed to convert a NumPy array to a Tensor (Unsupported object type list).
Well, it turns out that the error came from not giving an appropriate maximum sequence length. In encode(), len_zeros = self.max_len - len(tokenized) goes negative for any text longer than max_len, so the padded rows end up with different lengths and np.array() produces a ragged object array, which TensorFlow cannot convert to a tensor. By changing the max_len variable to 54 I could run my program with no difficulty; the problem was not the type of the input or the numpy arrays.
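For reference, a more defensive encode() (a sketch, not the original fix) truncates each sequence so the padding arithmetic can never go negative, whatever max_len is set to:

def encode(self, texts):
    all_tokens, masks, segments = [], [], []
    for text in texts:
        tokenized = self.tokenizer.convert_tokens_to_ids(
            ['[CLS]'] + self.tokenizer.tokenize(text) + ['[SEP]'])
        tokenized = tokenized[:self.max_len]        # truncate overly long sequences
        len_zeros = self.max_len - len(tokenized)   # now always >= 0
        all_tokens.append(tokenized + [0] * len_zeros)
        masks.append([1] * len(tokenized) + [0] * len_zeros)
        segments.append([0] * self.max_len)
    return np.array(all_tokens), np.array(masks), np.array(segments)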
I'm trying to run this code, and I have this error:
ValueError: Error when checking target: expected flatten_4 to have shape (2048,) but got array with shape (2,)
NUM_CLASSES = 2
CHANNELS = 3
IMAGE_RESIZE = 224
RESNET50_POOLING_AVERAGE = 'avg'
DENSE_LAYER_ACTIVATION = 'softmax'
OBJECTIVE_FUNCTION = 'categorical_crossentropy'
NUM_EPOCHS = 10
EARLY_STOP_PATIENCE = 3
STEPS_PER_EPOCH_TRAINING = 10
STEPS_PER_EPOCH_VALIDATION = 10
BATCH_SIZE_TRAINING = 100
BATCH_SIZE_VALIDATION = 100
BATCH_SIZE_TESTING = 1
resnet_weights_path = '../input/resnet50/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
model = Sequential()
train_data_dir = "C:\\Users\\Desktop\\RESNET"
model = ResNet50(include_top=True, weights='imagenet')
model.layers.pop()
model = Model(input=model.input,output=model.layers[-1].output)
model.summary()
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.01, momentum=0.9), metrics= ['binary_accuracy'])
data_dir = "C:\\Users\\Desktop\\RESNET"
batch_size = 32
from keras.applications.resnet50 import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
image_size = IMAGE_RESIZE
data_generator = ImageDataGenerator(preprocessing_function=preprocess_input)
def append_ext(fn):
return fn+".jpg"
import os
from os import listdir
from os.path import isfile, join
dir_path = os.path.dirname(os.path.realpath(__file__))
train_dir_path = dir_path + '\data'
onlyfiles = [f for f in listdir(dir_path) if isfile(join(dir_path, f))]
data_labels = [0, 1]
t = []
maxi = 25145
LieOffset = 15799
i = 0
while i < maxi: # t = tuple
if i <= LieOffset:
t.append(label['Lie'])
else:
t.append(label['Truth'])
i = i+1
train_datagenerator = ImageDataGenerator(rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
validation_split=0.2)
train_generator = train_datagenerator.flow_from_directory(
train_data_dir,
target_size=(image_size, image_size),
batch_size=BATCH_SIZE_TRAINING,
class_mode='categorical', shuffle=False, subset='training')
validation_generator = train_datagenerator.flow_from_directory(
train_data_dir, # same directory as training data kifkif
target_size=(image_size, image_size),
batch_size=BATCH_SIZE_TRAINING,
class_mode='categorical', shuffle=False, subset='validation')
(BATCH_SIZE_TRAINING, len(train_generator), BATCH_SIZE_VALIDATION, len(validation_generator))
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint
cb_early_stopper = EarlyStopping(monitor = 'val_loss', patience = EARLY_STOP_PATIENCE)
cb_checkpointer = ModelCheckpoint(filepath = '../working/best.hdf5', monitor = 'val_loss', save_best_only = True, mode = 'auto')
from sklearn.model_selection import ParameterGrid  # grid_search was removed; ParameterGrid lives in model_selection
param_grid = {'epochs': [5, 10, 15], 'steps_per_epoch' : [10, 20, 50]}
grid = ParameterGrid(param_grid)
# keep the parameter set with the lowest val_loss as the final model
for params in grid:
print(params)
fit_history = model.fit_generator(
train_generator,
steps_per_epoch=STEPS_PER_EPOCH_TRAINING,
epochs = NUM_EPOCHS,
validation_data=validation_generator,
validation_steps=STEPS_PER_EPOCH_VALIDATION,
callbacks=[cb_checkpointer, cb_early_stopper])
model.load_weights("../working/best.hdf5")
The error suggests that your model's output layer should have 2 nodes, whereas it currently outputs 2048 values because you are using the output of ResNet50's avg_pool layer as your model output. You can add a Dense layer with 2 nodes on top of the avg_pool layer to solve the problem.
model = ResNet50(include_top=True, weights='imagenet')
print(model.summary())
x = model.get_layer('avg_pool').output
predictions = Dense(2, activation='sigmoid')(x)
model = Model(inputs=model.input, outputs=predictions)
print(model.summary())
As I'm not quite sure what type of problem you are solving, I assumed multi-label (2-class) classification, since your data labels have shape (2,).
However, if you are solving a binary classification problem, you need to change your labels so that each is either 1 or 0: change class_mode='categorical' to class_mode='binary' in both train_generator and validation_generator. In that case the model's output layer should have 1 node.
predictions = Dense(1, activation='sigmoid')(x)
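A sketch of the matching generator change (all other arguments as in the question):

train_generator = train_datagenerator.flow_from_directory(
    train_data_dir,
    target_size=(image_size, image_size),
    batch_size=BATCH_SIZE_TRAINING,
    class_mode='binary',  # scalar 0/1 labels to match the single sigmoid unit
    shuffle=False, subset='training')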
When training a vgg_face model in Keras using a data generator, I got this issue: ValueError: Output of generator should be a tuple (x, y, sample_weight) or (x, y). Found: <keras_preprocessing.image.NumpyArrayIterator object at 0x7f03e83a75f8>
I have tried the methods from a similar issue (ValueError: Output of generator should be a tuple (x, y, sample_weight) or (x, y). Found: None), but they didn't work.
def process_line(line):
path = '/home/apptech/pixeleye_test/apps/arup/dataset/AFAD-Full'
label_ages = np.asarray([line[1:3]])
label_genders = np.array([line[4:7]])
data = Image.open(path + line)
arr = np.asarray(data, dtype="float32")
arr = cv2.resize(arr, (224, 224))
# return (arr, label_ages)
if label_ages is not None and label_genders is not None:  # compare each to None explicitly
    return (arr, label_ages)
def generate_arrays_from_file(data, batch_size, datagen):
# np_utils.to_categorical onehot
while True:
f = data
cnt = 0
X_Y = []
X = []
Y_age = []
Y_gender = []
for line in f:
# x,y_age,y_gender=process_line(line.strip('\n'))
x, y_age = process_line(line.strip('\n'))[0], process_line(line.strip('\n'))[1]
X.append(x)
# X_Y.append(x_y)
Y_age.append(y_age)
# if int(y_gender)==111:
# label=np.array([1,0])
# Y_gender.append(label)
# if int(y_gender)==112:
# label = np.array([0, 1])
# Y_gender.append(label)
cnt += 1
if cnt == batch_size:
cnt = 0
datagen.fit(X)
print(np.asarray(X).shape, np.asarray(Y_age).shape)
yield datagen.flow(np.array(X), np.array(Y_age), batch_size=batch_size)
# yield np.asarray(X), np.asarray(Y_age)
X = []
X_Y = []
Y_age = []
Y_gender = []
# f.close()
def model(epochs, lr, batch_size):
content = open('/home/apptech/pixeleye_test/apps/arup/dataset/AFAD-Full/AFAD-Full.txt').readlines()
random.shuffle(content)
num = int(len(content) * 0.8)
train_data = content[:num]
test_data = content[num:]
# Convolution Features
vgg_model = VGGFace(model='resnet50', include_top=False, input_shape=(224, 224, 3),
pooling='max') # pooling: None, avg or max
# custom parameters
last_layer = vgg_model.get_layer('avg_pool').output
x = Flatten(name='flatten')(last_layer)
out_age = Dense(units=1000, activation='relu', name='regression', kernel_regularizer=regularizers.l2(0.01))(x)
out_age1 = Dense(units=500, activation='relu', name='regression1', kernel_regularizer=regularizers.l2(0.01))(
out_age)
out_age2 = Dense(units=100, name='regression2', kernel_regularizer=regularizers.l2(0.01))(out_age1)
out_age3 = Dense(units=1, name='regression3', kernel_regularizer=regularizers.l2(0.01))(out_age2)
out_gender = Dense(units=2, activation='softmax', name='classifier1')(x)
# custom_vgg_model = Model(vgg_model.input, outputs=[out_age3, out_gender])
custom_vgg_model = Model(vgg_model.input, outputs=out_age3)
# Create the model
model = custom_vgg_model
sgd = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
# model.compile(optimizer=sgd, loss=["mean_squared_error", "categorical_crossentropy"],
# metrics=['accuracy'])
model.compile(optimizer=sgd, loss=["mean_squared_error"],
metrics=['accuracy'])
logging.debug("Model summary...")
model.count_params()
model.summary()
class Schedule:
def __init__(self, nb_epochs, initial_lr):
self.epochs = nb_epochs
self.initial_lr = initial_lr
def __call__(self, epoch_idx):
if epoch_idx < self.epochs * 0.25:
return self.initial_lr
elif epoch_idx < self.epochs * 0.50:
return self.initial_lr * 0.2
elif epoch_idx < self.epochs * 0.75:
return self.initial_lr * 0.04
return self.initial_lr * 0.008
callbacks = [LearningRateScheduler(schedule=Schedule(epochs, lr)),
ModelCheckpoint("/home/apptech/pixeleye_test/apps/arup/result/weights.{epoch:02d}-{val_loss:.2f}.hdf5",
monitor="val_loss",
verbose=1,
save_best_only=True,
mode="auto")
]
logging.debug("Running training...")
datagen = ImageDataGenerator(
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
preprocessing_function=get_random_eraser(v_l=0, v_h=255))
# training_generator = MixupGenerator(X_train, [y_train_a, y_train_g], batch_size=32, alpha=0.2,
# datagen=datagen)()
hist = model.fit_generator(generator=generate_arrays_from_file(train_data, batch_size, datagen),
steps_per_epoch=len(train_data) // batch_size,
validation_data=generate_arrays_from_file(test_data, batch_size, datagen),
validation_steps=len(test_data) // batch_size,
epochs=epochs, verbose=1,
callbacks=callbacks)
Fixed! datagen.flow() returns a generator, not a batch. To obtain the actual batch, use:
X_batch, y_batch = datagen.flow(X_train, y_train, batch_size=9).next()
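Applied to generate_arrays_from_file above, the yield then produces an (x, y) tuple instead of the iterator itself (a sketch):

# inside the `if cnt == batch_size:` block, replace the old yield with:
X_batch, y_batch = datagen.flow(np.array(X), np.array(Y_age), batch_size=batch_size).next()
yield X_batch, y_batch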