Currently I use the following code:
callbacks = [
    EarlyStopping(monitor='val_loss', patience=2, verbose=0),
    ModelCheckpoint(kfold_weights_path, monitor='val_loss', save_best_only=True, verbose=0),
]
model.fit(X_train.astype('float32'), Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          shuffle=True, verbose=1, validation_data=(X_valid, Y_valid),
          callbacks=callbacks)
It tells Keras to stop training when the loss hasn't improved for 2 epochs. But I want to stop training once the loss becomes smaller than some constant "THR":
if val_loss < THR:
    break
I've seen in the documentation that there is the possibility to make your own callback:
http://keras.io/callbacks/
But I found nothing about how to stop the training process. I need some advice.
I found the answer. I looked into the Keras sources and found the code for EarlyStopping. I made my own callback based on it:
import warnings
from keras.callbacks import Callback

class EarlyStoppingByLossVal(Callback):
    def __init__(self, monitor='val_loss', value=0.00001, verbose=0):
        super(EarlyStoppingByLossVal, self).__init__()
        self.monitor = monitor
        self.value = value
        self.verbose = verbose

    def on_epoch_end(self, epoch, logs={}):
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            return
        if current < self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping THR" % epoch)
            self.model.stop_training = True
And usage:
callbacks = [
    EarlyStoppingByLossVal(monitor='val_loss', value=0.00001, verbose=1),
    # EarlyStopping(monitor='val_loss', patience=2, verbose=0),
    ModelCheckpoint(kfold_weights_path, monitor='val_loss', save_best_only=True, verbose=0),
]
model.fit(X_train.astype('float32'), Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          shuffle=True, verbose=1, validation_data=(X_valid, Y_valid),
          callbacks=callbacks)
The keras.callbacks.EarlyStopping callback does have a min_delta argument. From Keras documentation:
min_delta: minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute change of less than min_delta, will count as no improvement.
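For example, a minimal sketch using min_delta together with the fit call from the question (this assumes a Keras version where min_delta is available; the 1e-4 threshold is just an illustration):
from keras.callbacks import EarlyStopping

# Stop when val_loss improves by less than 1e-4 for 2 consecutive epochs.
early_stop = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=2, verbose=1)

model.fit(X_train.astype('float32'), Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
          shuffle=True, verbose=1, validation_data=(X_valid, Y_valid),
          callbacks=[early_stop])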
One solution is to call model.fit(nb_epoch=1, ...) inside a for loop; then you can put a break statement inside the loop and do whatever other custom control flow you want.
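A minimal sketch of that idea, reusing the names from the question (THR, nb_epoch, and the data splits are assumed to be defined as in the original code):
for epoch in range(nb_epoch):
    history = model.fit(X_train.astype('float32'), Y_train, batch_size=batch_size,
                        nb_epoch=1, shuffle=True, verbose=1,
                        validation_data=(X_valid, Y_valid))
    # History stores the metrics of the single epoch just run.
    if history.history['val_loss'][-1] < THR:
        break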
I solved the same problem using a custom callback.
In the following custom callback code, assign THR the value at which you want to stop training, and add the callback to your model.
from keras.callbacks import Callback
class stopAtLossValue(Callback):
    def on_batch_end(self, batch, logs={}):
        THR = 0.03  # Assign THR the value at which you want to stop training.
        if logs.get('loss') <= THR:
            self.model.stop_training = True
While I was taking the TensorFlow in Practice specialization, I learned a very elegant technique. It is only slightly modified from the accepted answer.
Let's set the example with our favorite MNIST data.
import tensorflow as tf
class new_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if logs.get('accuracy') > 0.90:  # select the accuracy
            print("\n !!! 90% accuracy, no further training !!!")
            self.model.stop_training = True
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0 #normalize
callbacks = new_callback()
# model = tf.keras.models.Sequential([# define your model here])
model.compile(optimizer=tf.optimizers.Adam(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
So here I set metrics=['accuracy'], and thus in the callback class the condition is set on 'accuracy' > 0.90.
You can choose any metric and monitor the training as in this example. Most importantly, you can set different conditions for different metrics and use them simultaneously.
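For instance, here is a minimal sketch of a callback that checks two conditions at once (the 0.90 and 0.20 thresholds are only illustrative):
class multi_metric_callback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        # Stop only when accuracy is high enough AND loss is low enough.
        if logs.get('accuracy') > 0.90 and logs.get('loss') < 0.20:
            print("\nBoth conditions met, stopping training.")
            self.model.stop_training = True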
Hopefully this helps!
For me, the model would only stop training if I added a return statement after setting the stop_training parameter to True, because I was calling self.model.evaluate afterwards. So either make sure stop_training = True is the last thing the function does, or add a return statement.
def on_epoch_end(self, batch, logs):
    self.epoch += 1
    self.stoppingCounter += 1
    print('\nstopping counter \n', self.stoppingCounter)

    # Stop training if there hasn't been any improvement in 'patience' epochs
    if self.stoppingCounter >= self.patience:
        self.model.stop_training = True
        return

    # Test on an additional set if there is one
    if self.testingOnAdditionalSet:
        evaluation = self.model.evaluate(self.val2X, self.val2Y, verbose=0)
        self.validationLoss2.append(evaluation[0])
        self.validationAcc2.append(evaluation[1])
If you're using a custom training loop, you can use a collections.deque, which is a "rolling" list that can be appended to, with the left-hand items popped out when the list grows longer than maxlen. Here's the core of it:
loss_history = deque(maxlen=early_stopping + 1)
for epoch in range(epochs):
    fit(epoch)
    loss_history.append(test_loss.result().numpy())
    if len(loss_history) > early_stopping and loss_history.popleft() < min(loss_history):
        break
Here's a full example:
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow_datasets as tfds
import tensorflow as tf
from tensorflow.keras.layers import Dense
from collections import deque
data, info = tfds.load('iris', split='train', as_supervised=True, with_info=True)
data = data.map(lambda x, y: (tf.cast(x, tf.int32), y))
train_dataset = data.take(120).batch(4)
test_dataset = data.skip(120).take(30).batch(4)
model = tf.keras.models.Sequential([
    Dense(8, activation='relu'),
    Dense(16, activation='relu'),
    Dense(info.features['label'].num_classes)])
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
train_loss = tf.keras.metrics.Mean()
test_loss = tf.keras.metrics.Mean()
train_acc = tf.keras.metrics.SparseCategoricalAccuracy()
test_acc = tf.keras.metrics.SparseCategoricalAccuracy()
opt = tf.keras.optimizers.Adam(learning_rate=1e-3)
@tf.function
def train_step(inputs, labels):
    with tf.GradientTape() as tape:
        logits = model(inputs, training=True)
        loss = loss_object(labels, logits)
    gradients = tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_acc(labels, logits)

@tf.function
def test_step(inputs, labels):
    logits = model(inputs, training=False)
    loss = loss_object(labels, logits)
    test_loss(loss)
    test_acc(labels, logits)
def fit(epoch):
    template = 'Epoch {:>2} Train Loss {:.3f} Test Loss {:.3f} ' \
               'Train Acc {:.2f} Test Acc {:.2f}'

    train_loss.reset_states()
    test_loss.reset_states()
    train_acc.reset_states()
    test_acc.reset_states()

    for X_train, y_train in train_dataset:
        train_step(X_train, y_train)

    for X_test, y_test in test_dataset:
        test_step(X_test, y_test)

    print(template.format(
        epoch + 1,
        train_loss.result(),
        test_loss.result(),
        train_acc.result(),
        test_acc.result()
    ))
def main(epochs=50, early_stopping=10):
    loss_history = deque(maxlen=early_stopping + 1)

    for epoch in range(epochs):
        fit(epoch)
        loss_history.append(test_loss.result().numpy())

        if len(loss_history) > early_stopping and loss_history.popleft() < min(loss_history):
            print(f'\nEarly stopping. No validation loss '
                  f'improvement in {early_stopping} epochs.')
            break
if __name__ == '__main__':
    main(epochs=250, early_stopping=10)
Epoch 1 Train Loss 1.730 Test Loss 1.449 Train Acc 0.33 Test Acc 0.33
Epoch 2 Train Loss 1.405 Test Loss 1.220 Train Acc 0.33 Test Acc 0.33
Epoch 3 Train Loss 1.173 Test Loss 1.054 Train Acc 0.33 Test Acc 0.33
Epoch 4 Train Loss 1.006 Test Loss 0.935 Train Acc 0.33 Test Acc 0.33
Epoch 5 Train Loss 0.885 Test Loss 0.846 Train Acc 0.33 Test Acc 0.33
...
Epoch 89 Train Loss 0.196 Test Loss 0.240 Train Acc 0.89 Test Acc 0.87
Epoch 90 Train Loss 0.195 Test Loss 0.239 Train Acc 0.89 Test Acc 0.87
Epoch 91 Train Loss 0.195 Test Loss 0.239 Train Acc 0.89 Test Acc 0.87
Epoch 92 Train Loss 0.194 Test Loss 0.239 Train Acc 0.90 Test Acc 0.87
Early stopping. No validation loss improvement in 10 epochs.
I have experimented with training a simple TensorFlow model under two scenarios: passing my model to my training loop (and to the subfunctions called from the training loop), versus not passing my model to the training loop. The two cases produce different results. When passing my model to the training functions, the model is trained properly. But in the second scenario, something is wrong because the model is apparently not trained. I am baffled, and I wonder if it's a scope thing.
To be more specific, my setup involves dynamically creating a new model of larger size (adding some layers at each iteration of a for loop) and then training the resulting model. As stated before, I train the model in two scenarios: passing the model to the training subfunctions and not doing so, and I obtain different results depending on which one I do. I verify this by giving the model a test sample (class 0 MNIST images) and checking whether the correct classification is output. The models trained by passing the model as an argument are trained correctly, but if I do not do this, then only the first model created by the for loop is trained correctly; the rest produce incorrect class predictions. Can this be explained?
The code below trains without passing the model as an argument.
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.keras.optimizers import Adam
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import time
epochs = 200
input_shape = (28,28,1)
num_classes=10
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
x_train = np.expand_dims(x_train, -1)
x_val = np.expand_dims(x_val, -1)
x_test = np.expand_dims(x_test, -1)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_val = x_val.astype("float32")
y_test_sorted_args_0=np.where(y_test == 0)
x_test_0=x_test[y_test_sorted_args_0]
y_test_0=np.full( (x_test_0.shape)[0],0)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(batch_size)
acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=False)
@tf.function
def train_step(x, y):
    with tf.GradientTape() as tape:
        mod_output = model(x, training=True)
        loss_value = loss_fn(y, mod_output)
    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
    acc_metric.update_state(y, mod_output)
    return loss_value

@tf.function
def test_step(x, y):
    val = model(x, training=False)
    acc_metric.update_state(y, val)
def train(epochs):
    for epoch in range(epochs):
        print("\nStart of epoch %d" % (epoch,))
        start_time = time.time()

        # Iterate over the batches of the dataset.
        for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
            loss_value = train_step(x_batch_train, y_batch_train)

            # Log every 200 batches.
            if step % 200 == 0:
                print(
                    "Training loss (for one batch) at step %d: %.4f"
                    % (step, float(loss_value))
                )
                print("Seen so far: %d samples" % ((step + 1) * batch_size))

        # Display metrics at the end of each epoch.
        train_acc = acc_metric.result()
        print("Training acc over epoch: %.4f" % (float(train_acc),))

        # Reset training metrics at the end of each epoch
        acc_metric.reset_states()

        # Run a validation loop at the end of each epoch.
        for x_batch_val, y_batch_val in val_dataset:
            test_step(x_batch_val, y_batch_val)

        val_acc = acc_metric.result()
        val_acc_metric.reset_states()
        print("Validation acc: %.4f" % (float(val_acc),))
        print("Time taken: %.2fs" % (time.time() - start_time))
max_hidden=7
for num_hidden_layers in range(1,max_hidden,3):
    model1 = keras.Sequential(
        [
            keras.Input(shape=input_shape),
            layers.Flatten(),
        ]
    )
    for i in range(1, num_hidden_layers+1):
        model1.add(layers.Dense(150, activation="relu"))
    model1.add(layers.Dense(num_classes, activation="softmax"))

    model = model1
    train(epochs)

    # Verify that the model is properly trained by checking that it correctly predicts images from class 0.
    # The more class 0 predictions we have, the better.
    for sample_index in range(0, 10):
        x_sample = x_test_0[sample_index, :, :, :]
        x_sample = np.expand_dims(x_sample, 0)
        print(tf.math.argmax(model(x_sample), axis=1))
        time.sleep(1)
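For comparison, here is a minimal sketch of the first scenario described above, where the model is passed explicitly into the training functions; the bodies are otherwise the same as in the code above:
@tf.function
def train_step(model, x, y):
    with tf.GradientTape() as tape:
        mod_output = model(x, training=True)
        loss_value = loss_fn(y, mod_output)
    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
    acc_metric.update_state(y, mod_output)
    return loss_value

def train(model, epochs):
    for epoch in range(epochs):
        for x_batch_train, y_batch_train in train_dataset:
            train_step(model, x_batch_train, y_batch_train)
        acc_metric.reset_states()

# ...and inside the outer loop the freshly built model is passed in explicitly:
# train(model1, epochs)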
I want the batch normalization running statistics (mean and variance) to converge at the end of training, which requires increasing the batch norm momentum from some initial value to 1.0. I managed to change the momentum using a custom Callback, but it works only if my model is compiled in eager mode. Toy example (it sets momentum=1.0 after epoch zero, after which moving_mean should stop updating):
import tensorflow as tf # version 2.3.1
import tensorflow_datasets as tfds
ds_train, ds_test = tfds.load("mnist", split=["train", "test"], shuffle_files=True, as_supervised=True)
ds_train = ds_train.batch(128)
ds_test = ds_test.batch(128)
model = tf.keras.models.Sequential(
    [
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Dense(10),
    ]
)
model.compile(
    optimizer=tf.keras.optimizers.Adam(0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
    # run_eagerly=True,
)
class BatchNormMomentumCallback(tf.keras.callbacks.Callback):
    def on_epoch_begin(self, epoch, logs=None):
        last_bn_layer = None
        for layer in self.model.layers:
            if isinstance(layer, tf.keras.layers.BatchNormalization):
                if epoch == 0:
                    layer.momentum = 0.99
                else:
                    layer.momentum = 1.0
                last_bn_layer = layer
        if last_bn_layer:
            tf.print("Momentum=" + str(last_bn_layer.moving_mean[-1].numpy()))  # Should not change after epoch 1
batchnorm_decay = BatchNormMomentumCallback()
model.fit(ds_train, epochs=6, validation_data=ds_test, callbacks=[batchnorm_decay], verbose=0)
Output (obtained when run_eagerly=False):
Momentum=0.0
Momentum=-102.20184
Momentum=-106.04614
Momentum=-116.36204
Momentum=-129.995
Momentum=-123.70443
Expected output (obtained when run_eagerly=True):
Momentum=0.0
Momentum=-5.9038606
Momentum=-5.9038606
Momentum=-5.9038606
Momentum=-5.9038606
Momentum=-5.9038606
I guess this happens because in graph mode TF compiles the model as a graph with the momentum defined as 0.99, and then uses this value in the graph (so the momentum is not updated by BatchNormMomentumCallback).
Question:
Is there a way to update the compiled momentum value inside the graph while training? I want to update the momentum without eager mode (i.e. keeping run_eagerly=False) because training efficiency is important.
I would recommend simply using a custom training loop for your use case. You will have all the flexibility you need:
import tensorflow as tf # version 2.3.1
import tensorflow_datasets as tfds
ds_train, ds_test = tfds.load("mnist", split=["train", "test"], shuffle_files=True, as_supervised=True)
ds_train = ds_train.batch(128)
ds_test = ds_test.batch(128)
model = tf.keras.models.Sequential(
    [
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Dense(10),
    ]
)
optimizer = tf.keras.optimizers.Adam(0.001)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
batch_norm_layer = model.layers[2]
@tf.function
def train_step(epoch, model, batch):
    if epoch == 0:
        batch_norm_layer.momentum = 0.99
    else:
        batch_norm_layer.momentum = 1.0

    with tf.GradientTape() as tape:
        x_batch_train, y_batch_train = batch
        logits = model(x_batch_train, training=True)
        loss_value = loss_fn(y_batch_train, logits)
        train_acc_metric.update_state(y_batch_train, logits)

    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
epochs = 6
for epoch in range(epochs):
tf.print("\nStart of epoch %d" % (epoch,))
tf.print("Momentum = ", batch_norm_layer.moving_mean[-1], summarize=-1)
for batch in ds_train:
train_step(epoch, model, batch)
train_acc = train_acc_metric.result()
tf.print("Training acc over epoch: %.4f" % (float(train_acc),))
train_acc_metric.reset_states()
Start of epoch 0
Momentum = 0
Training acc over epoch: 0.9158
Start of epoch 1
Momentum = -20.2749767
Training acc over epoch: 0.9634
Start of epoch 2
Momentum = -20.2749767
Training acc over epoch: 0.9755
Start of epoch 3
Momentum = -20.2749767
Training acc over epoch: 0.9826
Start of epoch 4
Momentum = -20.2749767
Training acc over epoch: 0.9876
Start of epoch 5
Momentum = -20.2749767
Training acc over epoch: 0.9915
A simple test shows that the function with the tf.function decorator performs way better:
import tensorflow as tf # version 2.3.1
import tensorflow_datasets as tfds
import timeit
ds_train, ds_test = tfds.load("mnist", split=["train", "test"], shuffle_files=True, as_supervised=True)
ds_train = ds_train.batch(128)
ds_test = ds_test.batch(128)
model = tf.keras.models.Sequential(
    [
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128),
        tf.keras.layers.BatchNormalization(),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Dense(10),
    ]
)
optimizer = tf.keras.optimizers.Adam(0.001)
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
train_acc_metric = tf.keras.metrics.SparseCategoricalAccuracy()
batch_norm_layer = model.layers[2]
@tf.function
def train_step(epoch, model, batch):
    if epoch == 0:
        batch_norm_layer.momentum = 0.99
    else:
        batch_norm_layer.momentum = 1.0

    with tf.GradientTape() as tape:
        x_batch_train, y_batch_train = batch
        logits = model(x_batch_train, training=True)
        loss_value = loss_fn(y_batch_train, logits)
        train_acc_metric.update_state(y_batch_train, logits)

    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))

def train_step_without_tffunction(epoch, model, batch):
    if epoch == 0:
        batch_norm_layer.momentum = 0.99
    else:
        batch_norm_layer.momentum = 1.0

    with tf.GradientTape() as tape:
        x_batch_train, y_batch_train = batch
        logits = model(x_batch_train, training=True)
        loss_value = loss_fn(y_batch_train, logits)
        train_acc_metric.update_state(y_batch_train, logits)

    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
epochs = 6
for epoch in range(epochs):
tf.print("\nStart of epoch %d" % (epoch,))
tf.print("Momentum = ", batch_norm_layer.moving_mean[-1], summarize=-1)
test = True
for batch in ds_train:
train_step(epoch, model, batch)
if test:
tf.print("TF function:", timeit.timeit(lambda: train_step(epoch, model, batch), number=10))
tf.print("Eager function:", timeit.timeit(lambda: train_step_without_tffunction(epoch, model, batch), number=10))
test = False
train_acc = train_acc_metric.result()
tf.print("Training acc over epoch: %.4f" % (float(train_acc),))
train_acc_metric.reset_states()
Start of epoch 0
Momentum = 0
TF function: 0.02285163299893611
Eager function: 0.11109527599910507
Training acc over epoch: 0.9229
Start of epoch 1
Momentum = -88.1852188
TF function: 0.024091466999379918
Eager function: 0.1109461480009486
Training acc over epoch: 0.9639
Start of epoch 2
Momentum = -88.1852188
TF function: 0.02331122400210006
Eager function: 0.11751473100230214
Training acc over epoch: 0.9756
Start of epoch 3
Momentum = -88.1852188
TF function: 0.02656845700039412
Eager function: 0.1121610670015798
Training acc over epoch: 0.9830
Start of epoch 4
Momentum = -88.1852188
TF function: 0.02821972700257902
Eager function: 0.15709391699783737
Training acc over epoch: 0.9877
Start of epoch 5
Momentum = -88.1852188
TF function: 0.02441513300072984
Eager function: 0.10921925399816246
Training acc over epoch: 0.9917
Another option is to declare the momentum as a variable
momentum = tf.Variable(0.99, trainable=False)
# pass into the BN layer
tf.keras.layers.BatchNormalization(momentum=momentum)
Then you can have a callback that updates the momentum
class BNMomentumUpdate(tf.keras.callbacks.Callback):
    def __init__(self, momentum):
        super().__init__()
        self.momentum = momentum

    def on_epoch_end(self, epoch, logs=None):
        if epoch > 0:
            self.momentum.assign(1.)
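A minimal usage sketch under these assumptions (the model, datasets, and compile arguments are the ones from the question; the key point is that the same momentum variable is shared between the layer and the callback):
momentum = tf.Variable(0.99, trainable=False)

model = tf.keras.models.Sequential(
    [
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128),
        tf.keras.layers.BatchNormalization(momentum=momentum),
        tf.keras.layers.ReLU(),
        tf.keras.layers.Dense(10),
    ]
)
model.compile(
    optimizer=tf.keras.optimizers.Adam(0.001),
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
)

# The callback assigns 1.0 to the shared variable after the first epoch.
model.fit(ds_train, epochs=6, validation_data=ds_test,
          callbacks=[BNMomentumUpdate(momentum)], verbose=0)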
I want to binary classify breast cancer histopathological images from the BreakHis dataset (https://www.kaggle.com/ambarish/breakhis) using transfer learning and Inception ResNet v2. The goal is to freeze all layers and train the fully connected layer by adding two neurons to the model. In particular, initially I want to consider the images related to the magnification factor 40X (Benign: 625, Malignant: 1370). Here is a summary of what I do:
I read the images and resize them to 150x150
I partition the dataset into training, validation and test set
I load the pre-trained network Inception Resnet v2
I freeze all the layers
I add the two neurons for binary classification (1 = "benign", 0 = "malignant")
I compile the model using the Adam optimizer
I carry out the training
I make the prediction
I calculate the accuracy
This is the code:
data = dataset[dataset["Magnificant"]=="40X"]
def preprocessing(dataset, img_size):
    # images
    X = []
    # labels
    y = []
    i = 0
    for image in list(dataset["Path"]):
        # Resize and read the images
        X.append(cv2.resize(cv2.imread(image, cv2.IMREAD_COLOR),
                            (img_size, img_size), interpolation=cv2.INTER_CUBIC))
        basename = os.path.basename(image)
        # Get labels
        if dataset.loc[i][2] == "benign":
            y.append(1)
        else:
            y.append(0)
        i = i + 1
    return X, y
X, y = preprocessing(data, 150)
X = np.array(X)
y = np.array(y)
# Splitting
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, shuffle=True, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.25, stratify = y_train, shuffle=True, random_state=1)
conv_base = InceptionResNetV2(weights='imagenet', include_top=False, input_shape=[150, 150, 3])
# Freezing
for layer in conv_base.layers:
    layer.trainable = False
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))
opt = tf.keras.optimizers.Adam(learning_rate=0.0002)
loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
model.compile(loss=loss, optimizer=opt, metrics = ["accuracy", tf.metrics.AUC()])
batch_size = 32
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow(X_train, y_train, batch_size=batch_size)
val_generator = val_datagen.flow(X_val, y_val, batch_size=batch_size)
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
ntrain =len(X_train)
nval = len(X_val)
len(y_train)
epochs = 70
history = model.fit_generator(train_generator,
                              steps_per_epoch=ntrain // batch_size,
                              epochs=epochs,
                              validation_data=val_generator,
                              validation_steps=nval // batch_size, callbacks=[callback])
This is the output of the training at the last epoch:
Epoch 70/70
32/32 [==============================] - 3s 84ms/step - loss: 0.0499 - accuracy: 0.9903 - auc_5: 0.9996 - val_loss: 0.5661 - val_accuracy: 0.8250 - val_auc_5: 0.8521
I make the prediction:
test_datagen = ImageDataGenerator(rescale=1./255)
x = X_test
y_pred = model.predict(test_datagen.flow(x))
y_p = []
for i in range(len(y_pred)):
    if y_pred[i] > 0.5:
        y_p.append(1)
    else:
        y_p.append(0)
I calculate the accuracy:
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_test, y_p)
print(accuracy)
This is the accuracy value I get: 0.5459098497495827
Why do I get such low accuracy? I have done several tests but I always get similar results.
When doing transfer learning, especially with frozen weights, it is extremely important to do the same pre-processing as was used when the network was originally trained.
For the InceptionResNetV2 network the pre-processing type is "tf" in the tensorflow / keras libraries, which corresponds to dividing by 127.5 and then subtracting 1 for the imagenet weights. You are instead dividing by 255.
Fortunately you do not have to wade through the code to find out what function was used, as it is exposed in the API. Simply do:
train_datagen = ImageDataGenerator(preprocessing_function=tf.keras.applications.inception_resnet_v2.preprocess_input)
and so on for validation and test
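Concretely, that might look like this (a sketch reusing the generator names from the question; note the rescale=1./255 argument is dropped everywhere):
preprocess = tf.keras.applications.inception_resnet_v2.preprocess_input

train_datagen = ImageDataGenerator(preprocessing_function=preprocess)
val_datagen = ImageDataGenerator(preprocessing_function=preprocess)
test_datagen = ImageDataGenerator(preprocessing_function=preprocess)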
I am trying to run the demo code from the official TensorFlow website.
I am attaching the full code (copied and arranged) here for ease of reference:
import tensorflow as tf
# print("1")
from tensorflow import keras
from tensorflow.keras import layers
import numpy as np
import time
import os
# print("2")
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# @tf.function
def train_step(x, y):
    with tf.GradientTape() as tape:
        logits = model(x, training=True)
        loss_value = loss_fn(y, logits)
    grads = tape.gradient(loss_value, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
    train_acc_metric.update_state(y, logits)
    return loss_value

# @tf.function
def test_step(x, y):
    val_logits = model(x, training=False)
    val_acc_metric.update_state(y, val_logits)
inputs = keras.Input(shape=(784,), name="digits")
x1 = layers.Dense(64, activation="relu")(inputs)
x2 = layers.Dense(64, activation="relu")(x1)
outputs = layers.Dense(10, name="predictions")(x2)
model = keras.Model(inputs=inputs, outputs=outputs)
# Instantiate an optimizer.
optimizer = keras.optimizers.SGD(learning_rate=1e-3)
# Instantiate a loss function.
loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
train_acc_metric = keras.metrics.SparseCategoricalAccuracy()
val_acc_metric = keras.metrics.SparseCategoricalAccuracy()
# Prepare the training dataset.
batch_size = 64
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.reshape(x_train, (-1, 784))
x_test = np.reshape(x_test, (-1, 784))
# Reserve 10,000 samples for validation.
x_val = x_train[-10000:]
y_val = y_train[-10000:]
x_train = x_train[:-10000]
y_train = y_train[:-10000]
# Prepare the training dataset.
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.shuffle(buffer_size=1024).batch(batch_size)
# Prepare the validation dataset.
val_dataset = tf.data.Dataset.from_tensor_slices((x_val, y_val))
val_dataset = val_dataset.batch(batch_size)
epochs = 2
for epoch in range(epochs):
print("\nStart of epoch %d" % (epoch,))
start_time = time.time()
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
loss_value = train_step(x_batch_train, y_batch_train)
# Log every 200 batches.
if step % 200 == 0:
print(
"Training loss (for one batch) at step %d: %.4f"
% (step, float(loss_value))
)
print("Seen so far: %d samples" % ((step + 1) * 64))
# Display metrics at the end of each epoch.
train_acc = train_acc_metric.result()
print("Training acc over epoch: %.4f" % (float(train_acc),))
# Reset training metrics at the end of each epoch
train_acc_metric.reset_states()
# Run a validation loop at the end of each epoch.
for x_batch_val, y_batch_val in val_dataset:
test_step(x_batch_val, y_batch_val)
val_acc = val_acc_metric.result()
val_acc_metric.reset_states()
print("Validation acc: %.4f" % (float(val_acc),))
print("Time taken: %.2fs" % (time.time() - start_time))
print("end")
For no apparent reason, this code crashes with a segmentation fault in TensorFlow 2.3.1 right at the beginning:
>python dummy.py
2021-03-11 17:45:52.231509: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.10.1
Segmentation fault (core dumped)
Interestingly, if I put some random print statements at the very start (those print("1") etc. statements), the code executes to the end and hits the segmentation fault only at the end (redundant output not shown):
Start of epoch 1
Training loss (for one batch) at step 0: 1.0215
Seen so far: 64 samples
Training loss (for one batch) at step 200: 0.9116
Seen so far: 12864 samples
Training loss (for one batch) at step 400: 0.4894
Seen so far: 25664 samples
Training loss (for one batch) at step 600: 0.5636
Seen so far: 38464 samples
Training acc over epoch: 0.8416
Validation acc: 0.8296
Time taken: 3.16s
end
Segmentation fault (core dumped)
Another observation: if I uncomment the @tf.function on top of my train_step and test_step functions, the code segfaults again, but only after it prints
Start of epoch 0
Can someone explain what is going wrong with my Tensorflow package?
It was due to an older version of Ubuntu. I was using 14; after upgrading to 18, the issue was resolved.
I am trying to compare the results of my own training loop with the ones given by Keras fit. I provide a code snippet below (RUN_TYPE = 1 runs my own training loop, otherwise it runs Keras fit). As you can see:
I seed the RNGs so my generated training set is exactly the same (checked).
I use the same function to create the DNN architecture.
I use the same hyperparameter values (learning rate, optimizer parameters, batch_size, etc.).
I specifically use shuffle=False in the Keras.fit function to disable any batch shuffling.
When I set batch_size to the size of the training sample, I get the same loss between the two versions. As soon as batch_size < training sample size, i.e. an epoch takes multiple steps, my results diverge. Having made the algorithm deterministic, I don't understand where the discrepancy comes from.
Any tips?
RUN_TYPE = 2
import numpy as np
import time
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import LambdaCallback
import tensorflow.keras.backend as K
np.random.seed(1)
tf.random.set_seed(2)
@tf.function
def training_step(x, y, model, opt):
    print(x)
    print(y)
    with tf.GradientTape() as tape:
        predictions = model(x, training=True)
        mseLoss = keras.losses.MeanSquaredError()
        loss = mseLoss(y, predictions)
    grads = tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(grads, model.trainable_variables))
    return loss

# Hyperparameter initialisation
# learning_rate, beta_1, beta_2, etc.
if RUN_TYPE == 1:
    X_train, y_train = createDataSet(n)
    train_dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))
    train_dataset = train_dataset.batch(batch_size)

    # An optimizer for updating the trainable variables
    optimizer = keras.optimizers.Adam(learning_rate=learning_rate, beta_1=beta_1, beta_2=beta_2)

    # Create an instance of the model
    model = createModel(X_train.shape[1:])

    # Train the model
    for epoch in range(epochs):
        print("\nStart of epoch %d" % (epoch,))
        for step, (x_batch_train, y_batch_train) in enumerate(train_dataset):
            loss = training_step(x_batch_train, y_batch_train, model, optimizer)
            print("Step " + str(step) + " loss = " + str(loss.numpy()))
else:
    X, Y = createDataSet(n)
    model = createModel(X.shape[1:])
    optimizer = keras.optimizers.Adam(learning_rate=learning_rate, beta_1=beta_1, beta_2=beta_2)
    model.compile(loss="mse", optimizer=optimizer)
    history = model.fit(X, Y, epochs=epochs, batch_size=batch_size, verbose=0, shuffle=False)