Keras 2.x killed off a bunch of useful metrics that I need to use, so I copied the functions from the old metrics.py file into my code, then included them as follows.
def precision(y_true, y_pred):  # taken from old keras source code
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

def recall(y_true, y_pred):  # taken from old keras source code
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall

...

model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy', precision, recall])
and this results in
ValueError: Unknown metric function:precision
What am I doing wrong? I can't see anything I'm doing wrong according to the Keras documentation.
edit:
Here is the full Traceback:
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "/Library/Python/2.7/site-packages/keras/models.py", line 274, in load_model
    sample_weight_mode=sample_weight_mode)
  File "/Library/Python/2.7/site-packages/keras/models.py", line 824, in compile
    **kwargs)
  File "/Library/Python/2.7/site-packages/keras/engine/training.py", line 934, in compile
    handle_metrics(output_metrics)
  File "/Library/Python/2.7/site-packages/keras/engine/training.py", line 901, in handle_metrics
    metric_fn = metrics_module.get(metric)
  File "/Library/Python/2.7/site-packages/keras/metrics.py", line 75, in get
    return deserialize(str(identifier))
  File "/Library/Python/2.7/site-packages/keras/metrics.py", line 67, in deserialize
    printable_module_name='metric function')
  File "/Library/Python/2.7/site-packages/keras/utils/generic_utils.py", line 164, in deserialize_keras_object
    ':' + function_name)
ValueError: Unknown metric function:precision
<FATAL> : Failed to load Keras model from file: model.h5
***> abort program execution
Traceback (most recent call last):
  File "classification.py", line 84, in <module>
    'H:!V:FilenameModel=model.h5:NumEpochs=20:BatchSize=32') #:VarTransform=D,G
TypeError: none of the 3 overloaded methods succeeded. Full details:
  TMVA::MethodBase* TMVA::Factory::BookMethod(TMVA::DataLoader* loader, TString theMethodName, TString methodTitle, TString theOption = "") =>
    could not convert argument 2
  TMVA::MethodBase* TMVA::Factory::BookMethod(TMVA::DataLoader* loader, TMVA::Types::EMVA theMethod, TString methodTitle, TString theOption = "") =>
    FATAL error (C++ exception of type runtime_error)
  TMVA::MethodBase* TMVA::Factory::BookMethod(TMVA::DataLoader*, TMVA::Types::EMVA, TString, TString, TMVA::Types::EMVA, TString) =>
    takes at least 6 arguments (4 given)
From the traceback it seems that the problem occurs when you try to load the saved model:
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "/Library/Python/2.7/site-packages/keras/models.py", line 274, in load_model
    sample_weight_mode=sample_weight_mode)
  ...
ValueError: Unknown metric function:precision
<FATAL> : Failed to load Keras model from file: model.h5
Take a look at this issue: https://github.com/keras-team/keras/issues/10104
You need to add your custom objects when loading the model. For example:
dependencies = {
    'auc_roc': auc_roc
}

model = keras.models.load_model(self.output_directory + 'best_model.hdf5', custom_objects=dependencies)
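In your case the custom objects are the precision and recall functions themselves, so loading the model.h5 file from your traceback would look something like this (a sketch, assuming the precision and recall functions above are defined in the script that loads the model):

from keras.models import load_model

# register the custom metric functions under the names they were compiled with
model = load_model('model.h5',
                   custom_objects={'precision': precision, 'recall': recall})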
My suggestion would be to implement your metrics in a Keras callback, because:
It can achieve the same thing as a metric does.
It also gives you a model-saving strategy.
import keras
import keras.backend as K

class Checkpoint(keras.callbacks.Callback):
    def __init__(self, test_data, filename):
        self.test_data = test_data
        self.filename = filename

    def on_train_begin(self, logs=None):
        self.pre = [0.]
        self.rec = [0.]
        print('Test on %s begins' % self.filename)

    def on_train_end(self, logs=None):
        print('Best Precision: %s' % max(self.pre))
        print('Best Recall: %s' % max(self.rec))

    def on_epoch_end(self, epoch, logs=None):
        x, y = self.test_data
        y_pred = self.model.predict(x)
        # evaluate the custom metric tensors on the test predictions
        pre = K.eval(precision(K.constant(y), K.constant(y_pred)))
        rec = K.eval(recall(K.constant(y), K.constant(y_pred)))
        # print your precision or recall as you want
        print('Precision: %s, Recall: %s' % (pre, rec))
        # save your model when a better trained model was found
        if pre > max(self.pre):
            self.model.save(self.filename, overwrite=True)
            print('Higher precision found. Save as %s' % self.filename)
        self.pre.append(pre)
        self.rec.append(rec)
After that, you can add the callback to your training (note that callbacks are passed to fit, not to compile; x_train and y_train stand for your training arrays):

checkpoint = Checkpoint((x_test, y_test), 'precision.h5')
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(x_train, y_train, epochs=15, callbacks=[checkpoint])
I tested your code in Python 3.6.5, TensorFlow==1.9 and Keras==2.2.2 and it worked. I think the error could be due to Python 2 usage.
import numpy as np
import tensorflow as tf
import keras
import keras.backend as K
from keras.layers import Dense
from keras.models import Sequential, Input, Model
from sklearn import datasets
print(f"TF version: {tf.__version__}, Keras version: {keras.__version__}\n")
# dummy dataset
iris = datasets.load_iris()
x, y_ = iris.data, iris.target
def one_hot(v): return np.eye(len(np.unique(v)))[v]
y = one_hot(y_)
# model
inp = Input(shape=(4,))
dense = Dense(8, activation='relu')(inp)
dense = Dense(16, activation='relu')(dense)
dense = Dense(3, activation='softmax')(dense)
model = Model(inputs=inp, outputs=dense)
# custom metrics
def precision(y_true, y_pred):  # taken from old keras source code
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision

def recall(y_true, y_pred):  # taken from old keras source code
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
# training
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy', precision, recall])
model.fit(x=x, y=y, batch_size=8, epochs=15)
Output:
TF version: 1.9.0, Keras version: 2.2.2
Epoch 1/15
150/150 [==============================] - 0s 2ms/step - loss: 1.2098 - acc: 0.2600 - precision: 0.0000e+00 - recall: 0.0000e+00
Epoch 2/15
150/150 [==============================] - 0s 135us/step - loss: 1.1036 - acc: 0.4267 - precision: 0.0000e+00 - recall: 0.0000e+00
Epoch 3/15
150/150 [==============================] - 0s 132us/step - loss: 1.0391 - acc: 0.5733 - precision: 0.0000e+00 - recall: 0.0000e+00
Epoch 4/15
150/150 [==============================] - 0s 133us/step - loss: 0.9924 - acc: 0.6533 - precision: 0.0000e+00 - recall: 0.0000e+00
Epoch 5/15
150/150 [==============================] - 0s 108us/step - loss: 0.9379 - acc: 0.6667 - precision: 0.0000e+00 - recall: 0.0000e+00
Epoch 6/15
150/150 [==============================] - 0s 134us/step - loss: 0.8802 - acc: 0.6667 - precision: 0.0533 - recall: 0.0067
Epoch 7/15
150/150 [==============================] - 0s 167us/step - loss: 0.8297 - acc: 0.7867 - precision: 0.4133 - recall: 0.0800
Epoch 8/15
150/150 [==============================] - 0s 138us/step - loss: 0.7743 - acc: 0.8200 - precision: 0.9467 - recall: 0.3667
Epoch 9/15
150/150 [==============================] - 0s 161us/step - loss: 0.7232 - acc: 0.7467 - precision: 1.0000 - recall: 0.5667
Epoch 10/15
150/150 [==============================] - 0s 134us/step - loss: 0.6751 - acc: 0.8000 - precision: 0.9733 - recall: 0.6333
Epoch 11/15
150/150 [==============================] - 0s 134us/step - loss: 0.6310 - acc: 0.8867 - precision: 0.9924 - recall: 0.6400
Epoch 12/15
150/150 [==============================] - 0s 131us/step - loss: 0.5844 - acc: 0.8867 - precision: 0.9759 - recall: 0.6600
Epoch 13/15
150/150 [==============================] - 0s 111us/step - loss: 0.5511 - acc: 0.9133 - precision: 0.9759 - recall: 0.6533
Epoch 14/15
150/150 [==============================] - 0s 134us/step - loss: 0.5176 - acc: 0.9000 - precision: 0.9403 - recall: 0.6733
Epoch 15/15
150/150 [==============================] - 0s 134us/step - loss: 0.4899 - acc: 0.8667 - precision: 0.8877 - recall: 0.6733
Related
I have made a model that tries to predict the chances of every piano key playing in a time step, given all time steps before it. I tried making a GRU network with 88 outputs (one for every piano key).
input shape = (600, 88)
desired output/label shape = (88,)
import numpy as np
import midi_processer
from keras import models
from keras import layers
x_train, x_test = np.load("samples.npy", mmap_mode='r'), np.load("test_samples.npy", mmap_mode='r')
y_train, y_test = np.load("labels.npy", mmap_mode='r'), np.load("test_labels.npy", mmap_mode='r')
def build_model():
    model = models.Sequential()
    model.add(layers.Input(shape=(600, 88,)))
    model.add(layers.GRU(512, activation='tanh', recurrent_activation='hard_sigmoid'))
    model.add(layers.RepeatVector(600))
    model.add(layers.GRU(512, activation='tanh', recurrent_activation='hard_sigmoid'))
    model.add(layers.Dense(88, activation='sigmoid'))
    return model

x_partial, x_val = x_train[:13000], x_train[13000:]
y_partial, y_val = y_train[:13000], y_train[13000:]

model = build_model()
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])
history = model.fit(x_partial, y_partial, batch_size=50, epochs=15, validation_data=(x_val, y_val))
Instead of learning normally, my model stayed at a constant accuracy throughout all of the epochs:
Epoch 1/15
260/260 [==============================] - 998s 4s/step - loss: -0.1851 - accuracy: 0.0298 - val_loss: -8.8735 - val_accuracy: 0.0310
Epoch 2/15
260/260 [==============================] - 827s 3s/step - loss: -33.6520 - accuracy: 0.0382 - val_loss: -56.0122 - val_accuracy: 0.0310
Epoch 3/15
260/260 [==============================] - 844s 3s/step - loss: -78.6130 - accuracy: 0.0382 - val_loss: -98.2798 - val_accuracy: 0.0310
Epoch 4/15
260/260 [==============================] - 906s 3s/step - loss: -121.0963 - accuracy: 0.0382 - val_loss: -139.3440 - val_accuracy: 0.0310
Epoch 5/15
I was just following a TensorFlow example from the book Hands-On Machine Learning with Scikit-Learn and TensorFlow but got weird results.
The example:
import tensorflow as tf
from tensorflow import keras
tf.__version__
keras.__version__
fashion_mnist = keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()
X_valid, X_train = X_train_full[:5000] / 255.0, X_train_full[5000:] / 255.0
y_valid, y_train = y_train_full[:5000] / 255.0, y_train_full[5000:] / 255.0
class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
"Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dense(300, activation="relu"),
    keras.layers.Dense(100, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])

model.compile(loss="sparse_categorical_crossentropy",
              optimizer='sgd',
              metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=50, validation_data=(X_valid, y_valid))
As the epochs evolve we should see an improvement in accuracy, as indicated in the book:
Train on 55000 samples, validate on 5000 samples
Epoch 1/30
55000/55000 [==========] - 3s 55us/sample - loss: 1.4948 - acc: 0.5757 - val_loss: 1.0042 - val_acc: 0.7166
Epoch 2/30
55000/55000 [==========] - 3s 55us/sample - loss: 0.8690 - acc: 0.7318 - val_loss: 0.7549 - val_acc: 0.7616
[...]
Epoch 50/50
55000/55000 [==========] - 4s 72us/sample - loss: 0.3607 - acc: 0.8752 - val_loss: 0.3706 - val_acc: 0.8728
But when I ran I got the following:
Epoch 1/30
1719/1719 [==============================] - 3s 2ms/step - loss: 0.0623 - accuracy: 0.1005 - val_loss: 0.0011 - val_accuracy: 0.0914
Epoch 2/30
1719/1719 [==============================] - 3s 2ms/step - loss: 8.7637e-04 - accuracy: 0.1011 - val_loss: 5.2079e-04 - val_accuracy: 0.0914
Epoch 3/30
1719/1719 [==============================] - 3s 2ms/step - loss: 4.9200e-04 - accuracy: 0.1019 - val_loss: 3.4211e-04 - val_accuracy: 0.0914
[...]
Epoch 49/50
1719/1719 [==============================] - 3s 2ms/step - loss: 3.1710e-05 - accuracy: 0.0992 - val_loss: 3.2966e-05 - val_accuracy: 0.0914
Epoch 50/50
1719/1719 [==============================] - 3s 2ms/step - loss: 2.7711e-05 - accuracy: 0.1022 - val_loss: 3.1833e-05 - val_accuracy: 0.0914
So, as you can see, my reproduction got a much lower accuracy that did not improve: it stayed at 0.0914 instead of reaching 0.8728.
Is there something wrong in my TensorFlow installation, setup or even in the code?
You cannot divide the labels y as in y_valid, y_train = y_train_full[:5000] / 255.0, y_train_full[5000:] / 255.0; only the pixel values in X should be scaled. The corrected code is the following:
import tensorflow as tf
from tensorflow import keras
tf.__version__
keras.__version__
fashion_mnist = keras.datasets.fashion_mnist
(X_train_full, y_train_full), (X_test, y_test) = fashion_mnist.load_data()
X_train_full = X_train_full / 255.0
X_test = X_test / 255.0
class_names = ["T-shirt/top", "Trouser", "Pullover", "Dress", "Coat",
"Sandal", "Shirt", "Sneaker", "Bag", "Ankle boot"]
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])

model.compile(loss="sparse_categorical_crossentropy",
              optimizer='sgd',
              metrics=['accuracy'])
history = model.fit(X_train_full, y_train_full, epochs=5, validation_data=(X_test, y_test))
It will give accuracy like:
Epoch 1/5
1875/1875 [==============================] - 2s 1ms/step - loss: 0.9880 - accuracy: 0.6923 - val_loss: 0.5710 - val_accuracy: 0.8054
Epoch 2/5
1875/1875 [==============================] - 2s 944us/step - loss: 0.5281 - accuracy: 0.8227 - val_loss: 0.5112 - val_accuracy: 0.8228
Epoch 3/5
1875/1875 [==============================] - 2s 913us/step - loss: 0.4720 - accuracy: 0.8391 - val_loss: 0.4782 - val_accuracy: 0.8345
Epoch 4/5
1875/1875 [==============================] - 2s 915us/step - loss: 0.4492 - accuracy: 0.8462 - val_loss: 0.4568 - val_accuracy: 0.8410
Epoch 5/5
1875/1875 [==============================] - 2s 935us/step - loss: 0.4212 - accuracy: 0.8550 - val_loss: 0.4469 - val_accuracy: 0.8444
Also, the adam optimizer may give better results than sgd.
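For instance, switching the optimizer is a one-line change in compile; a minimal sketch using the same model as above:

# adam usually converges faster than plain sgd on this task
model.compile(loss="sparse_categorical_crossentropy",
              optimizer='adam',
              metrics=['accuracy'])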
I'm trying to implement a CNN using Keras on a scikit-learn dataset for handwritten digit recognition (load_digits). I have got the model to run, but the accuracy is not improving with each epoch. I'm guessing it's because my labels are incorrect. I have tried encoding my y values with 'to_categorical', but it displays the following error:
C:\Users\AppData\Local\Programs\Python\Python38\lib\site-packages\tensorflow\python\keras\backend.py:4979 binary_crossentropy
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
C:\Users\AppData\Local\Programs\Python\Python38\lib\site-packages\tensorflow\python\util\dispatch.py:201 wrapper
return target(*args, **kwargs)
C:\Users\AppData\Local\Programs\Python\Python38\lib\site-packages\tensorflow\python\ops\nn_impl.py:173 sigmoid_cross_entropy_with_logits
raise ValueError("logits and labels must have the same shape (%s vs %s)" %
ValueError: logits and labels must have the same shape ((None, 1) vs (None, 10))
When I run my code without encoding the y values, it seems to go through the CNN model, but it isn't accurate and the accuracy doesn't increase. This is my code:
import tensorflow as tf
from sklearn import datasets
from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
#from keras.utils.np_utils import to_categorical
X,y = datasets.load_digits(return_X_y = True)
X = X/16
#X = X.reshape(1797,8,8,1)
train_x, test_x, train_y, test_y = train_test_split(X, y)
train_x = train_x.reshape(1347,8,8,1)
#test_x = test_x.reshape()
#train_y = to_categorical(train_y, num_classes = 10)
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=( 8, 8, 1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(64))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(train_x, train_y, batch_size=32, epochs=6, validation_split=0.3)
print(train_x[0])
And this gives me the following output:
Epoch 1/6
1/30 [>.............................] - ETA: 13s - loss: 1.1026 - accuracy: 0.0938
6/30 [=====>........................] - ETA: 0s - loss: 0.2949 - accuracy: 0.0652
30/30 [==============================] - 1s 33ms/step - loss: -5.4832 - accuracy: 0.0893 - val_loss: -49.9462 - val_accuracy: 0.1012
Epoch 2/6
1/30 [>.............................] - ETA: 0s - loss: -52.2145 - accuracy: 0.0625
30/30 [==============================] - 0s 3ms/step - loss: -120.6972 - accuracy: 0.0961 - val_loss: -513.0211 - val_accuracy: 0.1012
Epoch 3/6
1/30 [>.............................] - ETA: 0s - loss: -638.2873 - accuracy: 0.1250
30/30 [==============================] - 0s 3ms/step - loss: -968.3621 - accuracy: 0.1006 - val_loss: -2804.1062 - val_accuracy: 0.1012
Epoch 4/6
1/30 [>.............................] - ETA: 0s - loss: -3427.3135 - accuracy: 0.0000e+00
30/30 [==============================] - 0s 3ms/step - loss: -4571.7894 - accuracy: 0.0934 - val_loss: -10332.9727 - val_accuracy: 0.1012
Epoch 5/6
1/30 [>.............................] - ETA: 0s - loss: -12963.2559 - accuracy: 0.0625
30/30 [==============================] - 0s 3ms/step - loss: -15268.3010 - accuracy: 0.0887 - val_loss: -29262.1191 - val_accuracy: 0.1012
Epoch 6/6
1/30 [>.............................] - ETA: 0s - loss: -30990.6758 - accuracy: 0.1562
30/30 [==============================] - 0s 3ms/step - loss: -40321.9540 - accuracy: 0.0960 - val_loss: -68548.6094 - val_accuracy: 0.1012
Any guidance is greatly appreciated, thanks!
When you have a CNN you want the last layer to have as many nodes as there are labels. So if you have 10 digits, you want the last layer to have an output size of 10. It usually uses the "softmax" activation, which turns the outputs into a probability distribution over the classes (values between 0 and 1 that sum to 1).
model.add(Dense(10))
model.add(Activation('softmax'))
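With a 10-way softmax output the loss has to match the label encoding as well; a minimal sketch of the relevant part, assuming the integer labels from load_digits are kept as-is:

model.add(Dense(10))
model.add(Activation('softmax'))

# with integer class labels 0-9, use sparse_categorical_crossentropy;
# with one-hot labels from to_categorical, use categorical_crossentropy instead
model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])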
If I switch this to Python 2.x, it runs 10 epochs. Why is that?
Training a logistic regression model
import keras.backend as K
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
from sklearn.model_selection import train_test_split, cross_val_score
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3,
                                                    random_state=42)

# NOTE: If I run this in Python 3.x, it only performs 1 Epoch
K.clear_session()

model = Sequential()
model.add(Dense(1, input_shape=(4,), activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

# Saved the result of the fitting, to display the history as a data frame & see how the model does
history = model.fit(X_train, y_train)
result = model.evaluate(X_test, y_test)
Output:
Epoch 1/10
960/960 [==============================] - 0s - loss: 0.7943 - acc: 0.5219
Epoch 2/10
960/960 [==============================] - 0s - loss: 0.7338 - acc: 0.5469
Epoch 3/10
960/960 [==============================] - 0s - loss: 0.6847 - acc: 0.5688
Epoch 4/10
960/960 [==============================] - 0s - loss: 0.6446 - acc: 0.6177
Epoch 5/10
960/960 [==============================] - 0s - loss: 0.6113 - acc: 0.6719
Epoch 6/10
960/960 [==============================] - 0s - loss: 0.5832 - acc: 0.7000
Epoch 7/10
960/960 [==============================] - 0s - loss: 0.5591 - acc: 0.7177
Epoch 8/10
960/960 [==============================] - 0s - loss: 0.5381 - acc: 0.7365
Epoch 9/10
960/960 [==============================] - 0s - loss: 0.5196 - acc: 0.7542
Epoch 10/10
960/960 [==============================] - 0s - loss: 0.5031 - acc: 0.7688
32/412 [=>............................] - ETA: 0s
The fit function has a parameter epochs with a default value of 1.
fit(self, x=None, y=None, batch_size=None, epochs=1, verbose=1, callbacks=None,
    validation_split=0.0, validation_data=None, shuffle=True, class_weight=None,
    sample_weight=None, initial_epoch=0, steps_per_epoch=None,
    validation_steps=None)
However, the default used to be 10. See the changes to fit in models.py in this commit for example. You most likely have an older version of Keras with Python 2.
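Since the default changed between Keras versions, it is safest to pass epochs explicitly; a minimal sketch based on the fit call above:

# explicitly request 10 epochs regardless of the version's default
history = model.fit(X_train, y_train, epochs=10)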
I'd like to monitor, e.g., the learning rate during training in Keras, both in the progress bar and in TensorBoard. I figure there must be a way to specify which variables are logged, but there's no immediate clarification on this issue on the Keras website.
I guess it has something to do with creating a custom callback function; however, it should be possible to modify the already existing progress bar callback, no?
It can be achieved via a custom metric. Take the learning rate as an example:
import numpy as np
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import LearningRateScheduler, TensorBoard

def get_lr_metric(optimizer):
    def lr(y_true, y_pred):
        return optimizer.lr
    return lr

x = Input((50,))
out = Dense(1, activation='sigmoid')(x)
model = Model(x, out)

optimizer = Adam(lr=0.001)
lr_metric = get_lr_metric(optimizer)
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['acc', lr_metric])

# reducing the learning rate by half every 2 epochs
cbks = [LearningRateScheduler(lambda epoch: 0.001 * 0.5 ** (epoch // 2)),
        TensorBoard(write_graph=False)]

X = np.random.rand(1000, 50)
Y = np.random.randint(2, size=1000)
model.fit(X, Y, epochs=10, callbacks=cbks)
The LR will be printed in the progress bar:
Epoch 1/10
1000/1000 [==============================] - 0s 103us/step - loss: 0.8228 - acc: 0.4960 - lr: 0.0010
Epoch 2/10
1000/1000 [==============================] - 0s 61us/step - loss: 0.7305 - acc: 0.4970 - lr: 0.0010
Epoch 3/10
1000/1000 [==============================] - 0s 62us/step - loss: 0.7145 - acc: 0.4730 - lr: 5.0000e-04
Epoch 4/10
1000/1000 [==============================] - 0s 58us/step - loss: 0.7129 - acc: 0.4800 - lr: 5.0000e-04
Epoch 5/10
1000/1000 [==============================] - 0s 58us/step - loss: 0.7124 - acc: 0.4810 - lr: 2.5000e-04
Epoch 6/10
1000/1000 [==============================] - 0s 63us/step - loss: 0.7123 - acc: 0.4790 - lr: 2.5000e-04
Epoch 7/10
1000/1000 [==============================] - 0s 61us/step - loss: 0.7119 - acc: 0.4840 - lr: 1.2500e-04
Epoch 8/10
1000/1000 [==============================] - 0s 61us/step - loss: 0.7117 - acc: 0.4880 - lr: 1.2500e-04
Epoch 9/10
1000/1000 [==============================] - 0s 59us/step - loss: 0.7116 - acc: 0.4880 - lr: 6.2500e-05
Epoch 10/10
1000/1000 [==============================] - 0s 63us/step - loss: 0.7115 - acc: 0.4880 - lr: 6.2500e-05
Then, you can visualize the LR curve in TensorBoard.
Another way (in fact the encouraged one) to pass custom values to TensorBoard is by subclassing the keras.callbacks.TensorBoard class. This allows you to apply custom functions to obtain the desired metrics and pass them directly to TensorBoard.
Here is an example for learning rate of Adam optimizer:
import numpy as np
import keras.backend as K
from keras.callbacks import TensorBoard

class SubTensorBoard(TensorBoard):
    def __init__(self, *args, **kwargs):
        super(SubTensorBoard, self).__init__(*args, **kwargs)

    def lr_getter(self):
        # Get vals
        decay = self.model.optimizer.decay
        lr = self.model.optimizer.lr
        iters = self.model.optimizer.iterations  # only this should not be const
        beta_1 = self.model.optimizer.beta_1
        beta_2 = self.model.optimizer.beta_2
        # calculate the effective Adam learning rate at the current iteration
        lr = lr * (1. / (1. + decay * K.cast(iters, K.dtype(decay))))
        t = K.cast(iters, K.floatx()) + 1
        lr_t = lr * (K.sqrt(1. - K.pow(beta_2, t)) / (1. - K.pow(beta_1, t)))
        return np.float32(K.eval(lr_t))

    def on_epoch_end(self, epoch, logs={}):
        logs.update({"lr": self.lr_getter()})
        super(SubTensorBoard, self).on_epoch_end(epoch, logs)
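The subclass is then used in place of the built-in TensorBoard callback; a minimal sketch, where './logs' is an arbitrary example directory and X, Y are the same toy arrays as in the first answer:

# pass the subclass instead of keras.callbacks.TensorBoard when fitting
tb = SubTensorBoard(log_dir='./logs', write_graph=False)
model.fit(X, Y, epochs=10, callbacks=[tb])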
I've come to this question because I wanted to log more variables in the Keras progress bar. This is the way I did it after reading the answers here:
import tensorflow as tf

class UpdateMetricsCallback(tf.keras.callbacks.Callback):
    def on_batch_end(self, batch, logs):
        logs.update({'my_batch_metric': 0.1, 'my_other_batch_metric': 0.2})

    def on_epoch_end(self, epoch, logs):
        logs.update({'my_epoch_metric': 0.1, 'my_other_epoch_metric': 0.2})

model.fit(...,
          callbacks=[UpdateMetricsCallback()]
          )
I hope it helps others.