Keras Tuner and TPU in Google Colab - python

I'm having problems with Keras Tuner and TPU. When I run the code below, everything works well and network training is fast.
vocab_size = 5000
embedding_dim = 64
max_length = 2000

def create_model():
    model = tf.keras.Sequential([
        tf.keras.layers.Embedding(vocab_size, embedding_dim),
        tf.keras.layers.LSTM(100, dropout=0.5, recurrent_dropout=0.5),
        tf.keras.layers.Dense(embedding_dim, activation='relu'),
        tf.keras.layers.Dense(4, activation='softmax')
    ])
    return model

resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)

with strategy.scope():
    model = create_model()
    model.compile(optimizer='adam',
                  loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
                  metrics=['sparse_categorical_accuracy'])

model.fit(train_padded, y_train,
          epochs=10,
          validation_split=0.15,
          verbose=1, batch_size=128)
When I use Keras Tuner, the network trains slowly, and I believe the TPU is not being used.
vocab_size = 5000
max_length = 2000

resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)

def build_model(hp):
    model = tf.keras.Sequential()
    activation_choice = hp.Choice('activation', values=['relu', 'sigmoid', 'tanh', 'elu', 'selu'])
    embedding_dim = hp.Int('units_hidden', min_value=24, max_value=128, step=8)
    model.add(tf.keras.layers.Embedding(vocab_size, embedding_dim))
    model.add(tf.keras.layers.LSTM(hp.Int('LSTM_Units', min_value=50, max_value=500, step=10),
                                   dropout=hp.Float('dropout', 0, 0.5, step=0.1, default=0),
                                   recurrent_dropout=hp.Float('recurrent_dropout', 0, 0.5, step=0.1, default=0)))
    model.add(tf.keras.layers.Dense(embedding_dim, activation=activation_choice))
    model.add(tf.keras.layers.Dense(4, activation='softmax'))
    model.compile(
        optimizer=hp.Choice('optimizer', values=['adam', 'rmsprop', 'SGD']),
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['sparse_categorical_accuracy'])
    return model

with strategy.scope():
    tuner = Hyperband(
        build_model,
        objective='val_accuracy',
        max_epochs=10,
        hyperband_iterations=2)

    tuner.search(train_padded, y_train,
                 batch_size=128,
                 epochs=10,
                 callbacks=[EarlyStopping(patience=1)],
                 validation_split=0.15,
                 verbose=1)

best_models = tuner.get_best_models(1)
best_models[0].save('/content/drive/My Drive/best_model.h5')
Notebook link
How can I make Keras Tuner work with the TPU?

You need to pass the strategy to the tuner:
tuner = Hyperband(
    build_model,
    objective='val_accuracy',
    max_epochs=10,
    hyperband_iterations=2,
    distribution_strategy=strategy)
(and remove the strategy.scope() part)
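With distribution_strategy set, the tuner enters the strategy's scope itself when it builds and compiles each trial model. A minimal sketch of the corrected flow, reusing the resolver and build_model from the question:

# TPU setup, unchanged from the question.
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='grpc://' + os.environ['COLAB_TPU_ADDR'])
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.experimental.TPUStrategy(resolver)

# No explicit strategy.scope() here: the tuner handles it per trial.
tuner = Hyperband(
    build_model,
    objective='val_accuracy',
    max_epochs=10,
    hyperband_iterations=2,
    distribution_strategy=strategy)

tuner.search(train_padded, y_train,
             batch_size=128,
             epochs=10,
             callbacks=[EarlyStopping(patience=1)],
             validation_split=0.15,
             verbose=1)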

To add to the answer above:
I don't use Google Colab but Kaggle. Using a TPU, I got the same error, "File system scheme '[local]' not implemented", when the tuner tried to write its checkpoints to Kaggle's working directory.
Since I don't have a gs:// location, I "modified" the function Keras Tuner calls to save checkpoints so that it writes to a local dir, which is the Kaggle working directory. I used patch() to mock the function.
The first important thing is that Keras Tuner must be version 1.1.2 or above.
Example:
from unittest.mock import patch

<your code>

# The new function to "replace" the existing one
# (keras_tuner.engine.tuner_utils.SaveBestEpoch.on_epoch_end).
def new_on_epoch_end(self, epoch, logs=None):
    if not self.objective.has_value(logs):
        # Save on every epoch if metric value is not in the logs. Either no
        # objective is specified, or objective is computed and returned
        # after `fit()`.
        # ***** the following are the lines I added *****
        # Route the file I/O through the local host instead of a gs:// bucket.
        save_locally = tf.saved_model.SaveOptions(experimental_io_device='/job:localhost')
        # I then added ', options=save_locally' to the line below.
        # ***********************************************
        self.model.save_weights(self.filepath, options=save_locally)
        return
    current_value = self.objective.get_value(logs)
    if self.objective.better_than(current_value, self.best_value):
        self.best_value = current_value
        # ***** the following are the lines I added *****
        # Route the file I/O through the local host instead of a gs:// bucket.
        save_locally = tf.saved_model.SaveOptions(experimental_io_device='/job:localhost')
        # I then added ', options=save_locally' to the line below.
        # ***********************************************
        self.model.save_weights(self.filepath, options=save_locally)

with patch('keras_tuner.engine.tuner_utils.SaveBestEpoch.on_epoch_end', new_on_epoch_end):
    # Perform hypertuning. The parameters are exactly like those in the fit() method.
    tuner.search(
        X_train,
        y_train,
        epochs=num_of_epochs,
        validation_data=(X_valid, y_valid),
        callbacks=[early_stopping]
    )

<more of your code>
Since I used with patch(...), everything reverts to the original implementation automatically once the block exits.
I hope this is useful for those using Kaggle, or anyone who wants to write to a local directory.
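The same SaveOptions should also let you save the final best model locally once the search is done; a short sketch (the path and the SavedModel directory format are just examples):

# Pull the best model out of the tuner and write it to the local
# working directory, routing file I/O through the local host.
best_model = tuner.get_best_models(num_models=1)[0]
save_locally = tf.saved_model.SaveOptions(experimental_io_device='/job:localhost')
best_model.save('/kaggle/working/best_model', options=save_locally)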

Related

Train model after loading in MLflow

My goal is to store an untrained model in the MLflow Registry and then load it for training.
I have a register_model.py which looks like this:
if __name__ == "__main__":
    remote_server_uri = "http://127.0.0.1:5000"
    mlflow.set_tracking_uri(remote_server_uri)

    # Load and compile Keras model
    model = tf.keras.applications.MobileNetV2((32, 32, 3), classes=10, weights=None)
    model.compile("adam", "sparse_categorical_crossentropy", metrics=["accuracy"])

    # Load CIFAR-10 dataset
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()

    epochs = 1
    batch_size = 32

    mlflow.tensorflow.autolog()
    model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size)

    tf.keras.models.save_model(model, "models/tensorflow")
    mlflow.tensorflow.log_model(
        tf_saved_model_dir='models/tensorflow',
        tf_meta_graph_tags=None,
        tf_signature_def_key='serving_default',
        artifact_path="saved/models/tensorflow",
        registered_model_name="tensorflow-MobileNetV2-32inputs"
    )
Then I'm trying to load it using:
if __name__ == "__main__":
    remote_server_uri = "http://127.0.0.1:5000"
    mlflow.set_tracking_uri(remote_server_uri)

    model_name = "tensorflow-MobileNetV2-32inputs"
    model_version = 1
    model = mlflow.tensorflow.load_model(
        model_uri=f"models:/{model_name}/{model_version}"
    )
I would expect my model to be able to do things like model.fit() and model.predict(), but I always get:
AttributeError: '_WrapperFunction' object has no attribute 'fit'
So my question is: is it possible to save a TensorFlow/Keras model with MLflow and load the architecture back so it can be trained, retrained, or even modified? For example: load the model, add a new layer, and store the result either as a new version or as a new model.
Thanks in advance!
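One plausible direction, assuming an MLflow version that includes the keras flavor: log the model with mlflow.keras instead of the generic SavedModel flavor, since mlflow.keras.load_model returns a native Keras model with fit()/predict(), unlike the '_WrapperFunction' inference wrapper above. A rough sketch (the registered model name below is made up):

import mlflow
import mlflow.keras

# Log with the keras flavor so the model round-trips as a Keras object.
mlflow.keras.log_model(
    model,
    artifact_path="saved/models/keras",
    registered_model_name="keras-MobileNetV2-32inputs")

# Loading through the same flavor gives back a trainable Keras model.
loaded = mlflow.keras.load_model("models:/keras-MobileNetV2-32inputs/1")
loaded.fit(x_train, y_train, epochs=1)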

Saving and loading a Keras model with an ELMo embedding layer

I'm training a Keras model for token classification with an ELMo layer. I will need to save the model for future use. I've tried model.save_weights("model_weights.h5"), but if I load the weights into a new model that I build and then call model.predict(...), I get results as if the model had never been trained. It looks like the configuration is not saved properly.
I'm new to Keras and TensorFlow 1, and I'm not sure this is the right way to do it. Any help is welcome! I'm obviously missing something here, but I couldn't find enough documentation on saving models with an ELMo layer.
I am defining the model like this:
def ElmoEmbedding(x):
    return elmo_model(inputs={"tokens": tf.squeeze(tf.cast(x, tf.string)),
                              "sequence_len": tf.constant(batch_size*[max_len])},
                      signature="tokens",
                      as_dict=True)["elmo"]

def build_model(max_len, n_words, n_tags):
    word_input_layer = Input(shape=(max_len, 40, ))
    elmo_input_layer = Input(shape=(max_len,), dtype=tf.string)
    word_output_layer = Dense(n_tags, activation='softmax')(word_input_layer)
    elmo_output_layer = Lambda(ElmoEmbedding, output_shape=(1, 1024))(elmo_input_layer)
    output_layer = Concatenate()([word_output_layer, elmo_output_layer])
    output_layer = BatchNormalization()(output_layer)
    output_layer = Bidirectional(LSTM(units=512, return_sequences=True, recurrent_dropout=0.2, dropout=0.2))(output_layer)
    output_layer = TimeDistributed(Dense(n_tags, activation='softmax'))(output_layer)
    model = Model([elmo_input_layer, word_input_layer], output_layer)
    return model
And then I run the training like this:
tf.disable_eager_execution()
elmo_model = hub.Module("https://tfhub.dev/google/elmo/3", trainable=False)
sess = tf.Session()
K.set_session(sess)
sess.run([tf.global_variables_initializer(), tf.tables_initializer()])

model = build_model(max_len, n_words, n_tags)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
history = model.fit([np.array(X1_train), np.array(X2_train).reshape((len(X2_train), max_len, 40))],
                    y_train,
                    validation_data=([np.array(X1_valid), np.array(X2_valid).reshape((len(X2_valid), max_len, 40))], y_valid),
                    batch_size=batch_size, epochs=5, verbose=1)
model.save_weights("model_weights.h5")
If I try to load the weights in another session like the following, I get zero accuracy:
tf.disable_eager_execution()
elmo_model = hub.Module("https://tfhub.dev/google/elmo/3", trainable=False)
sess = tf.Session()
K.set_session(sess)
sess.run([tf.global_variables_initializer(), tf.tables_initializer()])

model = build_model(max_len, n_words, n_tags)
model.load_weights("model_weights.h5")
y_pred = model.predict([X1_test, np.array(X2_test).reshape((len(X2_test), max_len, 40))])

Bespoke loss routine for Keras Model

I'm trying to implement a bespoke loss calculation in a Keras model, but I keep getting recognition failures, e.g.: "ValueError: Unknown loss function: root_mean_squared_error_fraction. Please ensure this object is passed to the custom_objects argument."
The function root_mean_squared_error_fraction exists and works; I know this because I call it elsewhere, outside of the Keras model, and it behaves as expected. I'm clearly not understanding something about injecting it into the model definition and would appreciate any advice. Thanks.
from keras.models import load_model
from keras import backend as K

def root_mean_squared_error_fraction(y_true, y_pred):
    return K.sqrt(K.mean(K.square((y_pred - y_true)/y_true)))
This is my model routine, which does work when loss=rmsef in model.compile is replaced with loss='mse':
def ResModel(Svect, Xvect, Yvect, dtrain):
    model = Sequential()
    model.add(LSTM(64, activation='relu',
                   input_shape=(Xvect.shape[1], Xvect.shape[2]),
                   return_sequences=True))
    model.add(LSTM(32, activation='relu', return_sequences=False))
    model.add(Dropout(0.2))
    model.add(Dense(Yvect.shape[1]))
    rmsef = root_mean_squared_error_fraction
    model.compile(optimizer="adam", loss=rmsef)

    bmfile = 'bestmodel.h5'
    earlystop = EarlyStopping(monitor='val_loss', mode='auto',
                              verbose=0, patience=mpat, min_delta=0.005)
    chkpoint = ModelCheckpoint(bmfile, monitor='val_loss', mode='auto',
                               save_best_only=True)
    history = model.fit(Xvect, Yvect, epochs=mcycl, batch_size=32,
                        validation_split=dsplit, verbose=0,
                        callbacks=[earlystop, chkpoint])

    saved = load_model('bestmodel.h5')
    score = saved.evaluate(Xvect, Yvect, verbose=0)
    print("Epoch: %04d yielded best fit with overall loss of: %0.4f"
          % ((earlystop.stopped_epoch + 1), score))
    Yvect = descalevector(Svect, saved.predict(Xvect), dtrain)
    return Yvect, score
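The ValueError quoted above points at the likely fix: a model saved with a custom loss has to be reloaded with that function supplied through custom_objects. A minimal sketch of the changed call:

# Map the name stored in the HDF5 file to the actual function object.
saved = load_model('bestmodel.h5',
                   custom_objects={'root_mean_squared_error_fraction':
                                   root_mean_squared_error_fraction})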

Keras ModelCheckpoint not saving but EarlyStopping is working fine with the same monitor argument

I've built a model and I'm using a custom function for validation. The problem: my custom validation function saves the validation accuracy in the logs dict, but Keras ModelCheckpoint somehow can't see it. EarlyStopping works fine.
Here's the code for the validation class:
class ValidateModel(keras.callbacks.Callback):
    def __init__(self, validation_data, loss_fnc):
        super().__init__()
        self.validation_data = validation_data
        self.loss_fnc = loss_fnc

    def on_epoch_end(self, epoch, logs={}):
        th = 0.5
        features = self.validation_data[0]
        y_true = self.validation_data[1].reshape((-1,1))
        y_pred = np.asarray(self.model.predict(features)).reshape((-1,1))

        # Computing the validation loss.
        y_true_tensor = K.constant(y_true)
        y_pred_tensor = K.constant(y_pred)
        val_loss = K.eval(self.loss_fnc(y_true_tensor, y_pred_tensor))

        # Rounding the predicted values based on the threshold value.
        # Values less than th are rounded to 0, values greater than th to 1.
        y_pred_rounded = y_pred / th
        y_pred_rounded = np.clip(np.floor(y_pred_rounded).astype(int), 0, 1)
        y_pred_rounded_tensor = K.constant(y_pred_rounded)
        val_acc = accuracy_score(y_true, y_pred_rounded)

        logs['val_loss'] = val_loss
        logs['val_acc'] = val_acc
        print(f'\nval_loss: {val_loss} - val_acc: {val_acc}')
And here's the function that I use to train the model:
def train_generator_model(model):
    steps = int(train_df.shape[0] / TRAIN_BATCH_SIZE)
    cb_validation = ValidateModel([validation_X, validation_y], iou)
    cb_early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_acc',
                                                     patience=3,
                                                     mode='max',
                                                     verbose=1)
    cb_model_checkpoint = tf.keras.callbacks.ModelCheckpoint('/kaggle/working/best_generator_model.hdf5',
                                                             monitor='val_acc',
                                                             save_best_only=True,
                                                             mode='max',
                                                             verbose=1)
    history = model.fit(
        x=train_datagen,
        epochs=2,  # Setting to 2 to test.
        callbacks=[cb_validation, cb_model_checkpoint, cb_early_stop],
        verbose=1,
        steps_per_epoch=steps)
    #model = tf.keras.models.load_model('/kaggle/working/best_generator_model.hdf5', custom_objects={'iou': iou})
    #model.load_weights('/kaggle/working/best_generator_model.hdf5')
    return history
If I set the ModelCheckpoint parameter save_best_only to False, the model is saved perfectly. When the training is over and I inspect history.history, I can see that val_loss and val_acc are being logged, as follows:
{'loss': [0.13096405565738678, 0.11926634609699249],
 'binary_accuracy': [0.9692355990409851, 0.9716895818710327],
 'val_loss': [0.23041087, 0.18325138],
 'val_acc': [0.9453247578938803, 0.956172612508138]}
I'm using Tensorflow 2.3.1 and importing keras from tensorflow.
Any help is appreciated. Thank you!
I've checked the TensorFlow code and found an incompatibility between TensorFlow and Keras. Inside the tensorflow.keras.callbacks file, there's the following code:
from keras.utils import tf_utils
The problem is that there is no tf_utils in keras.utils (at least not in Keras 2.4.3, which I was using). Strangely, no exception was thrown.
Fix #1:
Add the following code to your program:
class ModelCheckpoint_tweaked(tf.keras.callbacks.ModelCheckpoint):
    def __init__(self,
                 filepath,
                 monitor='val_loss',
                 verbose=0,
                 save_best_only=False,
                 save_weights_only=False,
                 mode='auto',
                 save_freq='epoch',
                 options=None,
                 **kwargs):
        # Change tf_utils source package.
        from tensorflow.python.keras.utils import tf_utils
        super(ModelCheckpoint_tweaked, self).__init__(filepath,
                                                      monitor,
                                                      verbose,
                                                      save_best_only,
                                                      save_weights_only,
                                                      mode,
                                                      save_freq,
                                                      options,
                                                      **kwargs)
And then use this new class as the ModelCheckpoint callback:
cb_model_checkpoint = ModelCheckpoint_tweaked(file_name,
                                              monitor='val_acc',
                                              save_best_only=True,
                                              mode='max',
                                              verbose=1)
Fix #2:
Update Tensorflow to version 2.4.0. If you are using a custom callback to compute the monitored parameter, add the following line to the custom callback __init__() function:
self._supports_tf_logs = True
If you don't add this line, the logs won't be persisted between the callbacks.
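Applied to the ValidateModel callback from the question, that looks roughly like this:

class ValidateModel(keras.callbacks.Callback):
    def __init__(self, validation_data, loss_fnc):
        super().__init__()
        # Ask TF >= 2.4 to pass this callback the mutable logs object, so
        # the val_loss/val_acc values written in on_epoch_end are visible
        # to the ModelCheckpoint and EarlyStopping callbacks that run after it.
        self._supports_tf_logs = True
        self.validation_data = validation_data
        self.loss_fnc = loss_fnc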

How to inform class weights when using `tensorflow.python.keras.estimator.model_to_estimator` to convert Keras Models to Estimator API?

I'm having some trouble converting a pure Keras model to the TensorFlow Estimator API on an unbalanced dataset.
When using the pure Keras API, the class_weight parameter is available in the model.fit method, but when converting a Keras model to a TensorFlow Estimator with tensorflow.python.keras.estimator.model_to_estimator there is no place to pass class_weights.
How can I overcome this?
I'm using TF 1.12 on Ubuntu 18, CUDA 9, cuDNN 7.
Pure Keras model:
def keras_model(n_classes=None, model_dir='./tmp-model/', config=None):
    with tf.device('/gpu:0'):
        # Inputs
        inp_raw = Input(shape=(max_len,), name='word_raw')

        # raw text LSTM network
        word_raw_emb = Embedding(
            input_dim=nunique_chars_raw,
            output_dim=EMBED_SIZE,
            input_length=MAX_WORD_LENGTH,
            trainable=True,
            name='word_raw_emb')(inp_raw)
        word_raw_emb = Dropout(rate=dropout_rate)(word_raw_emb)
        word_raw_emb_lstm = Bidirectional(
            LSTM(48, return_sequences=True))(word_raw_emb)
        word_raw_emb_gru = Bidirectional(
            GRU(48, return_sequences=False))(word_raw_emb_lstm)
        word_raw_net = Dense(16, activation='relu')(word_raw_emb_gru)
        output_raw_net = Dense(n_classes, activation='softmax')(word_raw_net)

        model = Model(inputs=inp_raw, outputs=output_raw_net)
        optz = keras.optimizers.RMSprop(
            lr=0.002, rho=0.9, epsilon=None, decay=0.0)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optz, metrics=['categorical_accuracy'])
        return model

model = keras_model(5)
model.fit(train_X, train_Y_onehot,
          batch_size=128,
          epochs=10,
          validation_data=(eval_X, eval_Y_onehot),
          class_weight=class_weights,
          verbose=1)
Keras model to TensorFlow Estimator:
def keras_estimator_model(n_classes=None, model_dir='./tmp-model/', config=None):
    with tf.device('/gpu:0'):
        # Inputs
        inp_raw = Input(shape=(max_len,), name='word_raw')

        # raw text LSTM network
        word_raw_emb = Embedding(
            input_dim=nunique_chars_raw,
            output_dim=EMBED_SIZE,
            input_length=MAX_WORD_LENGTH,
            trainable=True,
            name='word_raw_emb')(inp_raw)
        word_raw_emb = Dropout(rate=dropout_rate)(word_raw_emb)
        word_raw_emb_lstm = Bidirectional(
            LSTM(48, return_sequences=True))(word_raw_emb)
        word_raw_emb_gru = Bidirectional(
            GRU(48, return_sequences=False))(word_raw_emb_lstm)
        word_raw_net = Dense(16, activation='relu')(word_raw_emb_gru)
        output_raw_net = Dense(n_classes, activation='softmax')(word_raw_net)

        model = Model(inputs=inp_raw, outputs=output_raw_net)
        optz = keras.optimizers.RMSprop(
            lr=0.002, rho=0.9, epsilon=None, decay=0.0)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optz, metrics=['categorical_accuracy'])

        model_estimator = model_to_estimator(keras_model=model, model_dir=model_dir, config=config)
        return model_estimator

estimator_model = keras_estimator_model(5)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=10)
eval_spec = tf.estimator.EvalSpec(
    input_fn=eval_input_fn,
    steps=None,
    start_delay_secs=10,
    throttle_secs=10)
tf.estimator.train_and_evaluate(estimator_model, train_spec, eval_spec)
I've coded a workaround and it seems to work.
import tensorflow as tf
from tensorflow.python.keras import backend as K

def weighted_loss_fn(class_weights):
    def _loss_fn(y_true, y_pred):
        class_weights_tensor = K.variable(class_weights)
        y_true_labels = K.argmax(y_true, axis=1)
        weights = K.gather(class_weights_tensor, y_true_labels)
        return tf.losses.softmax_cross_entropy(onehot_labels=y_true, logits=y_pred, weights=weights)
    return _loss_fn

def keras_estimator_model(n_classes=None, model_dir='./tmp-model/', config=None, class_weights=None):
    with tf.device('/gpu:0'):
        # Inputs
        inp_raw = Input(shape=(max_len,), name='word_raw')

        # raw text LSTM network
        word_raw_emb = Embedding(
            input_dim=nunique_chars_raw,
            output_dim=EMBED_SIZE,
            input_length=MAX_WORD_LENGTH,
            trainable=True,
            name='word_raw_emb')(inp_raw)
        word_raw_emb = Dropout(rate=dropout_rate)(word_raw_emb)
        word_raw_emb_lstm = Bidirectional(
            LSTM(48, return_sequences=True))(word_raw_emb)
        word_raw_emb_gru = Bidirectional(
            GRU(48, return_sequences=False))(word_raw_emb_lstm)
        word_raw_net = Dense(16, activation='relu')(word_raw_emb_gru)
        output_raw_net = Dense(n_classes, activation='softmax')(word_raw_net)

        model = Model(inputs=inp_raw, outputs=output_raw_net)
        optz = keras.optimizers.RMSprop(
            lr=0.002, rho=0.9, epsilon=None, decay=0.0)
        loss_fn = weighted_loss_fn(class_weights)
        model.compile(loss=loss_fn,
                      optimizer=optz, metrics=['categorical_accuracy'])

        model_estimator = model_to_estimator(keras_model=model, model_dir=model_dir, config=config)
        return model_estimator

estimator_model = keras_estimator_model(5, class_weights=class_weights)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=10)
eval_spec = tf.estimator.EvalSpec(
    input_fn=eval_input_fn,
    steps=None,
    start_delay_secs=10,
    throttle_secs=10)
tf.estimator.train_and_evaluate(estimator_model, train_spec, eval_spec)
In my case, class_weights = [0.17041813, 42.00318471, 35.26470588, 29.70495495, 42.00318471, 44.55743243].
The class_weight param is used to weight the loss function (during training only). So, to get the same effect, you have to alter your loss function. I did not find any existing TensorFlow loss function that takes class weights, only a feature request for cross-entropy loss with class weights, where a workaround is mentioned: Here.
By the way, the issue was closed, because it is possible in Keras :)
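For reference, the pure-Keras route that last remark alludes to is just the class_weight argument to fit(), with weights given as a {class_index: weight} dict; a sketch using the weights above:

# Per-class weights keyed by class index.
class_weight = {0: 0.17041813, 1: 42.00318471, 2: 35.26470588,
                3: 29.70495495, 4: 42.00318471, 5: 44.55743243}
model.fit(train_X, train_Y_onehot,
          batch_size=128,
          epochs=10,
          class_weight=class_weight,
          verbose=1)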
