Can't export to SavedModel in TensorFlow - python

I need to load a trained model in Tensorflow and save it to SavedModel format for use in Tensorflow Serving. I am following this tutorial. I am loading the model like this:
self.graph = tf.Graph()

def _graph_hourglass(self, inputs):
    ### All layers are here ###
    ...

def generate_model(self):
    with tf.device(self.cpu):
        with tf.name_scope('inputs'):
            self.img = tf.compat.v1.placeholder(dtype=tf.float32,
                                                shape=(None, 256, 256, 3),
                                                name='input_img')
        self.output = self._graph_hourglass(self.img)

def model_init(self):
    with self.graph.as_default():
        self.generate_model()

def restore(self, load='path'):
    with tf.name_scope('Session'):
        with tf.device(self.cpu):
            self.Session = tf.compat.v1.Session()
            saver = tf.compat.v1.train.Saver()
            saver.restore(self.Session, load)

def load_model(self, load='path'):
    with self.graph.as_default():
        self.restore(load)

...
self.model_init()
self.load_model(load='path/to/model')
Then I use the SavedModelBuilder class from the tutorial:
export_path = 'new/path'
print('Exporting trained model to', export_path)
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_path)

tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(self.img)
tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(self.output)

prediction_signature = (
    tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
        inputs={'image': tensor_info_x},
        outputs={'joints': tensor_info_y},
        method_name=tf.compat.v1.saved_model.signature_constants.PREDICT_METHOD_NAME))

builder.add_meta_graph_and_variables(
    self.Session, [tf.compat.v1.saved_model.tag_constants.SERVING],
    signature_def_map={
        tf.compat.v1.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            prediction_signature,
    },
    strip_default_attrs=True)

builder.save()
print('Done exporting!')
The model is saved without errors and I see "Done exporting!" in the console, but the exported .pb file is practically empty: it is only 252 bytes. What can be the problem here? I will appreciate any help.
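A .pb that small usually contains an empty graph, which suggests the export code ran while a fresh default graph was active rather than self.graph: add_meta_graph_and_variables collects variables from the current default graph, and with nothing in it the builder happily writes an empty MetaGraph. A minimal sketch of the likely fix, wrapping the same export code in the model's graph context:

# Run the export inside the graph that actually holds the model;
# otherwise the builder serializes an empty default graph.
with self.graph.as_default():
    builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_path)
    tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(self.img)
    tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(self.output)
    # ... build prediction_signature exactly as above ...
    builder.add_meta_graph_and_variables(
        self.Session, [tf.compat.v1.saved_model.tag_constants.SERVING],
        signature_def_map={
            tf.compat.v1.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                prediction_signature,
        },
        strip_default_attrs=True)
    builder.save()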

Related

Error Saving & Loading Tensorflow/Keras Model With Custom Classes/Functions

I recently created a Tensorflow/Keras model with Keras Transformers. To do this, I created the custom PositionalEmbedding and TransformerEncoder classes and used them to build the model architecture. They are created as such:
class PositionalEmbedding(layers.Layer):
    def __init__(self, sequence_length, output_dim, **kwargs):
        super().__init__(**kwargs)
        self.position_embeddings = layers.Embedding(
            input_dim=sequence_length, output_dim=output_dim
        )
        self.sequence_length = sequence_length
        self.output_dim = output_dim

    def call(self, inputs):
        # The inputs are of shape: `(batch_size, frames, num_features)`
        length = tf.shape(inputs)[1]
        positions = tf.range(start=0, limit=length, delta=1)
        embedded_positions = self.position_embeddings(positions)
        return inputs + embedded_positions

    def compute_mask(self, inputs, mask=None):
        mask = tf.reduce_any(tf.cast(inputs, "bool"), axis=-1)
        return mask


class TransformerEncoder(layers.Layer):
    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.dense_dim = dense_dim
        self.num_heads = num_heads
        self.attention = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim, dropout=0.3
        )
        self.dense_proj = keras.Sequential(
            [layers.Dense(dense_dim, activation=tf.nn.gelu), layers.Dense(embed_dim)]
        )
        self.layernorm_1 = layers.LayerNormalization()
        self.layernorm_2 = layers.LayerNormalization()

    def call(self, inputs, mask=None):
        if mask is not None:
            mask = mask[:, tf.newaxis, :]
        attention_output = self.attention(inputs, inputs, attention_mask=mask)
        proj_input = self.layernorm_1(inputs + attention_output)
        proj_output = self.dense_proj(proj_input)
        return self.layernorm_2(proj_input + proj_output)
At first, I was unable to even save this model using the typical model.save() method. However, I was able to solve this by updating the config for the classes like so:
### FOR THE PositionalEmbedding CLASS
def get_config(self):
    config = super().get_config().copy()
    config.update({
        'position_embeddings': self.position_embeddings,
        'sequence_length': self.sequence_length,
        'output_dim': self.output_dim
    })
    return config

### FOR THE TransformerEncoder CLASS
def get_config(self):
    config = super().get_config().copy()
    config.update({
        'embed_dim': self.embed_dim,
        'dense_dim': self.dense_dim,
        'num_heads': self.num_heads,
        'attention': self.attention,
        'dense_proj': self.dense_proj,
        'layernorm_1': self.layernorm_1,
        'layernorm_2': self.layernorm_2
    })
    return config
However, when I try to load the model using the keras load_model() method without the custom_objects argument, I get the following error:
ValueError: Unknown layer: PositionalEmbedding. Please ensure this object is passed to the `custom_objects` argument.
And if I use the load_model() method without the classes defined in the script, passing the custom_objects argument for the two classes as in load_model('my_model.h5', custom_objects={'PositionalEmbedding': PositionalEmbedding, 'TransformerEncoder': TransformerEncoder}), I get the following error:
NameError: name 'PositionalEmbedding' is not defined
And finally, if I do initialize the classes with the updated configs before loading, and use the load_model() method as shown in the previous example, I get the following error:
TypeError: ('Keyword argument not understood:', 'position_embeddings')
Does anyone know what might be causing these issues and how I can resolve them to load this model? Any help is appreciated!
Thanks!
Sam
So I was actually able to solve this problem with a workaround. Instead of saving the model and loading it the old-fashioned way, I saved a checkpoint for the model while training, then loaded it by creating a new model from scratch and loading the checkpoint as the weights.
The code for that is below:
### SAVING THE MODEL WITH CHECKPOINT
filepath = "/content/drive/MyDrive/tmp/model_checkpoint.ckpt"

checkpoint = keras.callbacks.ModelCheckpoint(
    filepath, save_weights_only=True, save_best_only=True, verbose=1
)

history = model.fit(
    train_data,
    train_labels,
    validation_split=0.3,
    epochs=250,
    batch_size=256,
    callbacks=[checkpoint],
)
### CREATING NEW MODEL & LOADING CHECKPOINT AS WEIGHTS
def get_compiled_model():
    sequence_length = MAX_SEQ_LENGTH
    embed_dim = NUM_FEATURES
    dense_dim = 4
    num_heads = 1
    classes = len(label_processor.get_vocabulary())

    inputs = keras.Input(shape=(None, None))
    x = PositionalEmbedding(
        sequence_length, embed_dim, name="frame_position_embedding"
    )(inputs)
    x = TransformerEncoder(embed_dim, dense_dim, num_heads, name="transformer_layer")(x)
    x = layers.GlobalMaxPooling1D()(x)
    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(classes, activation="softmax")(x)
    model = keras.Model(inputs, outputs)

    model.compile(
        optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
    )
    return model

model = get_compiled_model()
model.load_weights("/content/drive/MyDrive/tmp/model_checkpoint.ckpt")
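The checkpoint workaround sidesteps the root cause: the get_config implementations above serialize whole sub-layers (position_embeddings, attention, dense_proj, ...), but get_config should only contain the constructor arguments; the sub-layers are rebuilt by __init__ when the model is reloaded, which is what triggers the "Keyword argument not understood" error. A sketch of a config that should round-trip through model.save() / load_model(), shown for PositionalEmbedding (TransformerEncoder would keep only embed_dim, dense_dim and num_heads the same way):

class PositionalEmbedding(layers.Layer):
    def __init__(self, sequence_length, output_dim, **kwargs):
        super().__init__(**kwargs)
        # Sub-layers are recreated here, so they never belong in the config.
        self.position_embeddings = layers.Embedding(
            input_dim=sequence_length, output_dim=output_dim)
        self.sequence_length = sequence_length
        self.output_dim = output_dim

    def get_config(self):
        # Only plain constructor arguments are serialized.
        config = super().get_config()
        config.update({
            "sequence_length": self.sequence_length,
            "output_dim": self.output_dim,
        })
        return config

The classes still have to be defined (or imported) in the loading script and passed via custom_objects:

model = keras.models.load_model(
    "my_model.h5",
    custom_objects={"PositionalEmbedding": PositionalEmbedding,
                    "TransformerEncoder": TransformerEncoder})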

Tensorflow signature output placeholder

I am trying to export a Tensorflow model so that I can use it in Tensorflow Serving. This is the script that I use:
import os
import tensorflow as tf

trained_checkpoint_prefix = '/home/ubuntu/checkpoint'
export_dir = os.path.join('m', '0')

loaded_graph = tf.Graph()
config = tf.ConfigProto(allow_soft_placement=True)
with tf.Session(graph=loaded_graph, config=config) as sess:
    # Restore from checkpoint
    loader = tf.train.import_meta_graph(trained_checkpoint_prefix + 'file.meta')
    loader.restore(sess, tf.train.latest_checkpoint(trained_checkpoint_prefix))

    # Create SavedModelBuilder class
    # defines where the model will be exported
    export_path_base = "/home/ubuntu/m"
    export_path = os.path.join(
        tf.compat.as_bytes(export_path_base),
        tf.compat.as_bytes(str(0)))
    print('Exporting trained model to', export_path)
    builder = tf.saved_model.builder.SavedModelBuilder(export_path)

    batch_shape = (20, 256, 256, 3)
    input_tensor = tf.placeholder(tf.float32, shape=batch_shape, name="X_content")
    predictions_tf = tf.placeholder(tf.float32, shape=batch_shape, name='Y_output')

    tensor_info_input = tf.saved_model.utils.build_tensor_info(input_tensor)
    tensor_info_output = tf.saved_model.utils.build_tensor_info(predictions_tf)

    prediction_signature = (
        tf.saved_model.signature_def_utils.build_signature_def(
            inputs={'image': tensor_info_input},
            outputs={'output': tensor_info_output},
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            'style_image':
                prediction_signature,
        })

    builder.save(as_text=True)
The main issue is the output signature (predictions_tf). In this case, with it set to a placeholder, I get an error saying that its value has to be set when the model is called via gRPC. What should I use instead?
I have tried
predictions_tf = tf.Variable(0, dtype=tf.float32, name="Y_output")
and
predictions_tf = tf.TensorInfo(dtype=tf.float32)
predictions_tf.name = "Y_output"
predictions_tf.dtype = tf.float32
I might have misunderstood what you are trying to do, but here you basically create a new placeholder for the input and a new placeholder for the output.
What I think you should do is, once you have loaded the model, get the input and the output tensors of your model into the variables input_tensor and predictions_tf, using for example:
input_tensor=loaded_graph.get_tensor_by_name('the_name_in_the_loaded_graph:0')
prediction_tf=loaded_graph.get_tensor_by_name('the_pred_name_in_the_loaded_graph:0')
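If you don't know what the tensors are called in the restored graph, you can list its operations and pick out the names; a small sketch (the substring filter is just an example):

# Print candidate input/output tensor names in the restored graph.
for op in loaded_graph.get_operations():
    if op.type == 'Placeholder' or 'output' in op.name.lower():
        print(op.name, op.type)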

file doesn't exist after saving with tensorflow

I saved my model using tf.train.Saver('./model.ckpt'). When I went to the local directory, I found files named model.ckpt.index, model.ckpt.meta and model.ckpt.data-00000-of-00001, but no model.ckpt. As a consequence, I wasn't able to restore the model. Does anyone know if I did anything wrong? Here's my code:
class autoencoder(object):
    def __init__(self, network_architecture, learning_rate=0.001, regularization_constant=1):
        self.network_arch = network_architecture
        self.X = tf.placeholder(tf.float32, [None, network_architecture['n_input']])
        self.c = tf.Variable(regularization_constant, dtype=tf.float32)

        self._initialize_weights()
        self._build_graph()
        self.cost, self.optimizer = self._cost_optimizer(learning_rate)

        init = tf.global_variables_initializer()
        self.saver = tf.train.Saver()
        self.sess = tf.Session()
        self.sess.run(init)

    ...

    def save(self, path):
        self.saver.save(self.sess, path)

    def load(self, path):
        self.saver.restore(self.sess, path)
The *.meta file contains your MetaGraph, and you can import it with:
saver = tf.train.import_meta_graph("model.ckpt.meta")
Then you can restore the graph's variables.
saver.restore(sess, "model.ckpt")
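Note that this works even though no file named model.ckpt exists on disk: the path passed to save() and restore() is a checkpoint prefix, and TensorFlow locates the .index and .data-* files that share it. If you would rather not hard-code the prefix, tf.train.latest_checkpoint reads it from the checkpoint state file that saver.save writes alongside the data:

# "model.ckpt" is a prefix, not a file; latest_checkpoint resolves it
# from the "checkpoint" state file in the directory.
ckpt_prefix = tf.train.latest_checkpoint("./")
saver.restore(sess, ckpt_prefix)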
You can save additional model data to the metagraph by calling tf.train.export_meta_graph.
Alternatively, you can store your application's model using a SavedModel, which can contain multiple MetaGraphs. The documentation is here.
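For the single-signature case, a compact way to produce such a SavedModel in TF 1.x is tf.saved_model.simple_save; a sketch against the autoencoder above, where self.output stands in for whatever reconstruction tensor _build_graph creates (that name is an assumption):

# simple_save wraps SavedModelBuilder for one default serving signature.
tf.saved_model.simple_save(
    self.sess, "export_dir",
    inputs={"X": self.X},
    outputs={"reconstruction": self.output})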

TensorFlow error when save/restore dynamic_RNN model

I can save and restore the model if it is a CNN, but I can't restore the RNN.
I made the RNN network like this.
I want to save the trained weights and biases (or the whole model), and then predict without retraining. The following is main.py:
# main.py
tf_x = tf.placeholder(tf.float32, [None, seq_length, data_dim], name='tf_x')
tf_y = tf.placeholder(tf.int32, [None, output_dim], name='tf_y')

rnn_cell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim)
outputs, (h_c, h_n) = tf.nn.dynamic_rnn(rnn_cell,
                                        tf_x,
                                        initial_state=None,
                                        dtype=tf.float32,
                                        time_major=False)
output = tf.layers.dense(outputs[:, -1, :], output_dim, name='dense_output')

loss = tf.losses.softmax_cross_entropy(onehot_labels=tf_y, logits=output)
train_op = tf.train.AdamOptimizer(LR).minimize(loss)
accuracy = tf.metrics.accuracy(labels=tf.argmax(tf_y, axis=1),
                               predictions=tf.argmax(output, axis=1))[1]

with tf.Session() as sess:
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())  # the local var is for accuracy_op
    sess.run(init_op)  # initialize vars in graph
    ...  # (training)
    saver = tf.train.Saver()
    save_path = saver.save(sess, "Save data/RNN-model")
    saver.export_meta_graph(filename="Save data/RNN-model.meta", as_text=True)
and in "run.py" I tried to load that data.
# run.py
...  # (same as main.py)
saver = tf.train.Saver()
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state('Save data/')
    saver.restore(sess, ckpt.model_checkpoint_path)
    saver = tf.train.import_meta_graph("Save data/RNN-model.meta")
    ...  # (prediction)
The result is:
tensorflow.python.framework.errors_impl.NotFoundError: Key dense/bias not found in checkpoint
What do you think is the problem?
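The error usually means the graph rebuilt in run.py contains a variable (here dense/bias) whose name does not match any key stored in the checkpoint, which happens easily when a layer is created with a different or default name than at save time. A common remedy is to not rebuild the graph at all and instead restore the one stored in the .meta file; a sketch, where the output tensor name is an assumption you can verify by listing the graph's operations:

# Restore the saved graph itself so variable names match the checkpoint keys.
with tf.Session() as sess:
    saver = tf.train.import_meta_graph("Save data/RNN-model.meta")
    saver.restore(sess, tf.train.latest_checkpoint("Save data/"))
    graph = tf.get_default_graph()
    tf_x = graph.get_tensor_by_name("tf_x:0")
    output = graph.get_tensor_by_name("dense_output/BiasAdd:0")  # assumed name
    prediction = sess.run(output, feed_dict={tf_x: my_input})  # my_input: a test batch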

How to save estimator in Tensorflow for later use?

I followed the tutorial "A Guide to TF Layers: Building a Convolutional Neural Network" (here is the code: https://github.com/tensorflow/tensorflow/blob/r1.1/tensorflow/examples/tutorials/layers/cnn_mnist.py).
I adapted the tutorial for my needs, which is hand detection.
As far as I understand, this tutorial creates the estimator (which is a CNN), then does the fitting, and finally evaluates the estimator's performance. Now, my problem is that I want to use the estimator in another file, which is going to be my main program. How do I access the estimator from another file? Do I have to fit the estimator every time I want to use it? (I hope not!)
I was wondering if someone could help me understand how to save the estimator to use it later. (As far as I understand, I can't create a saver with tf.train.Saver, because I don't have a session running.)
Here is the code from my train.py file:
def main(unused_argv):
    # Load training and eval data (part missing)

    # Create the estimator
    hand_detector = learn.Estimator(model_fn=cnn_model_fn, model_dir="\cnn_model_fn")

    # Set up logging for predictions
    # Log the values in the "Softmax" tensor with label "probabilities"
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)

    # Train the model
    hand_detector.fit(
        x=train_data,
        y=train_labels,
        batch_size=100,
        steps=20000,
        monitors=[logging_hook])

    # Configure the accuracy metric for evaluation
    metrics = {
        "accuracy":
            learn.MetricSpec(
                metric_fn=tf.metrics.accuracy, prediction_key="classes"),
    }

    # Evaluate the model and print results
    eval_results = hand_detector.evaluate(
        x=eval_data, y=eval_labels, metrics=metrics)
    print(eval_results)

    # Save the model for later use (part missing!)
Almost all real applications of machine learning seek to train a model once and then save it for future use with new data. Most classifiers spend hours in the training stage and just a few seconds in the testing stage, so it is fundamental to learn how to successfully save a trained model.
I'm going to explain how to export "high level" Tensorflow models (using export_savedmodel).
The function export_savedmodel requires the argument serving_input_receiver_fn, a function without arguments that defines the input for the model and the predictor. Therefore, you must create your own serving_input_receiver_fn, where the model input type matches the model input in the training script, and the predictor input type matches the predictor input in the testing script.
On the other hand, if you create a custom model, you must define the export_outputs, built with the function tf.estimator.export.PredictOutput, whose input is a dictionary whose keys have to match the names of the predictor outputs used in the testing script.
For example:
TRAINING SCRIPT
def serving_input_receiver_fn():
    serialized_tf_example = tf.placeholder(dtype=tf.string, shape=[None], name='input_tensors')
    receiver_tensors = {"predictor_inputs": serialized_tf_example}
    feature_spec = {"words": tf.FixedLenFeature([25], tf.int64)}
    features = tf.parse_example(serialized_tf_example, feature_spec)
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)

def estimator_spec_for_softmax_classification(logits, labels, mode):
    predicted_classes = tf.argmax(logits, 1)
    if (mode == tf.estimator.ModeKeys.PREDICT):
        export_outputs = {'predict_output': tf.estimator.export.PredictOutput({"pred_output_classes": predicted_classes, 'probabilities': tf.nn.softmax(logits)})}
        return tf.estimator.EstimatorSpec(mode=mode, predictions={'class': predicted_classes, 'prob': tf.nn.softmax(logits)}, export_outputs=export_outputs)  # IMPORTANT!!!

    onehot_labels = tf.one_hot(labels, 31, 1, 0)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    if (mode == tf.estimator.ModeKeys.TRAIN):
        optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
        train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    eval_metric_ops = {'accuracy': tf.metrics.accuracy(labels=labels, predictions=predicted_classes)}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

def model_custom(features, labels, mode):
    bow_column = tf.feature_column.categorical_column_with_identity("words", num_buckets=1000)
    bow_embedding_column = tf.feature_column.embedding_column(bow_column, dimension=50)
    bow = tf.feature_column.input_layer(features, feature_columns=[bow_embedding_column])
    logits = tf.layers.dense(bow, 31, activation=None)
    return estimator_spec_for_softmax_classification(logits=logits, labels=labels, mode=mode)

def main():
    # ...
    # preprocess -> features_train_set and labels_train_set
    # ...
    classifier = tf.estimator.Estimator(model_fn=model_custom)
    train_input_fn = tf.estimator.inputs.numpy_input_fn(x={"words": features_train_set}, y=labels_train_set, batch_size=batch_size_param, num_epochs=None, shuffle=True)
    classifier.train(input_fn=train_input_fn, steps=100)
    full_model_dir = classifier.export_savedmodel(export_dir_base="C:/models/directory_base", serving_input_receiver_fn=serving_input_receiver_fn)
TESTING SCRIPT
def main():
    # ...
    # preprocess -> features_test_set
    # ...
    with tf.Session() as sess:
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], full_model_dir)
        predictor = tf.contrib.predictor.from_saved_model(full_model_dir)
        model_input = tf.train.Example(features=tf.train.Features(feature={"words": tf.train.Feature(int64_list=tf.train.Int64List(value=features_test_set))}))
        model_input = model_input.SerializeToString()
        output_dict = predictor({"predictor_inputs": [model_input]})
        y_predicted = output_dict["pred_output_classes"][0]
(Code tested in Python 3.6.3, Tensorflow 1.4.0)
Estimator has an export_savedmodel member function for that purpose. You will find the docs here.
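A minimal sketch of that call, reusing a serving_input_receiver_fn like the one defined in the previous answer (the base path is just an example):

# export_savedmodel writes a timestamped SavedModel directory under the
# base path and returns its location.
export_dir = estimator.export_savedmodel(
    export_dir_base="exported_model",
    serving_input_receiver_fn=serving_input_receiver_fn)
print(export_dir)  # e.g. b'exported_model/1534433044'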
Update to David Valenzuela Urrutia's answer (code)
David Valenzuela Urrutia's answer was for Python 3.6.3 and Tensorflow 1.4.0, so I thought of updating the code samples to Tensorflow 2.x, because some functionalities like tf.Session are not supported in Tensorflow 2; you need to replace them with tf.compat.v1.Session for the code to work. Visit this link to learn more about the changes added in Tensorflow 2.
Training script updated code
def serving_input_receiver_fn():
    serialized_tf_example = tf.compat.v1.placeholder(dtype=tf.string, shape=[None],
                                                     name='input_tensors')
    receiver_tensors = {"predictor_inputs": serialized_tf_example}
    feature_spec = {"words": tf.io.FixedLenFeature([25], tf.int64)}
    features = tf.io.parse_example(serialized=serialized_tf_example,
                                   features=feature_spec)
    return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)

def estimator_spec_for_softmax_classification(logits, labels, mode):
    predicted_classes = tf.argmax(input=logits, axis=1)
    if (mode == tf.estimator.ModeKeys.PREDICT):
        export_outputs = {'predict_output':
            tf.estimator.export.PredictOutput({"pred_output_classes": predicted_classes, 'probabilities': tf.nn.softmax(logits)})}
        return tf.estimator.EstimatorSpec(mode=mode, predictions={'class': predicted_classes, 'prob': tf.nn.softmax(logits)}, export_outputs=export_outputs)  # IMPORTANT!!!

    onehot_labels = tf.one_hot(labels, 31, 1, 0)
    loss = tf.compat.v1.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    if (mode == tf.estimator.ModeKeys.TRAIN):
        optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=0.01)
        train_op = optimizer.minimize(loss, global_step=tf.compat.v1.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    eval_metric_ops = {'accuracy': tf.compat.v1.metrics.accuracy(labels=labels, predictions=predicted_classes)}
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)

def model_custom(features, labels, mode):
    bow_column = tf.feature_column.categorical_column_with_identity("words", num_buckets=1000)
    bow_embedding_column = tf.feature_column.embedding_column(bow_column, dimension=50)
    bow = tf.compat.v1.feature_column.input_layer(features, feature_columns=[bow_embedding_column])
    logits = tf.compat.v1.layers.dense(bow, 31, activation=None)
    return estimator_spec_for_softmax_classification(logits=logits, labels=labels, mode=mode)

def main():
    # ...
    # preprocess -> features_train_set and labels_train_set
    # ...
    classifier = tf.estimator.Estimator(model_fn=model_custom)
    train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(x={"words": features_train_set}, y=labels_train_set, batch_size=batch_size_param, num_epochs=None, shuffle=True)
    classifier.train(input_fn=train_input_fn, steps=100)
    full_model_dir = classifier.export_savedmodel(export_dir_base="C:/models/directory_base", serving_input_receiver_fn=serving_input_receiver_fn)
Testing script updated code
def main():
    # ...
    # preprocess -> features_test_set
    # ...
    with tf.compat.v1.Session() as sess:
        tf.compat.v1.saved_model.loader.load(sess, [tf.saved_model.SERVING], full_model_dir)
        predictor = tf.contrib.predictor.from_saved_model(full_model_dir)
        model_input = tf.train.Example(features=tf.train.Features(feature={"words": tf.train.Feature(int64_list=tf.train.Int64List(value=features_test_set))}))
        model_input = model_input.SerializeToString()
        output_dict = predictor({"predictor_inputs": [model_input]})
        y_predicted = output_dict["pred_output_classes"][0]
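One caveat about this update: tf.contrib was removed entirely in Tensorflow 2, so tf.contrib.predictor.from_saved_model above still requires a TF1-compatible installation. A rough sketch of a TF2-native alternative, where the signature key and the input keyword come from the export_outputs and receiver_tensors defined in the training script and should be verified with the saved_model_cli tool:

# Load the SavedModel natively in TF2 and call the exported signature.
loaded = tf.saved_model.load(full_model_dir)
infer = loaded.signatures["predict_output"]  # key from export_outputs (assumption)
output = infer(predictor_inputs=tf.constant([model_input]))
y_predicted = output["pred_output_classes"][0]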
