I am trying to export a Tensorflow model so that I can use it in Tensorflow Serving. This is the script that I use:
import os
import tensorflow as tf
trained_checkpoint_prefix = '/home/ubuntu/checkpoint'
export_dir = os.path.join('m', '0')
loaded_graph = tf.Graph()
config = tf.ConfigProto(allow_soft_placement=True)

with tf.Session(graph=loaded_graph, config=config) as sess:
    # Restore from checkpoint
    loader = tf.train.import_meta_graph(trained_checkpoint_prefix + 'file.meta')
    loader.restore(sess, tf.train.latest_checkpoint(trained_checkpoint_prefix))

    # Create SavedModelBuilder class
    # defines where the model will be exported
    export_path_base = "/home/ubuntu/m"
    export_path = os.path.join(
        tf.compat.as_bytes(export_path_base),
        tf.compat.as_bytes(str(0)))
    print('Exporting trained model to', export_path)

    builder = tf.saved_model.builder.SavedModelBuilder(export_path)

    batch_shape = (20, 256, 256, 3)
    input_tensor = tf.placeholder(tf.float32, shape=batch_shape, name="X_content")
    predictions_tf = tf.placeholder(tf.float32, shape=batch_shape, name='Y_output')

    tensor_info_input = tf.saved_model.utils.build_tensor_info(input_tensor)
    tensor_info_output = tf.saved_model.utils.build_tensor_info(predictions_tf)

    prediction_signature = (
        tf.saved_model.signature_def_utils.build_signature_def(
            inputs={'image': tensor_info_input},
            outputs={'output': tensor_info_output},
            method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))

    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={'style_image': prediction_signature})

    builder.save(as_text=True)
The main issue is the output signature (predictions_tf). With it set to a placeholder, I get an error saying that its value has to be set when the model is called over gRPC. What should I use instead?
I have tried
predictions_tf = tf.Variable(0, dtype=tf.float32, name="Y_output")
and
predictions_tf = tf.TensorInfo(dtype=tf.float32)
predictions_tf.name = "Y_output"
predictions_tf.dtype = tf.float32
I might have misunderstood what you are trying to do, but here you basically create a new placeholder for the input and a new placeholder for the output.
What I think you should do instead is, once you have loaded the model, fetch the actual input and output tensors of your model into the variables input_tensor and predictions_tf, using for example
input_tensor = loaded_graph.get_tensor_by_name('the_name_in_the_loaded_graph:0')
predictions_tf = loaded_graph.get_tensor_by_name('the_pred_name_in_the_loaded_graph:0')
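Putting that together with the export script from the question, a minimal sketch of the whole flow; the two tensor names are stand-ins for whatever your graph actually calls its input and prediction ops:
import os
import tensorflow as tf

trained_checkpoint_prefix = '/home/ubuntu/checkpoint'
export_path = os.path.join('/home/ubuntu/m', '0')

loaded_graph = tf.Graph()
config = tf.ConfigProto(allow_soft_placement=True)

with tf.Session(graph=loaded_graph, config=config) as sess:
    loader = tf.train.import_meta_graph(trained_checkpoint_prefix + 'file.meta')
    loader.restore(sess, tf.train.latest_checkpoint(trained_checkpoint_prefix))

    # Look up the tensors that already exist in the restored graph;
    # replace both names with the real ones from your model.
    input_tensor = loaded_graph.get_tensor_by_name('the_name_in_the_loaded_graph:0')
    predictions_tf = loaded_graph.get_tensor_by_name('the_pred_name_in_the_loaded_graph:0')

    builder = tf.saved_model.builder.SavedModelBuilder(export_path)
    prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs={'image': tf.saved_model.utils.build_tensor_info(input_tensor)},
        outputs={'output': tf.saved_model.utils.build_tensor_info(predictions_tf)},
        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={'style_image': prediction_signature})
    builder.save(as_text=True)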
I need to load a trained model in Tensorflow and save it to SavedModel format for use in Tensorflow Serving. I am following this tutorial. I am loading the model like this:
self.graph = tf.Graph()

def _graph_hourglass(self, inputs):
    ### All layers are here ###
    ...

def generate_model(self):
    with tf.device(self.cpu):
        with tf.name_scope('inputs'):
            self.img = tf.compat.v1.placeholder(dtype=tf.float32,
                                                shape=(None, 256, 256, 3), name='input_img')
        self.output = self._graph_hourglass(self.img)

def model_init(self):
    with self.graph.as_default():
        self.generate_model()

def restore(self, load='path'):
    with tf.name_scope('Session'):
        with tf.device(self.cpu):
            self.Session = tf.compat.v1.Session()
            saver = tf.compat.v1.train.Saver()
            saver.restore(self.Session, load)

def load_model(self, load='path'):
    with self.graph.as_default():
        self.restore(load)

...
self.model_init()
self.load_model(load='path/to/model')
Then I use the SavedModelBuilder class from the tutorial:
export_path = 'new/path'
print('Exporting trained model to', export_path)
builder = tf.compat.v1.saved_model.builder.SavedModelBuilder(export_path)

tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(self.img)
tensor_info_y = tf.compat.v1.saved_model.utils.build_tensor_info(self.output)

prediction_signature = (
    tf.compat.v1.saved_model.signature_def_utils.build_signature_def(
        inputs={'image': tensor_info_x},
        outputs={'joints': tensor_info_y},
        method_name=tf.compat.v1.saved_model.signature_constants.PREDICT_METHOD_NAME))

builder.add_meta_graph_and_variables(
    self.Session, [tf.compat.v1.saved_model.tag_constants.SERVING],
    signature_def_map={
        tf.compat.v1.saved_model.signature_constants
            .DEFAULT_SERVING_SIGNATURE_DEF_KEY:
            prediction_signature,
    },
    strip_default_attrs=True)

builder.save()
print('Done exporting!')
The model saves without errors and I see "Done exporting!" in the console, but the model's .pb file is essentially empty: it is only 252 bytes. What can be the problem here? I will appreciate any help.
I am working with the ssd_mobilenet_v2_coco_2018_03_29 pretrained Tensorflow model. I want to change the input to a fixed size and save the model as saved_model.pb (I am using the Neuron Compiler, which requires this format).
Here is how I change the input tensor to a fixed size:
graph = tf.Graph()
with graph.as_default():
    fixed_image_tensor = tf.placeholder(tf.uint8, shape=(None, 300, 300, 3), name='image_tensor')
    graph_def = tf.GraphDef()
    with tf.io.gfile.GFile(frozen_pb_file, 'rb') as f:
        serialized_graph = f.read()
    graph_def.ParseFromString(serialized_graph)
    tf.import_graph_def(graph_def, name='', input_map={"image_tensor:0": fixed_image_tensor})
And now I save the modified graph in the saved_model.pb format using tf.saved_model.simple_save:
image_tensor = graph.get_tensor_by_name('image_tensor:0')
boxes_tensor = graph.get_tensor_by_name('detection_boxes:0')
scores_tensor = graph.get_tensor_by_name('detection_scores:0')
classes_tensor = graph.get_tensor_by_name('detection_classes:0')
num_detections_tensor = graph.get_tensor_by_name('num_detections:0')

sess = tf.Session(graph=graph)

tf.saved_model.simple_save(
    session=sess,
    export_dir='model/',
    inputs={image_tensor.name: image_tensor},
    outputs={
        boxes_tensor.name: boxes_tensor,
        scores_tensor.name: scores_tensor,
        classes_tensor.name: classes_tensor,
        num_detections_tensor.name: num_detections_tensor
    }
)
The code creates the following directory (variables is empty):
|-model/
|---variables/
|---saved_model.pb
The saved_model.pb is only 370 bytes, so it must contain no actual information. I also tried tf.saved_model.Builder like this and this, but still got the exact same result.
I can still use the sess for inference as usual with no problems. What did I do wrong? Are there any other approaches? I am using Tensorflow 1.15.0.
Here is your code slightly rearranged; with TF 1.13 it produced a 67 MB *.pb file. I reloaded the generated SavedModel: the input has your dimensions and all the listed outputs are present:
import tensorflow as tf

frozen_pb_file = "./ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb"

graph = tf.Graph()
with graph.as_default():
    fixed_image_tensor = tf.placeholder(tf.uint8, shape=(None, 300, 300, 3), name='image_tensor')

    graph_def = tf.GraphDef()
    with tf.io.gfile.GFile(frozen_pb_file, 'rb') as f:
        serialized_graph = f.read()
    graph_def.ParseFromString(serialized_graph)
    tf.import_graph_def(graph_def, name='', input_map={"image_tensor:0": fixed_image_tensor})

    image_tensor = graph.get_tensor_by_name('image_tensor:0')
    boxes_tensor = graph.get_tensor_by_name('detection_boxes:0')
    scores_tensor = graph.get_tensor_by_name('detection_scores:0')
    classes_tensor = graph.get_tensor_by_name('detection_classes:0')
    num_detections_tensor = graph.get_tensor_by_name('num_detections:0')

    sess = tf.Session(graph=graph)
    file_writer = tf.summary.FileWriter(logdir='log', graph=graph)

    tf.saved_model.simple_save(
        session=sess,
        export_dir='model/',
        inputs={image_tensor.name: fixed_image_tensor},
        outputs={
            boxes_tensor.name: boxes_tensor,
            scores_tensor.name: scores_tensor,
            classes_tensor.name: classes_tensor,
            num_detections_tensor.name: num_detections_tensor
        }
    )
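To double-check the reload step mentioned above, here is a minimal sketch (my addition, not part of the original answer) that loads the export back into a fresh graph and prints its signatures; a healthy export should show the prediction signature with the fixed input shape:
with tf.Session(graph=tf.Graph()) as check_sess:
    # Load the SavedModel we just wrote and inspect its signature map.
    meta_graph = tf.saved_model.loader.load(
        check_sess, [tf.saved_model.tag_constants.SERVING], 'model/')
    print(meta_graph.signature_def)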
I am trying to load a custom TFRecord file into my keras model. I attempted to follow this tutorial: https://medium.com/@moritzkrger/speeding-up-keras-with-tfrecord-datasets-5464f9836c36, adapting it for my use.
My goal is to have the functions work similarly to ImageDataGenerator from Keras. I cannot use that class because I need specific metadata from the images that the generator does not grab. I'm not including that metadata here because I just need the basic network to function first.
I also want to be able to apply this to a transfer learning application.
I keep getting this error: TypeError: Could not build a TypeSpec for None with type NoneType
I am using Tensorflow 2.2.
def _parse_function(serialized):
    features = \
        {
            'image': tf.io.FixedLenFeature([], tf.string),
            'label': tf.io.FixedLenFeature([], tf.int64),
            'shapex': tf.io.FixedLenFeature([], tf.int64),
            'shapey': tf.io.FixedLenFeature([], tf.int64),
        }
    parsed_example = tf.io.parse_single_example(serialized=serialized,
                                                features=features)
    shapex = tf.cast(parsed_example['shapex'], tf.int32)
    shapey = tf.cast(parsed_example['shapey'], tf.int32)
    image_shape = tf.stack([shapex, shapey, 3])
    image_raw = parsed_example['image']
    # Decode the raw bytes so it becomes a tensor with type.
    image = tf.io.decode_raw(image_raw, tf.uint8)
    image = tf.reshape(image, image_shape)
    # Get labels
    label = tf.cast(parsed_example['label'], tf.float32)
    return image, label

def imgs_inputs(type, perform_shuffle=False):
    records_dir = '/path/to/tfrecord/'
    record_paths = [os.path.join(records_dir, record_name) for record_name in os.listdir(records_dir)]
    full_dataset = tf.data.TFRecordDataset(filenames=record_paths)
    full_dataset = full_dataset.map(_parse_function, num_parallel_calls=16)
    dataset_length = len(list(full_dataset))  # Gets length of dataset
    databatch = full_dataset.batch(100)  # Batching step (assumed; the original snippet used databatch without defining it)
    iterator = tf.compat.v1.data.make_one_shot_iterator(databatch)
    image, label = iterator.get_next()
    # Labels saved as values, e.g. [1, 2, 3], are converted to one-hot encoding
    label = to_categorical(label)
    return image, label
image, label = imgs_inputs(type='Train', perform_shuffle=True)

# Combine it with Keras
# base_model = MobileNet(weights='imagenet', include_top=False, input_shape=(200,200,3), dropout=.3)
model_input = Input(shape=[200, 200, 3])

# Build your network
model_output = Flatten(input_shape=(200, 200, 3))(model_input)
model_output = Dense(19, activation='relu')(model_output)

# Create your model
train_model = Model(inputs=model_input, outputs=model_output)

# Compile your model
optimizer = Adam(learning_rate=.001)
train_model.compile(optimizer=optimizer, loss='mean_squared_error',
                    metrics=['accuracy'], target_tensors=[label])

# Train the model
train_model.fit(epochs=10, steps_per_epoch=2)
image returns an array of shape (100, 200, 200, 3), which is a batch of 100 images.
label returns an array of shape (100, 19), which is a batch of 100 labels (there are 19 labels).
The issue is related to shapex and shapey, but I don't know exactly why.
I set shapex = 200 and shapey = 200. Then I rewrote the model to include the transfer learning:
base_model = MobileNet(weights='imagenet', include_top=False, input_shape=(200, 200, 3), dropout=.3)
x = base_model.output
types = Dense(19, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=types)
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy'])
history = model.fit(get_batches(), steps_per_epoch=1000, epochs=10)
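For completeness, a minimal sketch (my reconstruction, assuming 200x200 RGB images) of what the parse function looks like with the shapes hard-coded, so the shapex/shapey features that triggered the TypeSpec error are no longer used:
def _parse_function(serialized):
    features = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }
    parsed_example = tf.io.parse_single_example(serialized=serialized, features=features)
    image = tf.io.decode_raw(parsed_example['image'], tf.uint8)
    # Fixed shape instead of the shapex/shapey tensors read from the record.
    image = tf.reshape(image, [200, 200, 3])
    label = tf.cast(parsed_example['label'], tf.float32)
    return image, label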
I found everything I needed in this Google Colab: https://colab.research.google.com/github/GoogleCloudPlatform/training-data-analyst/blob/master/courses/fast-and-lean-data-science/04_Keras_Flowers_transfer_learning_solution.ipynb#scrollTo=XLJNVGwHUDy1
I'm pretty sure I'm missing something about how tensorflow works because my solution doesn't make any sense.
I'm trying to train a neural network (from scratch, without using Estimators or other abstractions), save it, and load a simplified version of it for inference.
The following code trains but gives me the error: FailedPreconditionError (see above for traceback): Attempting to use uninitialized value hidden0/biases/Variable
[[Node: hidden0/biases/Variable/read = Identity[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](hidden0/biases/Variable)]]. If I add the commented line, i.e. if I recreate the saver object that I'm not going to use or return, the code works just fine.
Why do I need to create a (useless) saver object in order to restore the saved weights?
import tensorflow as tf
import numpy as np

def add_fc_layer(input_tensor, input_dimensions, output_dimensions, layer_name, activation=None):
    with tf.variable_scope(layer_name):
        with tf.variable_scope('weights'):
            weights = tf.Variable(tf.truncated_normal([input_dimensions, output_dimensions]))
        with tf.variable_scope('biases'):
            biases = tf.Variable(tf.zeros([output_dimensions]))
        with tf.variable_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
        if activation is None:
            return preactivate
        with tf.variable_scope('activation'):
            activations = activation(preactivate)
        return activations

def make_network(model_phase):
    if model_phase not in {"train", "test"}:
        raise ValueError("invalid type")

    hidden0_units = 25
    hidden1_units = 15
    hidden2_units = 10
    input_size = 10
    output_size = 4

    with tf.variable_scope('InputVector'):
        inputs = tf.placeholder(shape=[1, input_size], dtype=tf.float32)

    hidden0_out = add_fc_layer(inputs, input_size, hidden0_units, "hidden0", activation=tf.nn.sigmoid)
    hidden1_out = add_fc_layer(hidden0_out, hidden0_units, hidden1_units, "hidden1", activation=tf.nn.sigmoid)
    hidden2_out = add_fc_layer(hidden1_out, hidden1_units, hidden2_units, "hidden2", activation=tf.nn.sigmoid)
    out = add_fc_layer(hidden2_out, hidden2_units, output_size, "regression")

    if model_phase == "test":
        # UNCOMMENTING THIS LINE MAKES THE SCRIPT WORK
        # saver = tf.train.Saver(var_list=tf.trainable_variables())
        return inputs, out

    saver = tf.train.Saver(var_list=tf.trainable_variables())

    with tf.variable_scope('training'):
        with tf.variable_scope('groundTruth'):
            ground_truth = tf.placeholder(shape=[1, output_size], dtype=tf.float32)
        with tf.variable_scope('loss'):
            loss = tf.reduce_sum(tf.square(ground_truth - out))
            tf.summary.scalar('loss', loss)
        with tf.variable_scope('optimizer'):
            trainer = tf.train.AdamOptimizer(learning_rate=0.001)
        with tf.variable_scope('gradient'):
            updateModel = trainer.minimize(loss)

    with tf.variable_scope('predict'):
        predict = tf.random_shuffle(tf.boolean_mask(out, tf.equal(out, tf.reduce_max(out, axis=None))))[0]

    writer = tf.summary.FileWriter('/tmp/test', tf.get_default_graph())

    return inputs, out, ground_truth, updateModel, writer, saver

train_graph = tf.Graph()
with tf.Session(graph=train_graph) as sess:
    tf.set_random_seed(42)
    inputs, out, ground_truth, updateModel, writer, saver = make_network(model_phase='train')
    init = tf.initialize_all_variables()
    sess.run(init)

    print('\nLearning...')
    for _ in range(10):
        sess.run([updateModel], feed_dict={inputs: np.arange(10) + np.random.random((1, 10)),
                                           ground_truth: np.arange(4).reshape(1, 4)})
    saver.save(sess, './tensorflowModel.ckpt')

new_graph = tf.Graph()
with tf.Session(graph=new_graph) as sess:
    inputs, out = make_network(model_phase='test')
    saver = tf.train.import_meta_graph('./tensorflowModel.ckpt.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./'))

    # evaluation
    print('\nEvaluation...')
    for _ in range(10):
        _ = sess.run(out, feed_dict={inputs: np.arange(10).reshape(1, 10)})
I don't know why creating an unused Saver makes the problem go away, but the code betrays a misunderstanding.
When restoring, you are creating the model graph twice. First you call make_network(), which creates the computation graph and variables; then you also call import_meta_graph, which creates another graph and another set of variables. You should create a saver with a plain saver = tf.train.Saver() instead of saver = tf.train.import_meta_graph('./tensorflowModel.ckpt.meta'), as sketched below.
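A minimal sketch of the corrected restore block, reusing the paths and names from the question:
new_graph = tf.Graph()
with tf.Session(graph=new_graph) as sess:
    inputs, out = make_network(model_phase='test')
    # A plain Saver attaches to the variables make_network() just created,
    # instead of importing a second copy of the graph from the .meta file.
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('./'))

    print('\nEvaluation...')
    for _ in range(10):
        print(sess.run(out, feed_dict={inputs: np.arange(10).reshape(1, 10)}))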
I have created a classification model using retrain.py, then deployed it on Google ML Engine. It gives the following error:
Create Version failed. Model validation failed: Outer dimension for outputs must be unknown, the outer dimension of 'Const:0' is 12 For more information on how to export Tensorflow SavedModel, see https://www.tensorflow.org/api_docs/python/tf/saved_model.
StackOverflow suggests changing the output tensor's dimension. Here is my export function:
def export_model(sess, keys, architecture, saved_model_dir):
    print("reached export funct")
    if architecture == 'inception_v3':
        input_tensor = 'DecodeJpeg/contents:0'
    elif architecture.startswith('mobilenet_'):
        input_tensor = 'input:0'
    else:
        raise ValueError('Unknown architecture', architecture)
    in_image = sess.graph.get_tensor_by_name(input_tensor)
    inputs = {'image': tf.saved_model.utils.build_tensor_info(in_image)}

    out_classes = sess.graph.get_tensor_by_name('final_result:0')
    outputs = {'prediction':
                   tf.saved_model.utils.build_tensor_info(out_classes),
               'classes':
                   tf.saved_model.utils.build_tensor_info(tf.convert_to_tensor(list(keys)))}

    signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=inputs,
        outputs=outputs,
        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
    )

    print(tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY)

    legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')

    # Save out the SavedModel.
    builder = tf.saved_model.builder.SavedModelBuilder(saved_model_dir)
    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                signature
        },
        legacy_init_op=legacy_init_op)
    builder.save()
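Along the lines of that suggestion, here is a hedged sketch (an assumption on my part, not a verified fix) of one way to give the 'classes' output a dynamic outer dimension: tile the keys constant along the batch dimension of final_result, so the outer dimension is no longer the fixed size of the Const tensor:
# Hypothetical rework of the 'classes' output (my assumption, not a confirmed fix):
# tile the class keys along the dynamic batch dimension of final_result.
keys_tensor = tf.convert_to_tensor(list(keys))                          # shape: [num_classes]
batch_size = tf.shape(out_classes)[0]                                   # dynamic batch dimension
tiled_keys = tf.tile(tf.expand_dims(keys_tensor, 0), [batch_size, 1])   # shape: [None, num_classes]

outputs = {
    'prediction': tf.saved_model.utils.build_tensor_info(out_classes),
    'classes': tf.saved_model.utils.build_tensor_info(tiled_keys),
}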