I'm trying to export my retrained Inception graph (a .pb file) for TensorFlow Serving. Since I was not able to find any snippets for exporting a .pb file, I had to write my own, but I'm obviously doing something wrong: I'm getting a "No variables to save" error. I would appreciate any help.
Update: after researching more, I think I need a MetaGraph to supply the variables, but retrain.py doesn't give me a MetaGraph. Any ideas?
import sys

import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter

label_lines = [line.rstrip() for line
               in tf.gfile.GFile("retrained_labels.txt")]

with tf.gfile.FastGFile("retrained_graph.pb", 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    # Note: with no return_elements, import_graph_def returns None and the
    # graph is imported into the current default graph.
    graph = tf.import_graph_def(graph_def, name='')

with tf.Session(graph=graph) as sess:
    output_tensor = sess.graph.get_tensor_by_name('final_result:0')
    input_tensor = sess.graph.get_tensor_by_name('DecodeJpeg/contents:0')

    mapping_string = tf.constant(label_lines)
    indices = tf.constant([0, len(label_lines) - 1], tf.int64)
    prediction_classes = tf.contrib.lookup.index_to_string(indices, mapping=mapping_string)

    export_path = sys.argv[1]
    print('Exporting trained model to %s' % export_path)
    init_op = tf.group(tf.initialize_all_tables(), name='init_op')

    # "No variables to save" is raised here: the frozen graph contains only
    # constants, so there are no tf.Variable objects for the Saver to collect.
    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    model_exporter.init(
        sess.graph.as_graph_def(),
        init_op=init_op,
        default_graph_signature=exporter.classification_signature(
            input_tensor=input_tensor,
            classes_tensor=prediction_classes,
            scores_tensor=output_tensor),
        named_graph_signatures={
            'inputs': exporter.generic_signature({'images': 'DecodeJpeg/contents:0'}),
            'outputs': exporter.generic_signature({'scores': 'final_result:0'})})
    model_exporter.export(export_path, tf.constant(1), sess)
    print('Done exporting!')
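Since a frozen .pb holds only constants, one way to sidestep the Saver entirely is to write a SavedModel directly with TF 1.x's SavedModelBuilder. A minimal sketch, assuming the tensor names from the code above (the label-lookup table is omitted to keep it short):

import tensorflow as tf

# Load the frozen graph (constants only, no variables).
with tf.gfile.FastGFile("retrained_graph.pb", 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

graph = tf.Graph()
with graph.as_default():
    tf.import_graph_def(graph_def, name='')

with tf.Session(graph=graph) as sess:
    input_tensor = graph.get_tensor_by_name('DecodeJpeg/contents:0')
    output_tensor = graph.get_tensor_by_name('final_result:0')
    signature = tf.saved_model.signature_def_utils.predict_signature_def(
        inputs={'images': input_tensor},
        outputs={'scores': output_tensor})
    builder = tf.saved_model.builder.SavedModelBuilder('export_dir')
    builder.add_meta_graph_and_variables(
        sess,
        [tf.saved_model.tag_constants.SERVING],
        signature_def_map={'serving_default': signature})
    builder.save()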
I'm trying to use a model for OCR, and I have this block of code, which results in the error "AttributeError: module 'tensorflow._api.v2.saved_model' has no attribute 'loader'". I assume this is due to using a TF version 1 construct that is no longer valid in v2:
def load_graph(self, modelFile):
    graph = tf.Graph()
    # graph_def = tf.compat.v1.GraphDef()
    sess = tf.compat.v1.Session(graph=tf.Graph())
    # Point of failure:
    model = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], './model')
    graph_def = model.graph_def
    with open(modelFile, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph
I have already changed the original construction from tf.Session() to tf.compat.v1.Session(), which is what leads to this error.
What would be the equivalent for TF v2 that I can use in this case?
EDIT: Previously I had tried this variation, which produced the same error.
def load_graph(self, modelFile):
    graph = tf.Graph()
    graph_def = tf.GraphDef()
    with open(modelFile, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph
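Assuming the goal is just to translate the failing line, the v1 serialization API still exists in TF 2.x under tf.compat.v1, so a hedged sketch of the same method would be:

def load_graph(self, modelFile):
    graph = tf.Graph()
    sess = tf.compat.v1.Session(graph=tf.Graph())
    # tf.saved_model.loader moved: in TF 2.x use tf.compat.v1.saved_model.loader,
    # and tag_constants.SERVING is now available as tf.saved_model.SERVING.
    model = tf.compat.v1.saved_model.loader.load(
        sess, [tf.saved_model.SERVING], './model')
    graph_def = model.graph_def
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph

The same applies to the EDIT variation: tf.GraphDef() becomes tf.compat.v1.GraphDef() in TF 2.x.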
I am working with the ssd_mobilenet_v2_coco_2018_03_29 pretrained TensorFlow model. I want to change the input to a fixed size and save the model as saved_model.pb (I am using the Neuron compiler, which requires this format).
Here is how I change the input tensor to a fixed size:
graph = tf.Graph()
with graph.as_default():
    fixed_image_tensor = tf.placeholder(tf.uint8, shape=(None, 300, 300, 3), name='image_tensor')
    graph_def = tf.GraphDef()
    with tf.io.gfile.GFile(frozen_pb_file, 'rb') as f:
        serialized_graph = f.read()
        graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(graph_def, name='', input_map={"image_tensor:0": fixed_image_tensor})
Now I save the modified graph in the saved_model.pb format using tf.saved_model.simple_save:
image_tensor = graph.get_tensor_by_name('image_tensor:0')
boxes_tensor = graph.get_tensor_by_name('detection_boxes:0')
scores_tensor = graph.get_tensor_by_name('detection_scores:0')
classes_tensor = graph.get_tensor_by_name('detection_classes:0')
num_detections_tensor = graph.get_tensor_by_name('num_detections:0')

sess = tf.Session(graph=graph)

tf.saved_model.simple_save(
    session=sess,
    export_dir='model/',
    inputs={image_tensor.name: image_tensor},
    outputs={
        boxes_tensor.name: boxes_tensor,
        scores_tensor.name: scores_tensor,
        classes_tensor.name: classes_tensor,
        num_detections_tensor.name: num_detections_tensor
    }
)
The code creates the following directory (variables/ is empty):
|-model/
|---variables/
|---saved_model.pb
The saved_model.pb is only 370 bytes, so it can't contain any actual information. I also tried tf.saved_model.Builder like this and this, but still got exactly the same result.
I can still use the sess for inference as usual with no problems. What did I do wrong? Are there any other approaches? I am using Tensorflow 1.15.0.
Here is a slightly rearranged version of your code; on TF 1.13 it gives me a 67 MB *.pb file. I reloaded the generated SavedModel: the input has your dimensions, and all the listed outputs are present:
import tensorflow as tf

frozen_pb_file = "./ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb"

graph = tf.Graph()
with graph.as_default():
    fixed_image_tensor = tf.placeholder(tf.uint8, shape=(None, 300, 300, 3), name='image_tensor')
    graph_def = tf.GraphDef()
    with tf.io.gfile.GFile(frozen_pb_file, 'rb') as f:
        serialized_graph = f.read()
        graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(graph_def, name='', input_map={"image_tensor:0": fixed_image_tensor})

    image_tensor = graph.get_tensor_by_name('image_tensor:0')
    boxes_tensor = graph.get_tensor_by_name('detection_boxes:0')
    scores_tensor = graph.get_tensor_by_name('detection_scores:0')
    classes_tensor = graph.get_tensor_by_name('detection_classes:0')
    num_detections_tensor = graph.get_tensor_by_name('num_detections:0')

    sess = tf.Session(graph=graph)
    file_writer = tf.summary.FileWriter(logdir='log', graph=graph)

    tf.saved_model.simple_save(
        session=sess,
        export_dir='model/',
        inputs={image_tensor.name: fixed_image_tensor},
        outputs={
            boxes_tensor.name: boxes_tensor,
            scores_tensor.name: scores_tensor,
            classes_tensor.name: classes_tensor,
            num_detections_tensor.name: num_detections_tensor
        }
    )
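If it helps, a quick sanity check (still TF 1.x) is to reload the export in a fresh session and print its signature; simple_save registers it under the 'serving_default' key, and loader.load returns the MetaGraphDef that carries it:

with tf.Session(graph=tf.Graph()) as check_sess:
    meta_graph = tf.saved_model.loader.load(
        check_sess, [tf.saved_model.tag_constants.SERVING], 'model/')
    print(meta_graph.signature_def['serving_default'])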
I have tried to freeze my TensorFlow graph and restore it, but when I try to run prediction, I get the error:
You must feed a value for placeholder tensor 'DQNetwork/actions' with dtype float and shape [?,10]
My restoration code is:
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import gfile

sess = tf.Session()
graph = tf.get_default_graph()
with graph.as_default():
    with sess.as_default():
        GRAPH_PB_PATH = "./frozentensorflowModel.pb"
        with gfile.FastGFile(GRAPH_PB_PATH, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        x_tensor = graph.get_tensor_by_name("DQNetwork/inputs:0")
        op_to_restore = graph.get_tensor_by_name("DQNetwork/actions:0")

        new_state(cards.copy())
        state = game_state.state
        feed_dict = {x_tensor: state.reshape((1, *state.shape))}

        opt = []
        opt = sess.run(op_to_restore, feed_dict)  # Error thrown here
        predictions = np.argmax(opt, 1)
I defined my DQNetwork inputs like so:
DQNetwork.inputs = tf.placeholder(tf.float32, [None, state_size], name="inputs")
DQNetwork.actions = tf.placeholder(tf.float32, [None, action_size], name="actions")
More info:
>>>op_to_restore
<tf.Tensor 'DQNetwork/actions:0' shape=(?, 10) dtype=float32>
>>>op_to_restore.op
<tf.Operation 'DQNetwork/actions' type=Placeholder>
Training line:
results = sess.run(DQNetwork.output, feed_dict = {DQNetwork.inputs: input_batch})
This may help you:
sess = tf.Session()
graph = tf.get_default_graph()
with graph.as_default():
    with sess.as_default():
        GRAPH_PB_PATH = "./frozentensorflowModel.pb"
        with gfile.FastGFile(GRAPH_PB_PATH, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        x_tensor = graph.get_tensor_by_name("DQNetwork/inputs:0")
        # Fetch the Operation rather than the placeholder's output Tensor.
        op_to_restore = graph.get_operation_by_name("DQNetwork/actions")

        new_state(cards.copy())
        state = game_state.state
        feed_dict = {x_tensor: state.reshape((1, *state.shape))}

        opt = []
        opt = sess.run(op_to_restore, feed_dict)
        predictions = np.argmax(opt, 1)
This is what I was suggesting.
I see the point of failure now:
feed_dict = {x_tensor: state.reshape((1, *state.shape))}
Instead of sess.run(op_to_restore, feed_dict), try op_to_restore.eval(feed_dict).
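Note that the error message itself points at the root cause: op_to_restore.op above shows 'DQNetwork/actions' is a Placeholder, so evaluating it will always demand a feed. For prediction you presumably want to fetch the network's output instead. A hedged sketch, assuming the output op is named "DQNetwork/output" to match the DQNetwork.output used in the training line (the exact name may differ in your graph):

output_tensor = graph.get_tensor_by_name("DQNetwork/output:0")  # name is an assumption
opt = sess.run(output_tensor, feed_dict={x_tensor: state.reshape((1, *state.shape))})
predictions = np.argmax(opt, 1)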
How can I implement a resize step in this construction?
For example, in_image has shape (845, 594, 3), but I want to resize the image to shape (299, 299, 3):
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder


def main(_):
    with tf.Graph().as_default() as graph:
        input_graph = FLAGS.input_graph
        saved_model_dir = FLAGS.saved_model_dir

        # Read in the export graph
        with tf.gfile.FastGFile(input_graph, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        # Define SavedModel Signature (inputs and outputs)
        in_image = graph.get_tensor_by_name('input:0')
        inputs = {'image_bytes': tf.saved_model.utils.build_tensor_info(in_image)}

        out_classes = graph.get_tensor_by_name('InceptionV3/Predictions/Reshape_1:0')
        outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out_classes)}

        signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs,
            outputs=outputs,
            method_name='tensorflow/serving/predict'
        )

        with tf.Session(graph=graph) as sess:
            # Save out the SavedModel.
            b = saved_model_builder.SavedModelBuilder(saved_model_dir)
            b.add_meta_graph_and_variables(
                sess,
                [tf.saved_model.tag_constants.SERVING],
                signature_def_map={'serving_default': signature})
            b.save()


if __name__ == '__main__':
    tf.app.run()
I figured it out:
def main(_):
    with tf.Graph().as_default() as graph:
        input_graph = FLAGS.input_graph
        saved_model_dir = FLAGS.saved_model_dir

        # Read in the export graph
        with tf.gfile.FastGFile(input_graph, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        serialized_tf_example = tf.placeholder(tf.string, name='b64')
        jpeg = preprocess_image(serialized_tf_example)  # decode/resize helper
        out, = tf.import_graph_def(graph.as_graph_def(),
                                   input_map={'input:0': jpeg},
                                   return_elements=['InceptionV3/Predictions/Reshape_1:0'])

        inputs = {'inputs': tf.saved_model.utils.build_tensor_info(serialized_tf_example)}
        outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out)}

        signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs,
            outputs=outputs,
            method_name='tensorflow/serving/predict'
        )

        with tf.Session(graph=graph) as sess:
            # Save out the SavedModel.
            b = saved_model_builder.SavedModelBuilder(saved_model_dir)
            b.add_meta_graph_and_variables(
                sess,
                [tf.saved_model.tag_constants.SERVING],
                signature_def_map={'serving_default': signature})
            b.save()
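The preprocess_image helper is not shown above. A plausible version, assuming the placeholder carries a single JPEG-encoded string and InceptionV3 expects a float batch of shape (1, 299, 299, 3):

def preprocess_image(image_bytes):
    # Decode the JPEG string, scale pixels to [0, 1], resize to 299x299,
    # and add a batch dimension.
    image = tf.image.decode_jpeg(image_bytes, channels=3)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize_images(image, [299, 299])
    return tf.expand_dims(image, 0)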
I'm pretty sure I'm missing something about how TensorFlow works, because my solution doesn't make any sense.
I'm trying to train a neural network (from scratch, without using Estimators or other abstractions), save it, and load a simplified version of it for inference.
The following code trains, but gives me this error:
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value hidden0/biases/Variable
[[Node: hidden0/biases/Variable/read = Identity[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](hidden0/biases/Variable)]]
If I add the commented line - recreating a saver object that I'm not going to use or return - the code works just fine.
Why do I need to create a (useless) saver object in order to restore the saved weights?
import tensorflow as tf
import numpy as np


def add_fc_layer(input_tensor, input_dimensions, output_dimensions, layer_name, activation=None):
    with tf.variable_scope(layer_name):
        with tf.variable_scope('weights'):
            weights = tf.Variable(tf.truncated_normal([input_dimensions, output_dimensions]))
        with tf.variable_scope('biases'):
            biases = tf.Variable(tf.zeros([output_dimensions]))
        with tf.variable_scope('Wx_plus_b'):
            preactivate = tf.matmul(input_tensor, weights) + biases
        if activation is None:
            return preactivate
        with tf.variable_scope('activation'):
            activations = activation(preactivate)
        return activations


def make_network(model_phase):
    if model_phase not in {"train", "test"}:
        raise ValueError("invalid type")

    hidden0_units = 25
    hidden1_units = 15
    hidden2_units = 10
    input_size = 10
    output_size = 4

    with tf.variable_scope('InputVector'):
        inputs = tf.placeholder(shape=[1, input_size], dtype=tf.float32)

    hidden0_out = add_fc_layer(inputs, input_size, hidden0_units, "hidden0", activation=tf.nn.sigmoid)
    hidden1_out = add_fc_layer(hidden0_out, hidden0_units, hidden1_units, "hidden1", activation=tf.nn.sigmoid)
    hidden2_out = add_fc_layer(hidden1_out, hidden1_units, hidden2_units, "hidden2", activation=tf.nn.sigmoid)
    out = add_fc_layer(hidden2_out, hidden2_units, output_size, "regression")

    if model_phase == "test":
        # UNCOMMENTING THIS LINE MAKES THE SCRIPT WORK
        # saver = tf.train.Saver(var_list=tf.trainable_variables())
        return inputs, out

    saver = tf.train.Saver(var_list=tf.trainable_variables())

    with tf.variable_scope('training'):
        with tf.variable_scope('groundTruth'):
            ground_truth = tf.placeholder(shape=[1, output_size], dtype=tf.float32)
        with tf.variable_scope('loss'):
            loss = tf.reduce_sum(tf.square(ground_truth - out))
            tf.summary.scalar('loss', loss)
        with tf.variable_scope('optimizer'):
            trainer = tf.train.AdamOptimizer(learning_rate=0.001)
        with tf.variable_scope('gradient'):
            updateModel = trainer.minimize(loss)

    with tf.variable_scope('predict'):
        predict = tf.random_shuffle(tf.boolean_mask(out, tf.equal(out, tf.reduce_max(out, axis=None))))[0]

    writer = tf.summary.FileWriter('/tmp/test', tf.get_default_graph())

    return inputs, out, ground_truth, updateModel, writer, saver


train_graph = tf.Graph()
with tf.Session(graph=train_graph) as sess:
    tf.set_random_seed(42)
    inputs, out, ground_truth, updateModel, writer, saver = make_network(model_phase='train')
    init = tf.initialize_all_variables()
    sess.run(init)

    print('\nLearning...')
    for _ in range(10):
        sess.run([updateModel], feed_dict={inputs: np.arange(10) + np.random.random((1, 10)),
                                           ground_truth: np.arange(4).reshape(1, 4)})
    saver.save(sess, './tensorflowModel.ckpt')

new_graph = tf.Graph()
with tf.Session(graph=new_graph) as sess:
    inputs, out = make_network(model_phase='test')
    saver = tf.train.import_meta_graph('./tensorflowModel.ckpt.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./'))

    # evaluation
    print('\nEvaluation...')
    for _ in range(10):
        _ = sess.run(out, feed_dict={inputs: np.arange(10).reshape(1, 10)})
I don't know why creating an unused Saver makes the problem go away, but the code betrays a misunderstanding.
When you are restoring, you are creating the model graph twice. First, you call make_network(), which creates the computation graph and its variables. Then you also call import_meta_graph, which creates a second copy of the graph and variables. Instead of saver = tf.train.import_meta_graph('./tensorflowModel.ckpt.meta'), you should create a saver with a plain saver = tf.train.Saver().
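A minimal sketch of the fixed restore path, assuming the training script above has already written ./tensorflowModel.ckpt: build the graph once with make_network(), then let a plain Saver restore into exactly those variables:

new_graph = tf.Graph()
with tf.Session(graph=new_graph) as sess:
    inputs, out = make_network(model_phase='test')
    # The Saver pairs the variables just created with checkpoint entries by name.
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('./'))
    print(sess.run(out, feed_dict={inputs: np.arange(10).reshape(1, 10)}))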