Loading a TensorFlow model only once - python

I am loading the model as follows:
import os
import tensorflow as tf
from tensorflow.python.platform import gfile

def _load_model(model_filepath):
    model_exp = os.path.expanduser(model_filepath)
    if os.path.isfile(model_exp):
        print("loading model to graph")
        with gfile.FastGFile(model_exp, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
and using this function in the following code:
tf.reset_default_graph()
with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
    _load_model(model_filepath=model_path)
    test_set = _get_test_data(input_directory)
    images, labels = _load_images_and_labels(test_set, image_size=160,
                                             batch_size=batch_size,
                                             num_threads=num_threads,
                                             num_epochs=1)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embedding_layer = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
On each API call I am resetting the default graph and loading the model, which takes a long time.
I want to load my model only once and use it in a session with a new graph.
How can I achieve this?

Usually you save and load models with tf.train.Saver(); see the docs.
So after you train your model, you do something like this:
saver.save(sess, "/path/model.ckpt")
and when you want to load ("restore") it, you do something like this:
saver = tf.train.Saver()
saver.restore(sess, "/path/model.ckpt")
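That said, if the goal is specifically to avoid re-importing the frozen graph on every API call, one common pattern is to import the graph and create the session once, at module level, and reuse both across calls. A minimal sketch, assuming the same frozen-graph file and the input:0/embeddings:0/phase_train:0 tensor names from the question (the embed() helper is hypothetical):
import os
import tensorflow as tf
from tensorflow.python.platform import gfile

_sess = None  # module-level session, created on first use and then reused

def _get_session(model_filepath):
    global _sess
    if _sess is None:
        graph = tf.Graph()
        with graph.as_default():
            with gfile.FastGFile(os.path.expanduser(model_filepath), 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                tf.import_graph_def(graph_def, name='')
        _sess = tf.Session(graph=graph)  # the graph is imported exactly once
    return _sess

def embed(images, model_filepath):
    sess = _get_session(model_filepath)
    feed = {sess.graph.get_tensor_by_name('input:0'): images,
            sess.graph.get_tensor_by_name('phase_train:0'): False}
    return sess.run(sess.graph.get_tensor_by_name('embeddings:0'), feed_dict=feed)
Each API call then reuses the already-imported graph instead of calling tf.reset_default_graph() and _load_model() again.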
As Jonathan DEKHTIAR has already mentioned, it makes sense to use the search before asking questions:
Tensorflow: how to save/restore a model?

Related

NotImplementedError: Pre-trained Graph Output -> New Layers

I am working on feeding some outputs of a pre-trained graph into some additional layers in Tensorflow. Here is a walkthrough of some of my code:
First, I define a new tf.Graph() and load in the pre-trained model.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile('./mobilenetssd/frozen_inference_graph.pb', 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
Fetching input/output tensors of the loaded graph, defining placeholders, adding some ops.
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
output_matrix = detection_graph.get_tensor_by_name('concat:0')
labels = tf.placeholder(tf.float32, [None, 1])
# Adding operations
outmat_sq = tf.squeeze(output_matrix)
logits_max = tf.squeeze(tf.math.reduce_max(outmat_sq, reduction_indices=[0]))
logits_mean = tf.squeeze(tf.math.reduce_mean(outmat_sq, reduction_indices=[0]))
logodds = tf.concat([logits_max, logits_mean], 0)
logodds = tf.expand_dims(logodds, 0)
logodds.set_shape([None, 1204])
Defining the new layers, setting up optimizer to train new layers.
hidden = tf.contrib.layers.fully_connected(inputs=logodds, num_outputs=500, activation_fn=tf.nn.tanh)
out = tf.contrib.layers.fully_connected(inputs=hidden, num_outputs=1, activation_fn=tf.nn.sigmoid)
# Define Loss, Training, and Accuracy
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=out, labels=labels))
training_step = tf.train.AdamOptimizer(1e-6).minimize(loss, var_list=[hidden, out])
correct_prediction = tf.equal(tf.round(out), labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
After running this code, I am getting a NotImplementedError: ('Trying to update a Tensor ', <tf.Tensor 'fully_connected/Tanh:0' shape=(?, 500) dtype=float32>). This seems to be a problem with "linking" the two parts of the model together. Do I need to pass the output of the first graph into a tf.Variable and then pass that into the subsequent layers? Also, I am using TF 1.10.
Any insight on this would be appreciated!
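A hedged note on the likely cause: the var_list argument of minimize() expects tf.Variable objects, but hidden and out are the output tensors of the fully connected layers, which is consistent with the "Trying to update a Tensor" error. A minimal sketch of a fix, assuming the layers live under the default fully_connected variable scopes that tf.contrib.layers assigns (verify the scope names against your graph):
# Collect the trainable variables of the new layers (the scope prefix
# 'fully_connected' matches both fully_connected and fully_connected_1)
# instead of passing the layers' output tensors to var_list.
new_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='fully_connected')
training_step = tf.train.AdamOptimizer(1e-6).minimize(loss, var_list=new_vars)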

How to restore pretrained model to initialize parameters

I have downloaded a network with its pretrained model. I added several layers and parameters to the network. I want to use the pretrained model to initialize the original parameters and to randomly initialize the newly added parameters myself. I use this code:
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, "output/saver-test")
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
but I met the error "Key global_step not found in checkpoint". This error occurs because some of my new parameters don't exist in the pretrained model. But how can I solve this problem? What's more, I want to use sess.run(tf.global_variables_initializer()) to initialize the newly added parameters, but won't the parameters restored from the pretrained model be overwritten by it?
It happens because your network does not perfectly match the loaded one.
You can use a selective checkpoint loader, something like this:
reader = tf.train.NewCheckpointReader(os.path.join(checkpoint_dir, ckpt_name))
restore_dict = dict()
for v in tf.trainable_variables():
    tensor_name = v.name.split(':')[0]
    if reader.has_tensor(tensor_name):
        print('has tensor ', tensor_name)
        restore_dict[tensor_name] = v
restore_dict['my_new_var_scope/my_new_var'] = self.get_my_new_var_variable()
Where get_my_new_var_variable() is something like this:
def get_my_new_var_variable(self):
    with tf.variable_scope("my_new_var_scope", reuse=tf.AUTO_REUSE):
        my_new_var = tf.get_variable("my_new_var", dtype=tf.int32,
                                     initializer=tf.constant([23, 42]))
    return my_new_var
Loading the weights:
self.saver = tf.train.Saver(restore_dict)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
Edit:
Note that in order to avoid overriding the loaded variables, you can use this method:
def initialize_uninitialized(sess):
    global_vars = tf.global_variables()
    is_not_initialized = sess.run([tf.is_variable_initialized(var) for var in global_vars])
    not_initialized_vars = [v for (v, f) in zip(global_vars, is_not_initialized) if not f]
    if len(not_initialized_vars):
        sess.run(tf.variables_initializer(not_initialized_vars))
Or simply calling tf.global_variables_initializer() before loading the variables should work here.
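A minimal usage sketch combining the two pieces above (assuming sess, restore_dict, checkpoint_dir, and ckpt_name are set up as shown): restore the variables found in the checkpoint first, then initialize only those still uninitialized:
saver = tf.train.Saver(restore_dict)
saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
initialize_uninitialized(sess)  # randomly initializes only the new variables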

Tensorflow MNIST Sample: Code to Predict from SavedModel

I am using the sample to build a CNN as per this article: https://www.tensorflow.org/tutorials/layers
However, I am unable to find a sample that predicts by feeding in a sample image. Any help here would be highly appreciated.
Below is what I have tried; I am not able to find the output tensor name.
img = <load from file>
sess = tf.Session()
saver = tf.train.import_meta_graph('/tmp/mnist_convnet_model/model.ckpt-2000.meta')
saver.restore(sess, tf.train.latest_checkpoint('/tmp/mnist_convnet_model/'))
input_place_holder = sess.graph.get_tensor_by_name("enqueue_input/Placeholder:0")
out_put = <not sure what the tensor output name in the graph>
current_input = img
result = sess.run(out_put, feed_dict={input_place_holder: current_input})
print(result)
You can use the inspect_checkpoint tool in TensorFlow to list the tensors inside a checkpoint file. Note that file_name should be the checkpoint prefix, not the .meta file.
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
print_tensors_in_checkpoint_file(file_name="/tmp/mnist_convnet_model/model.ckpt-2000", tensor_name='', all_tensors=True)
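The checkpoint inspector lists what is stored in the checkpoint; to locate the output tensor name in the restored graph itself, a small sketch that prints every operation name after import_meta_graph may also help (the softmax/argmax ops near the end of the list are usually the prediction outputs, though the exact names depend on how the graph was built):
sess = tf.Session()
saver = tf.train.import_meta_graph('/tmp/mnist_convnet_model/model.ckpt-2000.meta')
saver.restore(sess, tf.train.latest_checkpoint('/tmp/mnist_convnet_model/'))
# Print all operation names; the prediction op is typically near the end.
for op in sess.graph.get_operations():
    print(op.name)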
There are nice instructions on how to save and restore in TensorFlow's programming guide. Here is a small example inspired by the latter link. Just make sure that the ./tmp dir exists.
import tensorflow as tf

# Create some variables.
variable = tf.get_variable("variable_1", shape=[3], initializer=tf.zeros_initializer)
inc_v1 = variable.assign(variable + 1)
# Operation to initialize variables if we do not restore from a checkpoint
init_op = tf.global_variables_initializer()
# Create the saver
saver = tf.train.Saver()
with tf.Session() as sess:
    # Setting to decide whether or not to restore
    DO_RESTORE = True
    # Where to save the data file
    save_path = "./tmp/model.ckpt"
    if DO_RESTORE:
        # If we want to restore, load the variables from the saved file
        saver.restore(sess, save_path)
    else:
        # If we don't want to restore, then initialize variables
        # using their specified initializers.
        sess.run(init_op)
    # Print the initial values of the variable
    initial_var_value = sess.run(variable)
    print("Initial:", initial_var_value)
    # Do some work with the model.
    incremented = sess.run(inc_v1)
    print("Incremented:", incremented)
    # Save the variables to disk.
    save_path = saver.save(sess, save_path)
    print("Model saved in path: %s" % save_path)

restoring weights of an already saved Tensorflow .pb model

I have seen many posts about restoring already saved TF models here, but none could answer my question. I am using TF 1.0.0.
Specifically, I am interested in seeing the weights of the Inception v3 model, which is publicly available in a .pb file here. I managed to restore it using a small chunk of Python code and can access the graph's high-level view in TensorBoard:
import os
import tensorflow as tf
from tensorflow.python.platform import gfile

INCEPTION_LOG_DIR = '/tmp/inception_v3_log'
if not os.path.exists(INCEPTION_LOG_DIR):
    os.makedirs(INCEPTION_LOG_DIR)
with tf.Session() as sess:
    model_filename = './model/tensorflow_inception_v3_stripped_optimized_quantized.pb'
    with gfile.FastGFile(model_filename, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')
    writer = tf.summary.FileWriter(INCEPTION_LOG_DIR, graph_def)
    writer.close()
However, I failed to access any layers' weights.
tensors = tf.import_graph_def(graph_def, name='')
returns empty, even if I add the arbitrary return_elements=. Does it have any weights at all? If yes, what is the appropriate procedure here? Thanks.
Use this code to print your tensor's value:
with tf.Session() as sess:
    print(sess.run('your_tensor_name'))
You can use this code to retrieve tensor names:
op = sess.graph.get_operations()
for m in op:
    print(m.values())
There is a difference between restoring weights and printing them. The former means importing the weight values from already saved ckpt files for retraining or inference, while the latter may be for inspection. Also, a .pb file encodes the model parameters as tf.constant() ops. As a result, the model parameters do not appear in tf.trainable_variables(), hence you can't use a .pb file directly to restore the weights. From your question I take it that you just want to 'see' the weights for inspection.
Let us first load the graph from the .pb file.
import tensorflow as tf
from tensorflow.python.platform import gfile

GRAPH_PB_PATH = './model/tensorflow_inception_v3_stripped_optimized_quantized.pb'  # path to your .pb file
with tf.Session() as sess:
    print("load graph")
    with gfile.FastGFile(GRAPH_PB_PATH, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
graph_nodes = [n for n in graph_def.node]
Now, when you freeze a graph to a .pb file, your variables are converted to Const type, and the weights, which were trainable variables, are also stored as Const in the .pb file. graph_nodes contains all the nodes in the graph, but we are only interested in the Const type nodes.
wts = [n for n in graph_nodes if n.op == 'Const']
Each element of wts is of NodeDef type. It has several attributes such as name, op, etc. The values can be extracted as follows:
from tensorflow.python.framework import tensor_util

for n in wts:
    print("Name of the node - %s" % n.name)
    print("Value - ")
    print(tensor_util.MakeNdarray(n.attr['value'].tensor))
Hope this solves your concern.
You can use this code to get the names of the tensors:
[tensor.name for tensor in tf.get_default_graph().as_graph_def().node]
Just a small utility to print .pb model weights:
import argparse

import tensorflow as tf
from tensorflow.python.framework import tensor_util

def print_pb_weights(pb_filepath):
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(pb_filepath, "rb") as f:
        graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')
    for node in graph_def.node:
        if node.op == 'Const':
            print('-' * 60)
            print('op:', node.op)
            print('name:', node.name)
            arr = tensor_util.MakeNdarray(node.attr['value'].tensor)
            print('shape:', list(arr.shape))
            print(arr)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('pb_filepath')
    args = parser.parse_args()
    print_pb_weights(args.pb_filepath)
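Assuming the script above is saved as print_pb_weights.py (a hypothetical filename), it can be invoked as:
python print_pb_weights.py ./model/tensorflow_inception_v3_stripped_optimized_quantized.pb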

What is the difference between two tensorflow inception models 'inception_v3_2016_08_28.tar.gz' and 'classify_image_graph_def.pb'?

I have tried both of these TensorFlow models for transfer learning:
inception_v3_2016_08_28.tar.gz - from tensorflow-models
classify_image_graph_def.pb - comes along with the TensorFlow image_retraining code.
But the results I am getting are totally different, with the second model performing way better than the first. Is this expected? The first model gives an accuracy of 57%, whereas the second gives 80%.
The first model is a checkpoint file. For transfer learning, I converted the checkpoint file to a protobuf file and then used the retrain.py script that comes along with TensorFlow to do the retraining. The following code is used to convert the checkpoint file to a protobuf file.
checkpoint_file = '../check_points/inception_v3.ckpt'
decode_jpeg_data = tf.placeholder(tf.string)
decode_jpeg = tf.image.decode_jpeg(decode_jpeg_data, channels=3, dct_method="INTEGER_ACCURATE")
if decode_jpeg.dtype != tf.float32:
    decode_jpeg = tf.image.convert_image_dtype(decode_jpeg, dtype=tf.float32)
image_ = tf.expand_dims(decode_jpeg, 0)
image = tf.image.resize_bicubic(image_, [299, 299], align_corners=True)
scaled_input_tensor = tf.scalar_mul((1.0/255), image)
scaled_input_tensor = tf.subtract(scaled_input_tensor, 0.5)
scaled_input_tensor = tf.multiply(scaled_input_tensor, 2.0)
# loading the inception graph
arg_scope = inception_v3_arg_scope()
with slim.arg_scope(arg_scope):
    logits, end_points = inception_v3(inputs=scaled_input_tensor, is_training=False, num_classes=1001)
saver = tf.train.Saver()
sess = tf.Session()
saver.restore(sess, checkpoint_file)
# The snippet as posted never defines output_graph_def; presumably the graph
# was frozen to constants first, e.g. (the output node name here is assumed):
from tensorflow.python.framework import graph_util
output_graph_def = graph_util.convert_variables_to_constants(
    sess, sess.graph.as_graph_def(), ['InceptionV3/Predictions/Reshape_1'])
with gfile.FastGFile('./models/inceptionv3.pb', 'wb') as f:
    f.write(output_graph_def.SerializeToString())
