Is it possible to run multiple TensorFlow object detection models at the same time? (I have trained two models and want to run both in parallel.)
I wrote this code and tried to run it, but it doesn't work:
# First frozen graph
detection_graph1 = tf.Graph()
with detection_graph1.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH1, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

# Second frozen graph
detection_graph2 = tf.Graph()
with detection_graph2.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH2, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

def run_inference_for_multiple_images(path, graph1, graph2):
    with graph1.as_default():
        with tf.Session() as sess1:
            with graph2.as_default():
                with tf.Session() as sess2:
                    # detection code...
Yes, it's absolutely possible, but you're going about it the wrong way.
Don't define the two models in two separate graphs; load them both into the same one (and add proper name scopes to avoid naming conflicts):
graph = tf.Graph()  # just one graph, with both models loaded
with graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH1, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='first_graph')
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH2, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)  # ParseFromString replaces the previous contents
        tf.import_graph_def(od_graph_def, name='second_graph')

    # [...] get the correct input and output tensors for the two graphs via their names

with tf.Session(graph=graph) as sess:  # just one session
    # Running only one of the two at a time
    res_1 = sess.run(outputs_from_graph_1, feed_dict=graph_1_feeds)
    res_2 = sess.run(outputs_from_graph_2, feed_dict=graph_2_feeds)
    # Actually running them in parallel (might not fit in memory!)
    res_1_and_2 = sess.run(outputs_from_graph_1 + outputs_from_graph_2,
                           {**graph_1_feeds, **graph_2_feeds})
Note: I'm assuming the feeds are dicts with tensor_name:values or placeholder_tensor:values key/value pairs
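For completeness, a minimal sketch of the tensor lookups. The 'first_graph'/'second_graph' prefixes come from the name= arguments above; the image_tensor/detection_boxes names assume the standard object detection API export, and image_batch_1/image_batch_2 are hypothetical numpy arrays:
# Hypothetical lookups; prefixes match the name= arguments passed to import_graph_def
input_1 = graph.get_tensor_by_name('first_graph/image_tensor:0')
boxes_1 = graph.get_tensor_by_name('first_graph/detection_boxes:0')
input_2 = graph.get_tensor_by_name('second_graph/image_tensor:0')
boxes_2 = graph.get_tensor_by_name('second_graph/detection_boxes:0')

outputs_from_graph_1 = [boxes_1]           # lists, so they can be concatenated with +
outputs_from_graph_2 = [boxes_2]
graph_1_feeds = {input_1: image_batch_1}   # image_batch_1/2: assumed numpy image batches
graph_2_feeds = {input_2: image_batch_2}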
sess = tf.compat.v1.Session(config=config)

# Load and parse the frozen CTPN graph
with open(r'C:\Users\User\Downloads\New folder (8)\Docify-master\api\data\ctpn.pb', 'rb') as f:
    graph_def = tf.compat.v1.GraphDef()
    graph_def.ParseFromString(f.read())

# Import it into the session's graph
with sess.graph.as_default():
    tf.import_graph_def(graph_def, name='')

input_img = sess.graph.get_tensor_by_name('Placeholder:0')
output_cls_prob = sess.graph.get_tensor_by_name('Reshape_2:0')
output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0')

textdetector = TextDetector()
sess.run(tf.compat.v1.global_variables_initializer())  # <-- raises the error below
TypeError: Argument fetch = None has invalid type "NoneType". Cannot be None
I need to know how to fix it.
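One likely cause, offered as an assumption rather than a verified diagnosis: under TF2's eager execution, tf.compat.v1.global_variables_initializer() can return None when there are no variables to initialize, and sess.run(None) raises exactly this TypeError. A frozen .pb graph contains only constants, so the simplest fix is to drop the initializer call; alternatively, restore graph-mode semantics:
import tensorflow as tf

# Sketch of the fix: disable eager execution before building the session,
# or remove the global_variables_initializer() call entirely, since a
# frozen graph has no variables to initialize.
tf.compat.v1.disable_eager_execution()

sess = tf.compat.v1.Session(config=config)  # config as defined in your script
# ... load and import the frozen graph as above ...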
I'm trying to use a model for OCR, and I have this block of code which results in the error
"AttributeError: module 'tensorflow._api.v2.saved_model' has no attribute 'loader'", which I assume is due to using a TF v1 construct that is no longer valid in v2:
def load_graph(self, modelFile):
    graph = tf.Graph()
    sess = tf.compat.v1.Session(graph=tf.Graph())
    # Point of failure:
    model = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], './model')
    graph_def = model.graph_def
    with open(modelFile, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph
I have already changed the original construction from tf.Session() to tf.compat.v1.Session(), which leads to this error.
What would be the equivalent for TF v2 that I can use in this case?
EDIT: I had previously tried this variation, which produced the same error:
def load_graph(self, modelFile):
    graph = tf.Graph()
    graph_def = tf.GraphDef()
    with open(modelFile, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph
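A minimal sketch of a v2-compatible spelling, assuming ./model holds a TF1-style SavedModel: in TF2 the loader and tag constants live under tf.compat.v1.saved_model.
import tensorflow as tf

def load_graph(self, modelFile):
    graph = tf.Graph()
    sess = tf.compat.v1.Session(graph=tf.Graph())
    # tf.saved_model.loader moved under tf.compat.v1 in TF2
    model = tf.compat.v1.saved_model.loader.load(
        sess, [tf.compat.v1.saved_model.tag_constants.SERVING], './model')
    graph_def = model.graph_def
    with open(modelFile, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph
The pure-v2 alternative, tf.saved_model.load('./model'), returns a trackable object rather than a MetaGraphDef, so the rest of the function would need reworking; the compat.v1 spelling keeps the original flow intact.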
I am working with the ssd_mobilenet_v2_coco_2018_03_29 pretrained TensorFlow model. I want to change the input to a fixed size and save the result as saved_model.pb (I am using the Neuron compiler, which requires this format).
Here is how I change the input tensor to a fixed size:
graph = tf.Graph()
with graph.as_default():
    fixed_image_tensor = tf.placeholder(tf.uint8, shape=(None, 300, 300, 3), name='image_tensor')
    graph_def = tf.GraphDef()
    with tf.io.gfile.GFile(frozen_pb_file, 'rb') as f:
        serialized_graph = f.read()
        graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(graph_def, name='', input_map={"image_tensor:0": fixed_image_tensor})
Now I save the modified graph in the saved_model.pb format using tf.saved_model.simple_save:
image_tensor = graph.get_tensor_by_name('image_tensor:0')
boxes_tensor = graph.get_tensor_by_name('detection_boxes:0')
scores_tensor = graph.get_tensor_by_name('detection_scores:0')
classes_tensor = graph.get_tensor_by_name('detection_classes:0')
num_detections_tensor = graph.get_tensor_by_name('num_detections:0')

sess = tf.Session(graph=graph)

tf.saved_model.simple_save(
    session=sess,
    export_dir='model/',
    inputs={image_tensor.name: image_tensor},
    outputs={
        boxes_tensor.name: boxes_tensor,
        scores_tensor.name: scores_tensor,
        classes_tensor.name: classes_tensor,
        num_detections_tensor.name: num_detections_tensor
    }
)
The code creates the following directory (variables/ is empty):
|-model/
|---variables/
|---saved_model.pb
The saved_model.pb file is only 370 bytes, so it cannot contain the actual graph. I also tried tf.saved_model.Builder like this and this, but still got exactly the same result.
I can still use sess for inference as usual with no problems. What did I do wrong? Are there any other approaches? I am using TensorFlow 1.15.0.
Here is your code slightly rearranged; on TF 1.13 it produced a 67 MB *.pb file. I reloaded the generated SavedModel: the input has your dimensions, and all the listed outputs are present:
import tensorflow as tf

frozen_pb_file = "./ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb"

graph = tf.Graph()
with graph.as_default():
    fixed_image_tensor = tf.placeholder(tf.uint8, shape=(None, 300, 300, 3), name='image_tensor')
    graph_def = tf.GraphDef()
    with tf.io.gfile.GFile(frozen_pb_file, 'rb') as f:
        serialized_graph = f.read()
        graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(graph_def, name='', input_map={"image_tensor:0": fixed_image_tensor})

    image_tensor = graph.get_tensor_by_name('image_tensor:0')
    boxes_tensor = graph.get_tensor_by_name('detection_boxes:0')
    scores_tensor = graph.get_tensor_by_name('detection_scores:0')
    classes_tensor = graph.get_tensor_by_name('detection_classes:0')
    num_detections_tensor = graph.get_tensor_by_name('num_detections:0')

    sess = tf.Session(graph=graph)
    file_writer = tf.summary.FileWriter(logdir='log', graph=graph)

    tf.saved_model.simple_save(
        session=sess,
        export_dir='model/',
        inputs={image_tensor.name: fixed_image_tensor},
        outputs={
            boxes_tensor.name: boxes_tensor,
            scores_tensor.name: scores_tensor,
            classes_tensor.name: classes_tensor,
            num_detections_tensor.name: num_detections_tensor
        }
    )
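A sketch of the reload check mentioned above (simple_save registers its signature under the serving_default key):
# Reload the exported SavedModel in a fresh graph and print its signature
# to confirm the fixed input shape and the four outputs.
with tf.Session(graph=tf.Graph()) as check_sess:
    meta_graph = tf.saved_model.loader.load(
        check_sess, [tf.saved_model.tag_constants.SERVING], 'model/')
    print(meta_graph.signature_def['serving_default'])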
What is the difference between printing graph nodes with these two methods?
The printed order of the ops seems to differ.
def print_graph_v1():
    with open(input_model_filepath, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        for node in graph_def.node:
            print('-' * 60)
            print(node.name)

def print_graph_v2():
    with tf.gfile.GFile(input_model_filepath, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='')
        for op in graph.get_operations():
            print('-' * 60)
            print(op.name)
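graph_def.node lists the nodes in the order they were serialized into the protobuf, while graph.get_operations() returns ops in the order the importer created them, and nothing guarantees those two orders match. A small sketch to confirm that the two listings contain the same ops and differ only in order (input_model_filepath is assumed to be defined as in the snippets above):
import tensorflow as tf

with tf.gfile.GFile(input_model_filepath, 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
proto_names = [node.name for node in graph_def.node]

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
imported_names = [op.name for op in graph.get_operations()]

print(proto_names == imported_names)            # False if only the order differs
print(set(proto_names) == set(imported_names))  # True if the same ops are present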
I'm trying to export my retrained Inception graph (a .pb file) for TensorFlow Serving. Since I was not able to find any snippets for exporting a .pb file, I had to create my own, but I'm obviously doing something wrong, since I'm getting a "No variables to save" error. I would appreciate any help.
Update: after researching more, I think I need a meta graph to supply the variables, but retrain.py doesn't give me a meta graph. Any ideas?
import sys
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter  # this import was missing from the snippet

label_lines = [line.rstrip() for line
               in tf.gfile.GFile("retrained_labels.txt")]

graph = tf.Graph()
with graph.as_default():
    with tf.gfile.FastGFile("retrained_graph.pb", 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # import_graph_def returns None unless return_elements is given,
        # so import into an explicit graph instead of assigning its result
        tf.import_graph_def(graph_def, name='')

    with tf.Session(graph=graph) as sess:
        output_tensor = sess.graph.get_tensor_by_name('final_result:0')
        input_tensor = sess.graph.get_tensor_by_name('DecodeJpeg/contents:0')

        mapping_string = tf.constant(label_lines)
        indices = tf.constant([0, len(label_lines) - 1], tf.int64)
        prediction_classes = tf.contrib.lookup.index_to_string(indices, mapping=mapping_string)

        export_path = sys.argv[1]
        print('Exporting trained model to %s' % export_path)

        init_op = tf.group(tf.initialize_all_tables(), name='init_op')
        saver = tf.train.Saver(sharded=True)  # <-- "No variables to save" is raised here
        model_exporter = exporter.Exporter(saver)
        model_exporter.init(
            sess.graph.as_graph_def(),
            init_op=init_op,
            default_graph_signature=exporter.classification_signature(
                input_tensor=input_tensor,
                classes_tensor=prediction_classes,
                scores_tensor=output_tensor),
            named_graph_signatures={
                # note: inputs/outputs were swapped in the original snippet
                'inputs': exporter.generic_signature({'images': 'DecodeJpeg/contents:0'}),
                'outputs': exporter.generic_signature({'scores': 'final_result:0'})})
        model_exporter.export(export_path, tf.constant(1), sess)
        print('Done exporting!')
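A hedged alternative sketch, not taken from the original code: a frozen graph contains only constants, so tf.train.Saver finds no variables, which is exactly the error. SavedModelBuilder can export such a graph for Serving without a hand-built Saver (graph here is the imported graph from above):
import sys
import tensorflow as tf

export_path = sys.argv[1]
builder = tf.saved_model.builder.SavedModelBuilder(export_path)

with graph.as_default():
    with tf.Session(graph=graph) as sess:
        input_tensor = sess.graph.get_tensor_by_name('DecodeJpeg/contents:0')
        output_tensor = sess.graph.get_tensor_by_name('final_result:0')
        signature = tf.saved_model.signature_def_utils.predict_signature_def(
            inputs={'images': input_tensor},
            outputs={'scores': output_tensor})
        builder.add_meta_graph_and_variables(
            sess,
            [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
            },
            legacy_init_op=tf.tables_initializer())

builder.save()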