sess = tf.compat.v1.Session(config=config)
with open(r'C:\Users\User\Downloads\New folder (8)\Docify-master\api\data\ctpn.pb', 'rb') as f:
    graph_def = tf.compat.v1.GraphDef()
    graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(graph_def, name='')
sess.run(tf.compat.v1.global_variables_initializer())  # <- raises the TypeError below
input_img = sess.graph.get_tensor_by_name('Placeholder:0')
output_cls_prob = sess.graph.get_tensor_by_name('Reshape_2:0')
output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0')
textdetector = TextDetector()
sess.run(tf.compat.v1.global_variables_initializer())
TypeError: Argument fetch = None has invalid type "NoneType". Cannot be None
I need to know how to fix it.
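A likely cause, assuming this runs under TensorFlow 2.x: with eager execution enabled, tf.compat.v1.global_variables_initializer() returns None (a frozen graph has no v1 variables to initialize), and passing None as a fetch to sess.run() raises exactly this TypeError. A sketch of the usual workaround, which disables eager execution and drops the initializer runs entirely, since a frozen .pb stores its weights as constants:
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # restore TF1 graph/session semantics

sess = tf.compat.v1.Session(config=config)
with open(pb_path, 'rb') as f:  # pb_path: path to your ctpn.pb
    graph_def = tf.compat.v1.GraphDef()
    graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
# No global_variables_initializer() run is needed: the frozen graph holds only constants.
input_img = sess.graph.get_tensor_by_name('Placeholder:0')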
I'm trying to use a model for OCR, and I have this block of code, which results in the error:
"AttributeError: module 'tensorflow._api.v2.saved_model' has no attribute 'loader'"
I assume this is due to using a TF version 1 construct that is no longer valid in v2.
def load_graph(self, modelFile):
    graph = tf.Graph()
    # graph_def = tf.compat.v1.GraphDef()
    sess = tf.compat.v1.Session(graph=tf.Graph())
    # Point of failure:
    model = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], './model')
    graph_def = model.graph_def
    with open(modelFile, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph
I have changed the original construction from tf.Session() to tf.compat.v1.Session(), which leads us to this error.
What would be the equivalent for TF V2 that I can use in this case?
EDIT: Previously I had tried this variation, which produced the same error.
def load_graph(self, modelFile):
    graph = tf.Graph()
    graph_def = tf.GraphDef()
    with open(modelFile, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph
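In TF2, the v1 loader survives under the compat module, so one minimal fix is to go through tf.compat.v1 (a sketch, assuming a TF1-style SavedModel lives in './model'; tf.compat.v1.saved_model.load is the compat spelling of the old tf.saved_model.loader.load):
import tensorflow as tf

def load_graph(self, modelFile):
    graph = tf.Graph()
    with graph.as_default():
        sess = tf.compat.v1.Session(graph=graph)
        model = tf.compat.v1.saved_model.load(sess, [tf.saved_model.SERVING], './model')
    return graph
For the frozen-graph variation, tf.GraphDef is likewise reachable as tf.compat.v1.GraphDef.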
I am working with the ssd_mobilenet_v2_coco_2018_03_29 pretrained TensorFlow model. I want to change the input to a fixed size and save the model as saved_model.pb (I am using the Neuron Compiler, which requires this format).
Here is how I change the input tensor to a fixed size:
graph = tf.Graph()
with graph.as_default():
    fixed_image_tensor = tf.placeholder(tf.uint8, shape=(None, 300, 300, 3), name='image_tensor')
    graph_def = tf.GraphDef()
    with tf.io.gfile.GFile(frozen_pb_file, 'rb') as f:
        serialized_graph = f.read()
    graph_def.ParseFromString(serialized_graph)
    tf.import_graph_def(graph_def, name='', input_map={"image_tensor:0": fixed_image_tensor})
Now I save the modified graph in saved_model.pb format using tf.saved_model.simple_save:
image_tensor = graph.get_tensor_by_name('image_tensor:0')
boxes_tensor = graph.get_tensor_by_name('detection_boxes:0')
scores_tensor = graph.get_tensor_by_name('detection_scores:0')
classes_tensor = graph.get_tensor_by_name('detection_classes:0')
num_detections_tensor = graph.get_tensor_by_name('num_detections:0')

sess = tf.Session(graph=graph)

tf.saved_model.simple_save(
    session=sess,
    export_dir='model/',
    inputs={image_tensor.name: image_tensor},
    outputs={
        boxes_tensor.name: boxes_tensor,
        scores_tensor.name: scores_tensor,
        classes_tensor.name: classes_tensor,
        num_detections_tensor.name: num_detections_tensor
    }
)
The code creates the following directory (variables/ is empty):
|-model/
|---variables/
|---saved_model.pb
The saved_model.pb is only 370 bytes, so it must contain no actual information. I also tried tf.saved_model.Builder like this and this, but still got the exact same result.
I can still use the session for inference as usual with no problems. What did I do wrong? Are there any other approaches? I am using TensorFlow 1.15.0.
Here is a slightly rearranged version of the code, on TF 1.13, which produced a 67 MB *.pb file. I reloaded the generated SavedModel: the input has your dimensions, and all the listed outputs are present:
import tensorflow as tf

frozen_pb_file = "./ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb"

graph = tf.Graph()
with graph.as_default():
    fixed_image_tensor = tf.placeholder(tf.uint8, shape=(None, 300, 300, 3), name='image_tensor')
    graph_def = tf.GraphDef()
    with tf.io.gfile.GFile(frozen_pb_file, 'rb') as f:
        serialized_graph = f.read()
    graph_def.ParseFromString(serialized_graph)
    tf.import_graph_def(graph_def, name='', input_map={"image_tensor:0": fixed_image_tensor})

    image_tensor = graph.get_tensor_by_name('image_tensor:0')
    boxes_tensor = graph.get_tensor_by_name('detection_boxes:0')
    scores_tensor = graph.get_tensor_by_name('detection_scores:0')
    classes_tensor = graph.get_tensor_by_name('detection_classes:0')
    num_detections_tensor = graph.get_tensor_by_name('num_detections:0')

    sess = tf.Session(graph=graph)
    file_writer = tf.summary.FileWriter(logdir='log', graph=graph)

    tf.saved_model.simple_save(
        session=sess,
        export_dir='model/',
        inputs={image_tensor.name: fixed_image_tensor},  # note: the fixed placeholder goes here
        outputs={
            boxes_tensor.name: boxes_tensor,
            scores_tensor.name: scores_tensor,
            classes_tensor.name: classes_tensor,
            num_detections_tensor.name: num_detections_tensor
        }
    )
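To confirm that the export actually contains the weights, the generated directory can be reloaded and its signature inspected (a minimal check, assuming TF 1.x and the model/ directory created above):
import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    # Reload under the SERVING tag and print the signature that simple_save wrote
    meta_graph = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], 'model/')
    print(meta_graph.signature_def['serving_default'])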
I have tried to freeze my TensorFlow graph and restore it, but when I try to run a prediction, I get the error:
You must feed a value for placeholder tensor 'DQNetwork/actions' with dtype float and shape [?,10]
My restoration code is:
sess = tf.Session()
graph = tf.get_default_graph()

with graph.as_default():
    with sess.as_default():
        GRAPH_PB_PATH = "./frozentensorflowModel.pb"
        with gfile.FastGFile(GRAPH_PB_PATH, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        x_tensor = graph.get_tensor_by_name("DQNetwork/inputs:0")
        op_to_restore = graph.get_tensor_by_name("DQNetwork/actions:0")

        new_state(cards.copy())
        state = game_state.state
        feed_dict = {x_tensor: state.reshape((1, *state.shape))}
        opt = []
        opt = sess.run(op_to_restore, feed_dict)  # Error throws
        predictions = np.argmax(opt, 1)
I defined my DQNetwork inputs like so:
DQNetwork.inputs = tf.placeholder(tf.float32, [None, state_size], name="inputs")
DQNetwork.actions = tf.placeholder(tf.float32, [None, action_size], name="actions")
More info:
>>>op_to_restore
<tf.Tensor 'DQNetwork/actions:0' shape=(?, 10) dtype=float32>
>>>op_to_restore.op
<tf.Operation 'DQNetwork/actions' type=Placeholder>
Training line:
results = sess.run(DQNetwork.output, feed_dict = {DQNetwork.inputs: input_batch})
This may help you:
sess = tf.Session()
graph = tf.get_default_graph()

with graph.as_default():
    with sess.as_default():
        GRAPH_PB_PATH = "./frozentensorflowModel.pb"
        with gfile.FastGFile(GRAPH_PB_PATH, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        x_tensor = graph.get_tensor_by_name("DQNetwork/inputs:0")
        # Changed: fetch the Operation by name instead of the placeholder Tensor
        op_to_restore = graph.get_operation_by_name("DQNetwork/actions")

        new_state(cards.copy())
        state = game_state.state
        feed_dict = {x_tensor: state.reshape((1, *state.shape))}
        opt = []
        opt = sess.run(op_to_restore, feed_dict)  # Error throws
        predictions = np.argmax(opt, 1)
This is what I was suggesting.
I got the point:
feed_dict = {x_tensor: state.reshape((1, *state.shape))}
Instead of sess.run(op_to_restore, feed_dict), try op_to_restore.eval(feed_dict).
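For what it is worth, the error message itself points at the underlying problem: 'DQNetwork/actions' is a Placeholder, so fetching it forces you to feed it. Since the training line above fetches DQNetwork.output, the prediction path should fetch that output tensor instead (a sketch; the name 'DQNetwork/output:0' is an assumption about how the output op was named):
# Fetch the network output rather than the 'actions' placeholder (name below is assumed):
output_tensor = graph.get_tensor_by_name("DQNetwork/output:0")
feed_dict = {x_tensor: state.reshape((1, *state.shape))}
opt = sess.run(output_tensor, feed_dict)
predictions = np.argmax(opt, 1)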
What is the difference between these two methods of printing graph nodes? The printed order of the ops seems to be different.
def print_graph_v1():
    with open(input_model_filepath, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    for node in graph_def.node:
        print('-' * 60)
        print(node.name)

def print_graph_v2():
    with tf.gfile.GFile(input_model_filepath, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='')
    for op in graph.get_operations():
        print('-' * 60)
        print(op.name)
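The first function prints the NodeDefs in the order they were serialized into the GraphDef proto; the second materializes the proto into a Graph via import_graph_def and then walks the resulting operations, so the ordering (and, if import renames or adds anything, even the names) can differ. A small check that compares the two listings directly, assuming the same input_model_filepath:
import tensorflow as tf

def compare_node_orders(input_model_filepath):
    graph_def = tf.GraphDef()
    with tf.gfile.GFile(input_model_filepath, "rb") as f:
        graph_def.ParseFromString(f.read())
    proto_names = [node.name for node in graph_def.node]   # order as stored in the proto
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='')
    op_names = [op.name for op in graph.get_operations()]  # order after import
    print('same names:', set(proto_names) == set(op_names))
    print('same order:', proto_names == op_names)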
How can I add a resize step to this construction?
For example, in_image has shape (845, 594, 3), but I want to resize the image to shape (299, 299, 3).
def main(_):
    with tf.Graph().as_default() as graph:
        input_graph = FLAGS.input_graph
        saved_model_dir = FLAGS.saved_model_dir

        # Read in the export graph
        with tf.gfile.FastGFile(input_graph, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        # Define SavedModel signature (inputs and outputs)
        in_image = graph.get_tensor_by_name('input:0')
        inputs = {'image_bytes': tf.saved_model.utils.build_tensor_info(in_image)}

        out_classes = graph.get_tensor_by_name('InceptionV3/Predictions/Reshape_1:0')
        outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out_classes)}

        signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs,
            outputs=outputs,
            method_name='tensorflow/serving/predict'
        )

    with tf.Session(graph=graph) as sess:
        # Save out the SavedModel.
        b = saved_model_builder.SavedModelBuilder(saved_model_dir)
        b.add_meta_graph_and_variables(sess,
                                       [tf.saved_model.tag_constants.SERVING],
                                       signature_def_map={'serving_default': signature})
        b.save()

if __name__ == '__main__':
    tf.app.run()
I figured it out:
def main(_):
    with tf.Graph().as_default() as graph:
        input_graph = FLAGS.input_graph
        saved_model_dir = FLAGS.saved_model_dir

        # Read in the export graph
        with tf.gfile.FastGFile(input_graph, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        serialized_tf_example = tf.placeholder(tf.string, name='b64')
        jpeg = preprocess_image(serialized_tf_example)
        out, = tf.import_graph_def(graph.as_graph_def(),
                                   input_map={'input:0': jpeg},
                                   return_elements=['InceptionV3/Predictions/Reshape_1:0'])

        inputs = {'inputs': tf.saved_model.utils.build_tensor_info(serialized_tf_example)}
        outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out)}

        signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs,
            outputs=outputs,
            method_name='tensorflow/serving/predict'
        )

    with tf.Session(graph=graph) as sess:
        # Save out the SavedModel.
        b = saved_model_builder.SavedModelBuilder(saved_model_dir)
        b.add_meta_graph_and_variables(sess,
                                       [tf.saved_model.tag_constants.SERVING],
                                       signature_def_map={'serving_default': signature})
        b.save()
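preprocess_image is not shown in the answer above; a minimal version, assuming it should decode the incoming JPEG bytes and resize them to the 299x299 InceptionV3 input, could look like this (the body below is a guess, not the author's code):
def preprocess_image(image_bytes):
    # Decode the JPEG string tensor into a float image in [0, 1] (assumed behavior)
    image = tf.image.decode_jpeg(image_bytes, channels=3)
    image = tf.image.convert_image_dtype(image, tf.float32)
    # Resize to the 299x299 InceptionV3 input and add a batch dimension
    image = tf.image.resize_images(image, [299, 299])
    return tf.expand_dims(image, 0)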