How can I implement the resize function in this setup?
For example, in_image has shape (845, 594, 3), but I want to resize it to shape (299, 299, 3):
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder

def main(_):
    with tf.Graph().as_default() as graph:
        input_graph = FLAGS.input_graph
        saved_model_dir = FLAGS.saved_model_dir
        # Read in the exported graph
        with tf.gfile.FastGFile(input_graph, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
        # Define the SavedModel signature (inputs and outputs)
        in_image = graph.get_tensor_by_name('input:0')
        inputs = {'image_bytes': tf.saved_model.utils.build_tensor_info(in_image)}
        out_classes = graph.get_tensor_by_name('InceptionV3/Predictions/Reshape_1:0')
        outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out_classes)}
        signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs,
            outputs=outputs,
            method_name='tensorflow/serving/predict'
        )
        with tf.Session(graph=graph) as sess:
            # Save out the SavedModel.
            b = saved_model_builder.SavedModelBuilder(saved_model_dir)
            b.add_meta_graph_and_variables(
                sess,
                [tf.saved_model.tag_constants.SERVING],
                signature_def_map={'serving_default': signature})
            b.save()

if __name__ == '__main__':
    tf.app.run()
I figured it out:
def main(_):
    with tf.Graph().as_default() as graph:
        input_graph = FLAGS.input_graph
        saved_model_dir = FLAGS.saved_model_dir
        # Read in the exported graph
        with tf.gfile.FastGFile(input_graph, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')
        # Re-import the graph, mapping its input to a preprocessing subgraph
        serialized_tf_example = tf.placeholder(tf.string, name='b64')
        jpeg = preprocess_image(serialized_tf_example)
        out, = tf.import_graph_def(graph.as_graph_def(),
                                   input_map={'input:0': jpeg},
                                   return_elements=['InceptionV3/Predictions/Reshape_1:0'])
        inputs = {'inputs': tf.saved_model.utils.build_tensor_info(serialized_tf_example)}
        outputs = {'prediction': tf.saved_model.utils.build_tensor_info(out)}
        signature = tf.saved_model.signature_def_utils.build_signature_def(
            inputs=inputs,
            outputs=outputs,
            method_name='tensorflow/serving/predict'
        )
        with tf.Session(graph=graph) as sess:
            # Save out the SavedModel.
            b = saved_model_builder.SavedModelBuilder(saved_model_dir)
            b.add_meta_graph_and_variables(
                sess,
                [tf.saved_model.tag_constants.SERVING],
                signature_def_map={'serving_default': signature})
            b.save()
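The preprocess_image helper is not shown in the post. Here is a minimal sketch of what it might look like for InceptionV3 (decode the JPEG bytes, resize to 299x299, rescale to [-1, 1]); the exact preprocessing steps are an assumption:

def preprocess_image(serialized_tf_example):
    # Hypothetical helper -- the original is not shown in the post.
    # Decode the incoming JPEG bytes into an RGB image.
    image = tf.image.decode_jpeg(serialized_tf_example, channels=3)
    # Convert to float in [0, 1], then resize to the 299x299 input
    # that InceptionV3 expects.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    image = tf.image.resize_images(image, [299, 299])
    # Rescale to [-1, 1], the range used by Inception preprocessing.
    image = tf.subtract(tf.multiply(image, 2.0), 1.0)
    # Add a batch dimension: (1, 299, 299, 3).
    return tf.expand_dims(image, 0)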
Related
sess = tf.compat.v1.Session(config=config)
with open(r'C:\Users\User\Downloads\New folder (8)\Docify-master\api\data\ctpn.pb', 'rb') as f:
    graph_def = tf.compat.v1.GraphDef()
    graph_def.ParseFromString(f.read())
    sess.graph.as_default()
    tf.import_graph_def(graph_def, name='')
    sess.run(tf.compat.v1.global_variables_initializer())

input_img = sess.graph.get_tensor_by_name('Placeholder:0')
output_cls_prob = sess.graph.get_tensor_by_name('Reshape_2:0')
output_box_pred = sess.graph.get_tensor_by_name('rpn_bbox_pred/Reshape_1:0')
textdetector = TextDetector()
sess.run(tf.compat.v1.global_variables_initializer())
This fails with:
TypeError: Argument fetch = None has invalid type "NoneType". Cannot be None
How can I fix this?
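A likely cause, assuming this runs under TF 2.x: with eager execution enabled, tf.compat.v1.global_variables_initializer() executes immediately and returns None, so sess.run(None) raises exactly this TypeError. A frozen .pb graph contains only constants anyway, so there is nothing to initialize. A minimal sketch of two possible fixes:

import tensorflow as tf

# Option 1: run the whole script in graph mode, as this TF1-style
# code expects. Call this before building the session.
tf.compat.v1.disable_eager_execution()

# Option 2: drop the sess.run(initializer) calls entirely -- a frozen
# graph has no variables, so the initializer is a no-op at best.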
I am working with the ssd_mobilenet_v2_coco_2018_03_29 pretrained Tensorflow model. I want to change the input to a fixed size and save the model as saved_model.pb (I am using the Neuron Compiler, which requires this format).
Here is how I change the input Tensor to fixed size:
graph = tf.Graph()
with graph.as_default():
    fixed_image_tensor = tf.placeholder(tf.uint8, shape=(None, 300, 300, 3), name='image_tensor')
    graph_def = tf.GraphDef()
    with tf.io.gfile.GFile(frozen_pb_file, 'rb') as f:
        serialized_graph = f.read()
        graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(graph_def, name='', input_map={"image_tensor:0": fixed_image_tensor})
Now I save the modified graph in the saved_model.pb format using tf.saved_model.simple_save:
image_tensor = graph.get_tensor_by_name('image_tensor:0')
boxes_tensor = graph.get_tensor_by_name('detection_boxes:0')
scores_tensor = graph.get_tensor_by_name('detection_scores:0')
classes_tensor = graph.get_tensor_by_name('detection_classes:0')
num_detections_tensor = graph.get_tensor_by_name('num_detections:0')

sess = tf.Session(graph=graph)

tf.saved_model.simple_save(
    session=sess,
    export_dir='model/',
    inputs={image_tensor.name: image_tensor},
    outputs={
        boxes_tensor.name: boxes_tensor,
        scores_tensor.name: scores_tensor,
        classes_tensor.name: classes_tensor,
        num_detections_tensor.name: num_detections_tensor
    }
)
The code creates the following directory (variables/ is empty):
|-model/
|---variables/
|---saved_model.pb
The saved_model.pb is only 370 bytes, so it can hardly contain any actual information. I also tried tf.saved_model.Builder like this and this, but got exactly the same result.
I can still use the sess for inference as usual with no problems. What did I do wrong? Are there any other approaches? I am using Tensorflow 1.15.0.
Here is a slightly rearranged version of your code. With TF 1.13 it produced a 67 MB *.pb file. I reloaded the generated SavedModel and verified that the input has your fixed dimensions and that all the listed outputs are present:
import tensorflow as tf

frozen_pb_file = "./ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb"

graph = tf.Graph()
with graph.as_default():
    fixed_image_tensor = tf.placeholder(tf.uint8, shape=(None, 300, 300, 3), name='image_tensor')

    graph_def = tf.GraphDef()
    with tf.io.gfile.GFile(frozen_pb_file, 'rb') as f:
        serialized_graph = f.read()
    graph_def.ParseFromString(serialized_graph)
    tf.import_graph_def(graph_def, name='', input_map={"image_tensor:0": fixed_image_tensor})

    image_tensor = graph.get_tensor_by_name('image_tensor:0')
    boxes_tensor = graph.get_tensor_by_name('detection_boxes:0')
    scores_tensor = graph.get_tensor_by_name('detection_scores:0')
    classes_tensor = graph.get_tensor_by_name('detection_classes:0')
    num_detections_tensor = graph.get_tensor_by_name('num_detections:0')

    sess = tf.Session(graph=graph)
    file_writer = tf.summary.FileWriter(logdir='log', graph=graph)

    tf.saved_model.simple_save(
        session=sess,
        export_dir='model/',
        inputs={image_tensor.name: fixed_image_tensor},
        outputs={
            boxes_tensor.name: boxes_tensor,
            scores_tensor.name: scores_tensor,
            classes_tensor.name: classes_tensor,
            num_detections_tensor.name: num_detections_tensor
        }
    )
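As a sanity check (my addition, not part of the original answer), the exported model can be reloaded in a fresh session to confirm the signature:

import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    # Load the SavedModel back and inspect its serving signature.
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], 'model/')
    sig = meta_graph.signature_def['serving_default']
    print(sig.inputs)   # image_tensor:0 should report shape (-1, 300, 300, 3)
    print(sig.outputs)  # detection_boxes, detection_scores, ...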
I have tried to freeze my Tensorflow graph and restore it, but when I try to run prediction, I get the error:
You must feed a value for placeholder tensor 'DQNetwork/actions' with dtype float and shape [?,10]
My restoration code is:
sess = tf.Session()
graph = tf.get_default_graph()
with graph.as_default():
    with sess.as_default():
        GRAPH_PB_PATH = "./frozentensorflowModel.pb"
        with gfile.FastGFile(GRAPH_PB_PATH, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        x_tensor = graph.get_tensor_by_name("DQNetwork/inputs:0")
        op_to_restore = graph.get_tensor_by_name("DQNetwork/actions:0")

        new_state(cards.copy())
        state = game_state.state
        feed_dict = {x_tensor: state.reshape((1, *state.shape))}
        opt = []
        opt = sess.run(op_to_restore, feed_dict)  # Error throws
        predictions = np.argmax(opt, 1)
I defined my DQNetwork inputs like so:
DQNetwork.inputs = tf.placeholder(tf.float32, [None, state_size], name="inputs")
DQNetwork.actions = tf.placeholder(tf.float32, [None, action_size], name="actions")
More info:
>>>op_to_restore
<tf.Tensor 'DQNetwork/actions:0' shape=(?, 10) dtype=float32>
>>>op_to_restore.op
<tf.Operation 'DQNetwork/actions' type=Placeholder>
Training line:
results = sess.run(DQNetwork.output, feed_dict = {DQNetwork.inputs: input_batch})
This may help you:
sess = tf.Session()
graph = tf.get_default_graph()
with graph.as_default():
    with sess.as_default():
        GRAPH_PB_PATH = "./frozentensorflowModel.pb"
        with gfile.FastGFile(GRAPH_PB_PATH, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        x_tensor = graph.get_tensor_by_name("DQNetwork/inputs:0")
        # Fetch the operation rather than the tensor
        op_to_restore = graph.get_operation_by_name("DQNetwork/actions")

        new_state(cards.copy())
        state = game_state.state
        feed_dict = {x_tensor: state.reshape((1, *state.shape))}
        opt = sess.run(op_to_restore, feed_dict)
        predictions = np.argmax(opt, 1)
This is what I was suggesting.
I got the point:
feed_dict = {x_tensor: state.reshape((1, *state.shape))}
Instead of sess.run(op_to_restore, feed_dict), try op_to_restore.eval(feed_dict).
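One more observation, for completeness: DQNetwork/actions is itself a placeholder, so evaluating it as a tensor forces you to feed it. Since the training line fetches DQNetwork.output, the inference fetch should presumably be the corresponding output tensor rather than the actions placeholder. The exact tensor name below is an assumption; list graph.get_operations() to find the real one:

x_tensor = graph.get_tensor_by_name("DQNetwork/inputs:0")
# Hypothetical name -- check graph.get_operations() for the real output.
out_tensor = graph.get_tensor_by_name("DQNetwork/output:0")

feed_dict = {x_tensor: state.reshape((1, *state.shape))}
opt = sess.run(out_tensor, feed_dict)
predictions = np.argmax(opt, 1)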
I'm classifying multiple JPEG images in a folder and I get this error: "ValueError: Argument must be a dense tensor: got shape [2], but wanted [2, 16]". I'm trying to perform inference on one image at a time and I'm not sure where I'm making a mistake. Any help would be greatly appreciated!
Relevant code:
def load_graph(model_file):
    graph = tf.Graph()
    graph_def = tf.GraphDef()
    with tf.device('/cpu:0'):
        with open(model_file, "rb") as f:
            graph_def.ParseFromString(f.read())
        with graph.as_default():
            tf.import_graph_def(graph_def)
    return graph

def load_labels(label_file):
    label = []
    proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
    for l in proto_as_ascii_lines:
        label.append(l.rstrip())
    return label

def read_tensor_from_image_file(images):
    image_file_path = "./test_images/"
    filenames = [os.path.join(image_file_path, filename)
                 for filename in os.listdir(image_file_path)]
    filename_queue = tf.train.string_input_producer((filenames, './*.jpg'))
    image_reader = tf.WholeFileReader()
    _, image_file = image_reader.read(filename_queue)
    image_orig = tf.image.decode_jpeg(image_file, channels=3, name='jpeg_reader')
    image = tf.image.resize_images(image_orig, [299, 299])
    image = image.set_shape([299, 299, 3])
    batch_size = 1
    num_preprocess_threads = 1
    min_queue_examples = 50
    images = tf.train.shuffle_batch(
        [image], batch_size=batch_size,
        num_threads=num_preprocess_threads,
        capacity=min_queue_examples + 10 * batch_size,
        min_after_dequeue=min_queue_examples)
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        image_tensor = sess.run([images])
        print(image_tensor)
        coord.request_stop()
        coord.join(threads)
def main(images):
    model_file = "tf_files/retrained_graph.pb"
    label_file = "tf_files/retrained_labels.txt"
    input_layer = "Mul"
    output_layer = "final_result"
    graph = load_graph(model_file)
    t = read_tensor_from_image_file(images)
    input_name = "import/" + input_layer
    output_name = "import/" + output_layer
    input_operation = graph.get_operation_by_name(input_name)
    output_operation = graph.get_operation_by_name(output_name)
    with tf.device('/cpu:0'):
        with tf.Session(graph=graph) as sess:
            config = tf.ConfigProto(device_count={"CPU": 4},
                                    inter_op_parallelism_threads=44,
                                    intra_op_parallelism_threads=44)
            sess = tf.Session(config=config)
            start = time.time()
            results = sess.run(output_operation.outputs[0],
                               {input_operation.outputs[0]: t})
            end = time.time()
            results = np.squeeze(results)
            top_k = results.argsort()[-5:][::-1]
            labels = load_labels(label_file)
            print('\nEvaluation time (1-image): {:.3f}s\n'.format(end - start))
            for i in top_k:
                print(images, labels[i], results[i])
            return [images] + list(results)

if __name__ == "__main__":
    image_list = [f for f in listdir('test_images') if isfile(join('test_images', f))]
    res_list = []
    for image in image_list:
        if image.lower().endswith(('.png', '.jpg', '.jpeg', '.gif')):
            res_list.append(main(join('test_images', image)))
        else:
            if not image.endswith('.jpg') or image.startswith('.'):
                continue
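A note on the likely cause of the ValueError (my reading of the code, not a confirmed fix): tf.train.string_input_producer expects a 1-D string tensor or a list of filenames, but read_tensor_from_image_file passes it the 2-tuple (filenames, './*.jpg'). TensorFlow then tries to convert that tuple into a single dense tensor, which is where the shape [2] vs. [2, 16] complaint comes from. A minimal sketch of the corrected call:

# Pass just the list of filenames...
filename_queue = tf.train.string_input_producer(filenames)

# ...or let TensorFlow glob the directory itself (note that
# match_filenames_once creates a local variable, so run
# tf.local_variables_initializer() before starting the queue runners).
filename_queue = tf.train.string_input_producer(
    tf.train.match_filenames_once('./test_images/*.jpg'))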
I'm trying to export my retrained Inception graph (.pb file) for TensorFlow Serving. Since I was not able to find any snippets for exporting a .pb file, I had to create my own, but obviously I'm doing something wrong, since I'm getting a "No variables to save" error. I would appreciate any help.
Update: after researching more, I think I need a meta graph to supply the variables, but retrain.py doesn't give me a meta graph. Any ideas?
import sys
import tensorflow as tf
from tensorflow.contrib.session_bundle import exporter

label_lines = [line.rstrip() for line
               in tf.gfile.GFile("retrained_labels.txt")]

with tf.gfile.FastGFile("retrained_graph.pb", 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    # import_graph_def returns None here (no return_elements), so the
    # imported graph lives in the default graph.
    tf.import_graph_def(graph_def, name='')

with tf.Session() as sess:
    output_tensor = sess.graph.get_tensor_by_name('final_result:0')
    input_tensor = sess.graph.get_tensor_by_name('DecodeJpeg/contents:0')

    mapping_string = tf.constant(label_lines)
    indices = tf.constant([0, len(label_lines) - 1], tf.int64)
    prediction_classes = tf.contrib.lookup.index_to_string(indices, mapping=mapping_string)

    export_path = sys.argv[1]
    print('Exporting trained model to %s' % export_path)

    init_op = tf.group(tf.initialize_all_tables(), name='init_op')
    saver = tf.train.Saver(sharded=True)  # <-- raises "No variables to save"
    model_exporter = exporter.Exporter(saver)
    model_exporter.init(
        sess.graph.as_graph_def(),
        init_op=init_op,
        default_graph_signature=exporter.classification_signature(
            input_tensor=input_tensor,
            classes_tensor=prediction_classes,
            scores_tensor=output_tensor),
        named_graph_signatures={
            'inputs': exporter.generic_signature({'images': 'DecodeJpeg/contents:0'}),
            'outputs': exporter.generic_signature({'scores': 'final_result:0'})})
    model_exporter.export(export_path, tf.constant(1), sess)
    print('Done exporting!')
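For context on the error itself: retrain.py writes out a frozen graph, in which all variables have already been converted to constants, so tf.train.Saver() finds no variables and raises "No variables to save". One way around the legacy session_bundle exporter is SavedModelBuilder, which does not require a Saver. A rough sketch under that assumption, reusing sess, input_tensor, output_tensor, and export_path from the code above:

builder = tf.saved_model.builder.SavedModelBuilder(export_path)
signature = tf.saved_model.signature_def_utils.build_signature_def(
    inputs={'images': tf.saved_model.utils.build_tensor_info(input_tensor)},
    outputs={'scores': tf.saved_model.utils.build_tensor_info(output_tensor)},
    method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
builder.add_meta_graph_and_variables(
    sess, [tf.saved_model.tag_constants.SERVING],
    signature_def_map={'serving_default': signature},
    # Initialize the label lookup table when the model is loaded.
    legacy_init_op=tf.group(tf.tables_initializer(), name='legacy_init_op'))
builder.save()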