Cannot freeze TensorFlow models into a frozen (.pb) file - python

I am referring to this approach (here) for freezing models into a .pb file. My model is a CNN for text classification; I am using this (Github) repository to train the CNN and export checkpoints. I have trained the model for 4 epochs, and my checkpoints folder looks as follows:
I want to freeze this model into a .pb file. For that I am using the following script:
import os, argparse

import tensorflow as tf

# The original freeze_graph function
# from tensorflow.python.tools.freeze_graph import freeze_graph

dir = os.path.dirname(os.path.realpath(__file__))

def freeze_graph(model_dir, output_node_names):
    """Extract the sub graph defined by the output nodes and convert
    all its variables into constants.
    Args:
        model_dir: the root folder containing the checkpoint state file
        output_node_names: a string containing all the output node names,
            comma separated
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exist. Please specify an export "
            "directory: %s" % model_dir)

    if not output_node_names:
        print("You need to supply the name of a node to --output_node_names.")
        return -1

    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path

    # We build the full filename of our frozen graph
    absolute_model_dir = "/".join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_dir + "/frozen_model.pb"

    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True

    # We start a session using a temporary fresh Graph
    with tf.Session(graph=tf.Graph()) as sess:
        # We import the meta graph in the current default Graph
        saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)

        # We restore the weights
        saver.restore(sess, input_checkpoint)

        # We use a built-in TF helper to export variables to constants
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,  # The session is used to retrieve the weights
            tf.get_default_graph().as_graph_def(),  # The graph_def is used to retrieve the nodes
            output_node_names.split(",")  # The output node names are used to select the useful nodes
        )

        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))

    return output_graph_def

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_dir", type=str, default="", help="Model folder to export")
    parser.add_argument("--output_node_names", type=str, default="", help="The name of the output nodes, comma separated.")
    args = parser.parse_args()
    freeze_graph(args.model_dir, args.output_node_names)
I run the script with the following command:
python3 freeze_graph.py --model_dir /Users/path_to_checkpoints/ --output_node_names softmax
It gives this error:
assert d in name_to_node_map, "%s is not in graph" % d
AssertionError: softmax is not in graph
My model is a CNN for text classification. What should I pass as output_node_names to produce a valid .pb file?

Use the script below to print the tensors in the graph; the last tensor should be the output tensor.
Original author: https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc
import argparse
import tensorflow as tf

def print_tensors(pb_file):
    print('Model File: {}\n'.format(pb_file))

    # read pb into graph_def
    with tf.gfile.GFile(pb_file, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # import graph_def
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)

    # print operations
    for op in graph.get_operations():
        print(op.name + '\t' + str(op.values()))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--pb_file", type=str, required=True, help="Pb file")
    args = parser.parse_args()
    print_tensors(args.pb_file)
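If you cannot produce a .pb at all yet, you can list the node names directly from the checkpoint before freezing; a minimal sketch (the checkpoint folder is your own, and 'output/predictions' is only a guess at what the text-classification CNN names its output, not a confirmed name):
import tensorflow as tf

ckpt = tf.train.latest_checkpoint('/Users/path_to_checkpoints/')

with tf.Session(graph=tf.Graph()) as sess:
    # Import the graph structure stored next to the checkpoint.
    tf.train.import_meta_graph(ckpt + '.meta')
    # Print every op name; pass the real output node (often something like
    # 'output/predictions' rather than 'softmax') to --output_node_names.
    for op in sess.graph.get_operations():
        print(op.name)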

Related

I have the image file, yet FileNotFoundError: [Errno 2] is raised

I am training an image captioning model on Google Colab. While doing that, I can't open an image even though it exists. I printed out the image path and found no faults there. What am I doing wrong here?
Here's the image_path:
/content/gdrive/MyDrive/Thesis/BanglaImageCaptioning/image&caption/validation/7330.png
Here's the script:
!python predict.py --path image_path --checkpoint '/content/gdrive/MyDrive/Thesis/BanglaImageCaptioning/checkpoint.pth'
I can open the image with Image.open directly, but it doesn't open through predict.py. Here is predict.py:
import torch
from transformers import BertTokenizer
from PIL import Image
import argparse
import matplotlib.pyplot as plt
from models import caption
from datasets import coco, utils
from configuration import Config
import os

parser = argparse.ArgumentParser(description='Image Captioning')
parser.add_argument('--path', type=str, help='path to image')
parser.add_argument('--checkpoint', type=str, help='checkpoint path')
args = parser.parse_args()
image_path = args.path
#version = args.v
checkpoint_path = args.checkpoint

config = Config()

# if version == 'v1':
#     model = torch.hub.load('saahiluppal/catr', 'v1', pretrained=True)
# elif version == 'v2':
#     model = torch.hub.load('saahiluppal/catr', 'v2', pretrained=True)
# elif version == 'v3':
#     model = torch.hub.load('saahiluppal/catr', 'v3', pretrained=True)
# else:
print("Checking for checkpoint.")
if checkpoint_path is None:
    raise NotImplementedError('No model to choose from!')
else:
    if not os.path.exists(checkpoint_path):
        raise NotImplementedError('Give valid checkpoint path')
    print("Found checkpoint! Loading!")
    model, _ = caption.build_model(config)
    print("Loading Checkpoint...")
    checkpoint = torch.load(checkpoint_path, map_location='cuda')
    model.load_state_dict(checkpoint['model'])

tokenizer = BertTokenizer.from_pretrained("sagorsarker/bangla-bert-base")
start_token = tokenizer.convert_tokens_to_ids(tokenizer._cls_token)
end_token = tokenizer.convert_tokens_to_ids(tokenizer._sep_token)

image = Image.open(image_path)
image = coco.val_transform(image)
image = image.unsqueeze(0)

def create_caption_and_mask(start_token, max_length):
    caption_template = torch.zeros((1, max_length), dtype=torch.long)
    mask_template = torch.ones((1, max_length), dtype=torch.bool)
    caption_template[:, 0] = start_token
    mask_template[:, 0] = False
    return caption_template, mask_template

caption, cap_mask = create_caption_and_mask(
    start_token, config.max_position_embeddings)

@torch.no_grad()
def evaluate():
    model.eval()
    for i in range(config.max_position_embeddings - 1):
        predictions = model(image, caption, cap_mask)
        predictions = predictions[:, i, :]
        predicted_id = torch.argmax(predictions, axis=-1)
        if predicted_id[0] == 102:
            return caption
        caption[:, i+1] = predicted_id[0]
        cap_mask[:, i+1] = False
    return caption

output = evaluate()
######################################
#print(tokenizer.decode(output[0][2]))
for i in range(len(output[0][1:16])):
    print(tokenizer.decode(output[0][i]))
########################################
result = tokenizer.decode(output[0].tolist(), skip_special_tokens=True)
#result = tokenizer.decode(output[0], skip_special_tokens=True)
print("predicted caption: {}".format(result))
Here's the error msg:
Checking for checkpoint.
Found checkpoint! Loading!
/usr/local/lib/python3.8/dist-packages/torchvision/models/_utils.py:208: UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.
  warnings.warn(
/usr/local/lib/python3.8/dist-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet101_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet101_Weights.DEFAULT` to get the most up-to-date weights.
  warnings.warn(msg)
Loading Checkpoint...
Traceback (most recent call last):
  File "predict.py", line 47, in <module>
    image = Image.open(image_path)
  File "/usr/local/lib/python3.8/dist-packages/PIL/Image.py", line 2843, in open
    fp = builtins.open(filename, "rb")
FileNotFoundError: [Errno 2] No such file or directory: 'image_path'
It looks like the issue is in how you're passing the image_path argument to your script.
In this line:
!python predict.py --path image_path --checkpoint '/content/gdrive/MyDrive/Thesis/BanglaImageCaptioning/checkpoint.pth'
you're passing the literal string "image_path" as the argument instead of the actual path. It should be:
!python predict.py --path '/content/gdrive/MyDrive/Thesis/BanglaImageCaptioning/image&caption/validation/7330.png' --checkpoint '/content/gdrive/MyDrive/Thesis/BanglaImageCaptioning/checkpoint.pth'
So the full path to the image should be used in the command instead of a variable name. Note the quotes around it: the & in image&caption would otherwise be interpreted by the shell.
It is also a good idea to check that the image exists at the given path before trying to open it.
With the corrected argument, the script will open the image and perform the captioning task.
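If you want the script itself to fail with a clearer message, a minimal sketch (a hypothetical guard to place in predict.py just before the Image.open call):
import os

# Fail early, showing exactly which path the script received.
if not os.path.isfile(image_path):
    raise FileNotFoundError('Image not found: {!r}'.format(image_path))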

How to run inference from the SavedModel locally?

I want to run a model locally. I'm trying to train and predict with models from this web course:
https://github.com/GoogleCloudPlatform/tensorflow-without-a-phd/blob/master/tensorflow-planespotting/trainer_yolo/main.py
The model was trained with the code above. It is a YOLO object detection model that detects airplanes, built with tf.estimator. Training ran successfully with the provided code, but I don't know how to run inference with the model.
import tensorflow as tf

# DATA
DATA = './samples/airplane_sample.png'

# Model: This directory contains saved_model.pb and variables
SAVED_MODEL_DIR = './1559196417/'

def decode_image():
    img_bytes = tf.read_file(DATA)
    decoded = tf.image.decode_image(img_bytes, channels=3)
    return tf.cast(decoded, dtype=tf.uint8)

def main1():
    with tf.Session(graph=tf.Graph()) as sess:
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], SAVED_MODEL_DIR)
        img = decode_image()
        result = sess.run(['classes'], feed_dict={'input': img})
        print(result)

def main2():
    model = tf.contrib.predictor.from_saved_model(SAVED_MODEL_DIR)
    pred = model({'image_bytes': [decode_image()], 'square_size': [tf.placeholder(tf.int32)]})
    print(pred)

if __name__ == "__main__":
    main2()
The code above is what I wrote, but it doesn't work, and I don't even know what the problem is. An incorrect input type? Improper API usage? Could you give me some advice?
First run saved_model_cli show --all --dir SAVED_MODEL_DIR in the terminal outside of python to inspect the saved model and check that it has the right tags, inputs and outputs. From there it takes a bit of wrangling to get the necessary info out of the API.
def extract_tensors(signature_def, graph):
    output = dict()
    for key in signature_def:
        value = signature_def[key]
        if isinstance(value, tf.TensorInfo):
            output[key] = graph.get_tensor_by_name(value.name)
    return output

def extract_tags(signature_def, graph):
    output = dict()
    for key in signature_def:
        output[key] = dict()
        output[key]['inputs'] = extract_tensors(
            signature_def[key].inputs, graph)
        output[key]['outputs'] = extract_tensors(
            signature_def[key].outputs, graph)
    return output

with tf.Session(graph=tf.Graph()) as session:
    serve = tf.saved_model.load(
        session, tags=['serve'], export_dir=SAVED_MODEL_DIR)
    tags = extract_tags(serve.signature_def, session.graph)
    model = tags['serving_default']
From there you can try print(model['inputs'], model['outputs']) to see which inputs and outputs were exported and whether they agree with saved_model_cli; if you need another tag, just replace serving_default with it.
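From inside that with block, inference is then just a session.run over the extracted tensors; a minimal sketch (the single-input assumption and the dummy input shape are assumptions, use whatever extract_tags and saved_model_cli actually report):
import numpy as np

input_tensor = list(model['inputs'].values())[0]    # assumes a single input
output_tensors = list(model['outputs'].values())

dummy = np.zeros((1, 256, 256, 3), dtype=np.uint8)  # stand-in image batch
print(session.run(output_tensors, feed_dict={input_tensor: dummy}))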
Maybe this will work:
import numpy as np
import tensorflow as tf
import cv2

detection_graph = tf.Graph()  # was missing in the original snippet

with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile('./1559196417/saved_model.pb', 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        image = cv2.imread('./samples/airplane_sample.png')
        rgb_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        rgb_img_expanded = np.expand_dims(rgb_img, axis=0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        classes = detection_graph.get_tensor_by_name('classes:0')
        result = sess.run([classes], feed_dict={image_tensor: rgb_img_expanded})

TensorFlow Serving produces wrong answers when I export my Keras model

I exported my Keras model to TensorFlow Serving and the export itself works. What I'm trying to do is accept a b64-encoded image string from the client and output a True/False value. My Keras model outputs 3 values, the first of which indicates the degree predicted by the model; I compare it against a fixed value and export the whole pipeline, from input image string to True/False output, to TensorFlow Serving with a RESTful API. However, I do not get correct output from my client program. Long story short, let me show the code.
My program to export the saved model:
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants, signature_constants, signature_def_utils_impl
from keras.models import load_model
from keras.layers import Input
import os

tf.app.flags.DEFINE_string('model_dir', './keras_models',
                           '''Directory which contains keras models''')
tf.app.flags.DEFINE_string('output_dir', './model_output',
                           '''Directory where to export the model''')
tf.app.flags.DEFINE_string('model_version', '1',
                           '''version number of the model''')
tf.app.flags.DEFINE_string('model_file', 'pointer_model.json',
                           '''json file which contains model architecture''')
tf.app.flags.DEFINE_string('weights_file', 'pointer_model.h5',
                           '''h5 file that contains model weights''')

FLAGS = tf.app.flags.FLAGS

def preprocess_image(image_buffer):
    '''
    Preprocess JPEG encoded bytes to 3D float tensor
    :param image_buffer:
    :return: 4D image tensor (1, width, height, channels)
    '''
    image = tf.image.decode_jpeg(image_buffer, channels=3)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    return image

def main(_):
    with tf.Graph().as_default():
        serialized_tf_example = tf.placeholder(tf.string, name='input_image')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(
                shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)
        jpegs = tf_example['image/encoded']
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)
        images = tf.squeeze(images, [0])
        images = tf.expand_dims(images, axis=0)

        # now the image shape is [1, ?, ?, 3]
        images = tf.image.resize_images(images, tf.constant([224, 224]))
        model = load_model('./keras_models/my_model.h5')
        x = Input(tensor=images)
        y = model(x)

        model.summary()

        compare_value = tf.Variable(100.0)
        bool_out = tf.math.greater(y, compare_value)

        bool_out = bool_out[:, 0]
        bool_out = tf.cast(bool_out, tf.float32)
        bool_out = tf.expand_dims(bool_out, axis=0)

        final_out = tf.concat([tf.transpose(y), bool_out], axis=0)

        init = tf.global_variables_initializer()

        with tf.Session() as sess:
            sess.run(init)

            # predict_tensor_input_info = tf.saved_model.utils.build_tensor_info(jpegs)
            # predict_tensor_score_info = tf.saved_model.utils.build_tensor_info(bool_out)
            prediction_signature = \
                (tf.saved_model.signature_def_utils.predict_signature_def(
                    inputs={'images': jpegs},
                    outputs={'scores': final_out}
                ))

            export_path = os.path.join(
                tf.compat.as_bytes(FLAGS.output_dir),
                tf.compat.as_bytes(FLAGS.model_version)
            )

            builder = saved_model_builder.SavedModelBuilder(export_path)

            legacy_init_op = tf.group(tf.tables_initializer(),
                                      name='legacy_init_op')

            builder.add_meta_graph_and_variables(
                sess, [tag_constants.SERVING],
                signature_def_map={
                    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_signature,
                },
                legacy_init_op=legacy_init_op
            )

            builder.save()

if __name__ == "__main__":
    tf.app.run()
and this is my client program:
import base64
import requests
import json
import argparse
import time
from glob import glob

image_path = glob('./segmented_image/*.jpg')

for i in range(len(image_path)):
    input_image = open(image_path[i], 'rb').read()
    encoded_input_string = base64.b64encode(input_image)
    input_string = encoded_input_string.decode('utf-8')

    # input_image_recover = base64.b64decode(input_string)
    # with open('recovered_image.jpg', 'wb') as output_file:
    #     output_file.write(input_image_recover)
    #
    # print('Base64 encoded string: ' + input_string[:10] + '...' + input_string[-10:])

    instance = [{"b64": input_string}]
    data = json.dumps({"instances": instance})
    print(data[:30] + '...' + data[-10:])

    json_response = requests.post('http://localhost:8501/v1/models/pointer_model:predict',
                                  data=data)
    print(json_response.text)
    end_time = time.time()
The output from json_response.text looks like:
{"instances": [{"b64": "/9j/4A...Y//9k="}]}
{
"predictions": [[-0.00015692], [-0.000967527], [0.000567942], [0.0]
]
}
{"instances": [{"b64": "/9j/4A...if/9k="}]}
{
"predictions": [[-0.000157582], [-0.000998327], [0.000598866], [0.0]
]
}
......
The first 3 values under the predictions key are supposed to be the degree and the x, y coordinates in the image, which should be in the hundreds; the last value is the True/False result of the comparison with 100.0, cast to float32.
And lastly, I have also tested my model with model.predict, which gives the correct answer.
Now I'm completely confused. Can someone tell me what is wrong with my code?
Use my script to export in the TensorFlow Serving format:
import sys
from keras.models import load_model
import tensorflow as tf
from keras import backend as K
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

K.set_learning_phase(0)
K.set_image_data_format('channels_last')

INPUT_MODEL = sys.argv[1]
NUMBER_OF_OUTPUTS = 1
OUTPUT_NODE_PREFIX = 'output_node'
OUTPUT_FOLDER = 'frozen'
OUTPUT_GRAPH = 'frozen_model.pb'
OUTPUT_SERVABLE_FOLDER = sys.argv[2]
INPUT_TENSOR = sys.argv[3]

try:
    model = load_model(INPUT_MODEL)
except ValueError as err:
    print('Please check the input saved model file')
    raise err

output = [None] * NUMBER_OF_OUTPUTS
output_node_names = [None] * NUMBER_OF_OUTPUTS
for i in range(NUMBER_OF_OUTPUTS):
    output_node_names[i] = OUTPUT_NODE_PREFIX + str(i)
    output[i] = tf.identity(model.outputs[i], name=output_node_names[i])
print('Output Tensor names: ', output_node_names)

sess = K.get_session()
try:
    frozen_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), output_node_names)
    graph_io.write_graph(frozen_graph, OUTPUT_FOLDER, OUTPUT_GRAPH, as_text=False)
    print(f'Frozen graph ready for inference/serving at {OUTPUT_FOLDER}/{OUTPUT_GRAPH}')
except:
    print('Error Occurred')

builder = tf.saved_model.builder.SavedModelBuilder(OUTPUT_SERVABLE_FOLDER)

with tf.gfile.GFile(f'{OUTPUT_FOLDER}/{OUTPUT_GRAPH}', "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

sigs = {}
OUTPUT_TENSOR = output_node_names
with tf.Session(graph=tf.Graph()) as sess:
    tf.import_graph_def(graph_def, name="")
    g = tf.get_default_graph()
    inp = g.get_tensor_by_name(INPUT_TENSOR)
    out = g.get_tensor_by_name(OUTPUT_TENSOR[0] + ':0')

    sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
        tf.saved_model.signature_def_utils.predict_signature_def(
            {"input": inp}, {"output": out})

    builder.add_meta_graph_and_variables(sess,
                                         [tag_constants.SERVING],
                                         signature_def_map=sigs)

try:
    builder.save()
    print(f'Model ready for deployment at {OUTPUT_SERVABLE_FOLDER}/saved_model.pb')
    print('Prediction signature : ')
    print(sigs['serving_default'])
except:
    print('Error Occurred, please check the frozen graph')
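A hypothetical invocation, following the sys.argv order above (the script file name and the input tensor name are assumptions that depend on your model):
python export_servable.py pointer_model.h5 ./servable input_1:0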

Object Detection using TensorFlow

I am following tensorflow object detection tutorial for Oxford-IIIT Pets Dataset: https://github.com/tensorflow/models/blob/master/object_detection/g3doc/running_pets.md
I have successfully generated the "frozen_inference_graph.pb" from the latest checkpoint.
How can I test the inference graph "frozen_inference_graph.pb" and the pet labels "pet_label_map.pbtxt" on an image?
I have tried the Jupyter notebook, but nothing gets detected in the image. I have also used the following Python code for detecting "dog" and "cat", but again nothing gets detected:
import os
import cv2
import time
import argparse
import multiprocessing
import numpy as np
import tensorflow as tf
from utils import FPS, WebcamVideoStream
from multiprocessing import Queue, Pool
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

PATH_TO_CKPT = os.path.join('frozen_inference_graph.pb')
PATH_TO_LABELS = os.path.join('pet_label_map.pbtxt')
NUM_CLASSES = 37

label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
                                                            use_display_name=True)
category_index = label_map_util.create_category_index(categories)

def detect_objects(image_np, sess, detection_graph):
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Each box represents a part of the image where a particular object was detected.
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score represents the level of confidence for each of the objects.
    # Score is shown on the result image, together with the class label.
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Actual detection.
    (boxes, scores, classes, num_detections) = sess.run(
        [boxes, scores, classes, num_detections],
        feed_dict={image_tensor: image_np_expanded})

    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8)
    return image_np

def worker(input_q, output_q):
    # Load a (frozen) Tensorflow model into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

        sess = tf.Session(graph=detection_graph)

    frame = input_q.get()
    output_q.put(detect_objects(frame, sess, detection_graph))

    sess.close()

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-src', '--source', dest='video_source', type=int,
                        default=0, help='Device index of the camera.')
    parser.add_argument('-wd', '--width', dest='width', type=int,
                        default=20, help='Width of the frames in the video stream.')
    parser.add_argument('-ht', '--height', dest='height', type=int,
                        default=20, help='Height of the frames in the video stream.')
    parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
                        default=2, help='Number of workers.')
    parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
                        default=5, help='Size of the queue.')
    args = parser.parse_args()

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    input_q = Queue(maxsize=args.queue_size)
    output_q = Queue(maxsize=args.queue_size)
    pool = Pool(args.num_workers, worker, (input_q, output_q))

    frame = cv2.imread("image2.jpg")
    input_q.put(frame)

    cv2.imshow('Video', output_q.get())
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Any help with running the inference graph on an actual image, or with debugging why nothing gets detected, would be greatly appreciated.
If you are using the TensorFlow Object Detection API, go to the folder models/research and open a console there.
In the research folder run the command protoc object_detection/protos/*.proto --python_out=. and then export PYTHONPATH=$PYTHONPATH:`pwd`:`pwd`/slim.
Then run cd object_detection to change folder in the console and open jupyter notebook in the current folder.
In the Jupyter notebook's home, find the file object_detection_tutorial.ipynb and modify it so that it suits your purposes.
What are the outputs of boxes, scores and classes? Can you print them? If you get numbers from them, maybe you just need to change a few lines in your code to properly visualize the results.
For a test, you can use:
import matplotlib.image as mpimg
import matplotlib.pyplot as plt

vis_util.save_image_array_as_png(image, './outputImg.png')
# print(image.shape)
print('image saved')
img = mpimg.imread('./outputImg.png')
imgplot = plt.imshow(img)
plt.show()
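And printing boxes, scores and classes, as suggested above, could look like this minimal sketch (the 0.5 threshold is arbitrary; place it in detect_objects right after the sess.run call):
# Print every detection above an arbitrary confidence threshold.
for box, score, cls in zip(np.squeeze(boxes), np.squeeze(scores),
                           np.squeeze(classes).astype(np.int32)):
    if score > 0.5:
        print(category_index[cls]['name'], score, box)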

Inception: How to create an output function from image input

How do you create a function for Inception v3 that:
Takes an image as input.
Prints out logits of the labels as output.
The original code for inception v3 is here:
https://github.com/tensorflow/models/tree/master/inception
Example code where they manage to compute output from a graph is shown below. I want the model to use a checkpoint instead of a graph; however, I don't know how to do the same thing as the example below with a checkpoint.
"""Simple image classification with Inception.
Run image classification with Inception trained on ImageNet 2012 Challenge data
set.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
Change the --image_file argument to any jpg image to compute a
classification of that image.
Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.
https://tensorflow.org/tutorials/image_recognition/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
FLAGS = None
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
class NodeLookup(object):
"""Converts integer node ID's to human readable labels."""
def __init__(self,
label_lookup_path=None,
uid_lookup_path=None):
if not label_lookup_path:
label_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
if not uid_lookup_path:
uid_lookup_path = os.path.join(
FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
self.node_lookup = self.load(label_lookup_path, uid_lookup_path)
def load(self, label_lookup_path, uid_lookup_path):
"""Loads a human readable English name for each softmax node.
Args:
label_lookup_path: string UID to integer node ID.
uid_lookup_path: string UID to human-readable string.
Returns:
dict from integer node ID to human-readable string.
"""
if not tf.gfile.Exists(uid_lookup_path):
tf.logging.fatal('File does not exist %s', uid_lookup_path)
if not tf.gfile.Exists(label_lookup_path):
tf.logging.fatal('File does not exist %s', label_lookup_path)
# Loads mapping from string UID to human-readable string
proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
uid_to_human = {}
p = re.compile(r'[n\d]*[ \S,]*')
for line in proto_as_ascii_lines:
parsed_items = p.findall(line)
uid = parsed_items[0]
human_string = parsed_items[2]
uid_to_human[uid] = human_string
# Loads mapping from string UID to integer node ID.
node_id_to_uid = {}
proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
for line in proto_as_ascii:
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1]
node_id_to_uid[target_class] = target_class_string[1:-2]
# Loads the final mapping of integer node ID to human-readable string
node_id_to_name = {}
for key, val in node_id_to_uid.items():
if val not in uid_to_human:
tf.logging.fatal('Failed to locate: %s', val)
name = uid_to_human[val]
node_id_to_name[key] = name
return node_id_to_name
def id_to_string(self, node_id):
if node_id not in self.node_lookup:
return ''
return self.node_lookup[node_id]
def create_graph():
"""Creates a graph from saved GraphDef file and returns a saver."""
# Creates graph from saved graph_def.pb.
with tf.gfile.FastGFile(os.path.join(
FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def run_inference_on_image(image):
"""Runs inference on an image.
Args:
image: Image file name.
Returns:
Nothing
"""
if not tf.gfile.Exists(image):
tf.logging.fatal('File does not exist %s', image)
image_data = tf.gfile.FastGFile(image, 'rb').read()
# Creates graph from saved GraphDef.
create_graph()
with tf.Session() as sess:
# Some useful tensors:
# 'softmax:0': A tensor containing the normalized prediction across
# 1000 labels.
# 'pool_3:0': A tensor containing the next-to-last layer containing 2048
# float description of the image.
# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
# encoding of the image.
# Runs the softmax tensor by feeding the image_data as input to the graph.
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor,
{'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = NodeLookup()
top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
def maybe_download_and_extract():
"""Download and extract model tar file."""
dest_directory = FLAGS.model_dir
if not os.path.exists(dest_directory):
os.makedirs(dest_directory)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(dest_directory, filename)
if not os.path.exists(filepath):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> Downloading %s %.1f%%' % (
filename, float(count * block_size) / float(total_size) * 100.0))
sys.stdout.flush()
filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
print()
statinfo = os.stat(filepath)
print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def main(_):
maybe_download_and_extract()
image = (FLAGS.image_file if FLAGS.image_file else
os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))
run_inference_on_image(image)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# classify_image_graph_def.pb:
# Binary representation of the GraphDef protocol buffer.
# imagenet_synset_to_human_label_map.txt:
# Map from synset ID to a human readable string.
# imagenet_2012_challenge_label_map_proto.pbtxt:
# Text representation of a protocol buffer mapping a label to synset ID.
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/imagenet',
help="""\
Path to classify_image_graph_def.pb,
imagenet_synset_to_human_label_map.txt, and
imagenet_2012_challenge_label_map_proto.pbtxt.\
"""
)
parser.add_argument(
'--image_file',
type=str,
default='',
help='Absolute path to image file.'
)
parser.add_argument(
'--num_top_predictions',
type=int,
default=5,
help='Display this many predictions.'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
Just run it like this: python classify_image.py --image_file /path/to/file
This will take the image as input and output the labels.
You might also want to try adding the line below. It identifies and analyses the most recently added .jpg file in the specified folder.
newest = max(glob.iglob('/home/l2grp/Jetty/src/ubiserv/simple/img/*.[Jj][Pp][Gg]'), key=os.path.getctime)
The rest of the modified script is identical to classify_image.py above, except for three changes: glob is imported, the newest line sits at module level, and main() skips the download step and classifies that newest file.

import glob

# Pick the most recently created .jpg in the watched folder.
newest = max(glob.iglob('/home/l2grp/Jetty/src/ubiserv/simple/img/*.[Jj][Pp][Gg]'),
             key=os.path.getctime)


def main(_):
  image = newest
  run_inference_on_image(image)
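As for the original ask, running from a checkpoint instead of a GraphDef: in TF 1.x you can rebuild the graph from the .meta file saved next to the checkpoint, restore the weights, and then look tensors up by name exactly the way the script above runs 'softmax:0'. A minimal sketch (the checkpoint prefix is a hypothetical path, and the tensor names must be discovered from your own graph):
import tensorflow as tf

CKPT_PREFIX = '/tmp/imagenet_train/model.ckpt-157585'  # hypothetical path

with tf.Session(graph=tf.Graph()) as sess:
    # Rebuild the graph recorded alongside the checkpoint, then restore weights.
    saver = tf.train.import_meta_graph(CKPT_PREFIX + '.meta')
    saver.restore(sess, CKPT_PREFIX)

    # List operation names to find the input and logits tensors, then
    # sess.run them just as the GraphDef example does with 'softmax:0'.
    for op in sess.graph.get_operations():
        print(op.name)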
