Inception: How to create an output function from image input - python

How do you create a function for Inception v3 that:

Takes an image as input.
Prints the logits of the labels as output.

The original code for Inception v3 is here:
https://github.com/tensorflow/models/tree/master/inception
Example code where they manage to compute the output from a graph is below. I want the model to use a checkpoint instead of a graph, but I don't know how to do the same thing as in the example below with a checkpoint.
"""Simple image classification with Inception.
Run image classification with Inception trained on ImageNet 2012 Challenge data
set.
This program creates a graph from a saved GraphDef protocol buffer,
and runs inference on an input JPEG image. It outputs human readable
strings of the top 5 predictions along with their probabilities.
Change the --image_file argument to any jpg image to compute a
classification of that image.
Please see the tutorial and website for a detailed description of how
to use this script to perform image recognition.
https://tensorflow.org/tutorials/image_recognition/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
FLAGS = None
# pylint: disable=line-too-long
DATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
# pylint: enable=line-too-long
class NodeLookup(object):
  """Converts integer node ID's to human readable labels."""

  def __init__(self,
               label_lookup_path=None,
               uid_lookup_path=None):
    if not label_lookup_path:
      label_lookup_path = os.path.join(
          FLAGS.model_dir, 'imagenet_2012_challenge_label_map_proto.pbtxt')
    if not uid_lookup_path:
      uid_lookup_path = os.path.join(
          FLAGS.model_dir, 'imagenet_synset_to_human_label_map.txt')
    self.node_lookup = self.load(label_lookup_path, uid_lookup_path)

  def load(self, label_lookup_path, uid_lookup_path):
    """Loads a human readable English name for each softmax node.

    Args:
      label_lookup_path: string UID to integer node ID.
      uid_lookup_path: string UID to human-readable string.

    Returns:
      dict from integer node ID to human-readable string.
    """
    if not tf.gfile.Exists(uid_lookup_path):
      tf.logging.fatal('File does not exist %s', uid_lookup_path)
    if not tf.gfile.Exists(label_lookup_path):
      tf.logging.fatal('File does not exist %s', label_lookup_path)

    # Loads mapping from string UID to human-readable string
    proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()
    uid_to_human = {}
    p = re.compile(r'[n\d]*[ \S,]*')
    for line in proto_as_ascii_lines:
      parsed_items = p.findall(line)
      uid = parsed_items[0]
      human_string = parsed_items[2]
      uid_to_human[uid] = human_string

    # Loads mapping from string UID to integer node ID.
    node_id_to_uid = {}
    proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()
    for line in proto_as_ascii:
      if line.startswith('  target_class:'):
        target_class = int(line.split(': ')[1])
      if line.startswith('  target_class_string:'):
        target_class_string = line.split(': ')[1]
        node_id_to_uid[target_class] = target_class_string[1:-2]

    # Loads the final mapping of integer node ID to human-readable string
    node_id_to_name = {}
    for key, val in node_id_to_uid.items():
      if val not in uid_to_human:
        tf.logging.fatal('Failed to locate: %s', val)
      name = uid_to_human[val]
      node_id_to_name[key] = name

    return node_id_to_name

  def id_to_string(self, node_id):
    if node_id not in self.node_lookup:
      return ''
    return self.node_lookup[node_id]


def create_graph():
  """Creates a graph from saved GraphDef file and returns a saver."""
  # Creates graph from saved graph_def.pb.
  with tf.gfile.FastGFile(os.path.join(
      FLAGS.model_dir, 'classify_image_graph_def.pb'), 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    _ = tf.import_graph_def(graph_def, name='')


def run_inference_on_image(image):
  """Runs inference on an image.

  Args:
    image: Image file name.

  Returns:
    Nothing
  """
  if not tf.gfile.Exists(image):
    tf.logging.fatal('File does not exist %s', image)
  image_data = tf.gfile.FastGFile(image, 'rb').read()

  # Creates graph from saved GraphDef.
  create_graph()

  with tf.Session() as sess:
    # Some useful tensors:
    # 'softmax:0': A tensor containing the normalized prediction across
    #   1000 labels.
    # 'pool_3:0': A tensor containing the next-to-last layer containing 2048
    #   float description of the image.
    # 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG
    #   encoding of the image.
    # Runs the softmax tensor by feeding the image_data as input to the graph.
    softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
    predictions = sess.run(softmax_tensor,
                           {'DecodeJpeg/contents:0': image_data})
    predictions = np.squeeze(predictions)

    # Creates node ID --> English string lookup.
    node_lookup = NodeLookup()

    top_k = predictions.argsort()[-FLAGS.num_top_predictions:][::-1]
    for node_id in top_k:
      human_string = node_lookup.id_to_string(node_id)
      score = predictions[node_id]
      print('%s (score = %.5f)' % (human_string, score))


def maybe_download_and_extract():
  """Download and extract model tar file."""
  dest_directory = FLAGS.model_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = DATA_URL.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):
    def _progress(count, block_size, total_size):
      sys.stdout.write('\r>> Downloading %s %.1f%%' % (
          filename, float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()
    filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
  tarfile.open(filepath, 'r:gz').extractall(dest_directory)


def main(_):
  maybe_download_and_extract()
  image = (FLAGS.image_file if FLAGS.image_file else
           os.path.join(FLAGS.model_dir, 'cropped_panda.jpg'))
  run_inference_on_image(image)


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  # classify_image_graph_def.pb:
  #   Binary representation of the GraphDef protocol buffer.
  # imagenet_synset_to_human_label_map.txt:
  #   Map from synset ID to a human readable string.
  # imagenet_2012_challenge_label_map_proto.pbtxt:
  #   Text representation of a protocol buffer mapping a label to synset ID.
  parser.add_argument(
      '--model_dir',
      type=str,
      default='/tmp/imagenet',
      help="""\
      Path to classify_image_graph_def.pb,
      imagenet_synset_to_human_label_map.txt, and
      imagenet_2012_challenge_label_map_proto.pbtxt.\
      """
  )
  parser.add_argument(
      '--image_file',
      type=str,
      default='',
      help='Absolute path to image file.'
  )
  parser.add_argument(
      '--num_top_predictions',
      type=int,
      default=5,
      help='Display this many predictions.'
  )
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)

Just run it like this: python classify_image.py --image_file /path/to/file
This will take the image as input and output the labels.
You might also want to try adding the line below. It identifies the most recently added .jpg file in the specified folder so that file gets analysed:
newest = max(glob.iglob('/home/l2grp/Jetty/src/ubiserv/simple/img/*.[Jj][Pp][Gg]'), key=os.path.getctime)
The full variant script is identical to classify_image.py above apart from three changes: import glob joins the imports, the newest lookup above sits at module level right after DATA_URL, and main() no longer downloads the model or reads --image_file:

def main(_):
  image = newest
  run_inference_on_image(image)

Everything else (NodeLookup, create_graph, run_inference_on_image, and the argument parsing under __main__) is unchanged.
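On the checkpoint half of the question: instead of importing a frozen GraphDef, you can build the Inception v3 graph in code and restore the weights with tf.train.Saver. A minimal sketch, assuming the slim model definition from tensorflow/models is on your PYTHONPATH and you have a checkpoint file such as inception_v3.ckpt (both names are illustrative, not from the question):

import tensorflow as tf
from nets import inception_v3  # slim model definition from tensorflow/models

slim = tf.contrib.slim

def logits_from_image(image_path, checkpoint_path='inception_v3.ckpt'):
  """Returns the logits over the 1001 ImageNet classes for one JPEG file."""
  with tf.Graph().as_default():
    image_data = tf.read_file(image_path)
    image = tf.image.decode_jpeg(image_data, channels=3)
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize_images(image, [299, 299])
    image = (image - 0.5) * 2.0        # scale to [-1, 1], as Inception expects
    image = tf.expand_dims(image, 0)   # batch of one
    with slim.arg_scope(inception_v3.inception_v3_arg_scope()):
      logits, _ = inception_v3.inception_v3(image, num_classes=1001, is_training=False)
    saver = tf.train.Saver()           # will restore every model variable
    with tf.Session() as sess:
      saver.restore(sess, checkpoint_path)  # weights come from the checkpoint, not a GraphDef
      return sess.run(logits)

print(logits_from_image('cropped_panda.jpg'))

The difference from the script above is that the graph structure is rebuilt from Python code and only the variable values come out of the checkpoint.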

Related

ValueError: saved_model_path must be the valid directory of a saved model to load

This is a sentiment analysis project and I am getting this error when loading Nudity-Detection-Model.h5:

Traceback (most recent call last):
  File "c:\Users\kvidushi\Desktop\Mini_project\script\vapp.py", line 214, in <module>
    main()
  File "c:\Users\kvidushi\Desktop\Mini_project\script\vapp.py", line 208, in main
    model = load_model('Nudity-Detection-Model.h5')
  File "c:\Users\kvidushi\Desktop\Mini_project\script\vapp.py", line 59, in load_model
    raise ValueError("saved_model_path must be the valid directory of a saved model to load.")
ValueError: saved_model_path must be the valid directory of a saved model to load.
My script file is:
import json
import cv2
import os
import time
from os import listdir
from os.path import isfile, join, exists, isdir, abspath
from keras.models import load_model
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow_hub as hub
import matplotlib.pyplot as plt

IMAGE_DIM = 224   # required/default image dimensionality

def load_images(image_paths, image_size, verbose=True):
    # Function for loading images into numpy arrays for passing to model.predict
    # inputs:
    #     image_paths: list of image paths to load
    #     image_size: size into which images should be resized
    #     verbose: show all of the image path and sizes loaded
    # outputs:
    #     loaded_images: loaded images on which keras model can run predictions
    #     loaded_image_indexes: paths of images which the function is able to process
    loaded_images = []
    loaded_image_paths = []

    if isdir(image_paths):
        parent = abspath(image_paths)
        image_paths = [join(parent, f) for f in listdir(image_paths) if isfile(join(parent, f))]
    elif isfile(image_paths):
        image_paths = [image_paths]

    for img_path in image_paths:
        try:
            if verbose:
                print(img_path, "size:", image_size)
            image = keras.preprocessing.image.load_img(img_path, target_size=image_size)
            image = keras.preprocessing.image.img_to_array(image)
            # print(image.dtype)
            # print(image.shape)
            # print(image)
            image /= 255
            loaded_images.append(image)
            loaded_image_paths.append(img_path)
        except Exception as ex:
            print("Image Load Failure: ", img_path, ex)

    return np.asarray(loaded_images), loaded_image_paths

def load_model(model_path):
    print(model_path)
    if model_path is None or not exists(model_path):
        raise ValueError("saved_model_path must be the valid directory of a saved model to load.")
    model = tf.keras.models.load_model(model_path)
    # model = tf.keras.models.load_model(model_path, custom_objects={'KerasLayer': hub.KerasLayer})
    # model.summary()
    print(model.summary())
    return model

def classify(model, input_paths, image_dim=IMAGE_DIM):
    """ Classify given a model, input paths (could be single string), and image dimensionality...."""
    images, image_paths = load_images(input_paths, (image_dim, image_dim))
    probs = classify_nd(model, images)
    # print(type(probs))
    return probs

def classify_nd(model, nd_images):
    """ Classify given a model, image array (numpy)...."""
    model_preds = model.predict(nd_images)
    # preds = np.argsort(model_preds, axis = 1).tolist()
    categories = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']
    probs = []
    single_probs = {}
    cnt = 0
    for i, single_preds in enumerate(model_preds):
        cnt = cnt + 1
        for j, pred in enumerate(single_preds):
            if categories[j] in single_probs.keys():
                single_probs[categories[j]] = single_probs[categories[j]] + float(pred)
            else:
                single_probs[categories[j]] = float(pred)
    print(cnt)
    for i in single_probs.keys():
        # print(single_probs[i])
        single_probs[i] = single_probs[i] / cnt
    probs.append(single_probs)
    return probs

def predict(model, img_paths):
    # for img in img_paths:
    image_preds = classify(model, img_paths, IMAGE_DIM)
    data = image_preds[0]
    category = list(data.keys())
    values = list(data.values())
    fig = plt.figure(figsize=(10, 5))
    # creating the bar plot
    plt.bar(category, values, color='maroon', width=0.4)
    plt.xlabel("Categories")
    plt.ylabel("values")
    plt.title("Nudity Detection Model")
    print(json.dumps(image_preds, indent=2), '\n')
    plt.show()

def get_frames(inputFile, outputFolder, step, count):
    '''
    Input:
        inputFile - name of the input file with directory
        outputFolder - name and path of the folder to save the results
        step - time lapse between each step (in seconds)
        count - number of screenshots
    Output:
        'count' number of screenshots that are 'step' seconds apart created from video 'inputFile' and stored in folder 'outputFolder'
    Function Call:
        get_frames("test.mp4", 'data', 10, 10)
    '''
    # initializing local variables
    step = step
    frames_count = count
    currentframe = 0
    frames_captured = 0

    # creating a folder
    try:
        # creating a folder named data
        if not os.path.exists(outputFolder):
            os.makedirs(outputFolder)
    # if not created then raise error
    except OSError:
        print('Error! Could not create a directory')

    # reading the video from specified path
    cam = cv2.VideoCapture(inputFile)
    # reading the number of frames at that particular second
    frame_per_second = cam.get(cv2.CAP_PROP_FPS)
    print(frame_per_second)

    while True:
        ret, frame = cam.read()
        if ret:
            if currentframe > (step * frame_per_second):
                currentframe = 0
                # saving the frames (screenshots)
                name = './data/frame' + str(frames_captured) + '.jpg'
                print('Creating...' + name)
                cv2.imwrite(name, frame)
                frames_captured += 1
                # breaking the loop when count achieved
                if frames_captured > frames_count - 1:
                    ret = False
            currentframe += 1
        if ret == False:
            break

    # Releasing all space and windows once done
    cam.release()
    cv2.destroyAllWindows()

def main():
    # img_paths = ["1.jpg", "2.jpg", ..., "15.jpg"]   # previously hard-coded list
    get_frames("1.mp4", "data", 5, 20)
    model = load_model('Nudity-Detection-Model.h5')
    predict(model, "data")

if __name__ == "__main__":
    main()
It is asking for the file Nudity-Detection-Model.h5. I have put this file in the same folder, but it is still not able to find it. I tried adding double quotes and single quotes and importing load_model, but the error stays the same.
Can anyone help me?
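Two things stand out in the script, for what it's worth. First, def load_model(model_path) shadows the from keras.models import load_model at the top, so the local function (and its exists() check) is what runs. Second, exists('Nudity-Detection-Model.h5') is resolved against the shell's current working directory, not the folder vapp.py lives in, so launching the script from another directory fails exactly like this. A quick, illustrative fix is to anchor the path to the script:

import os
import tensorflow as tf

# Resolve the weights file relative to the script itself, not the working directory
model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          'Nudity-Detection-Model.h5')
print(os.path.exists(model_path))  # True only if the .h5 really sits next to vapp.py
model = tf.keras.models.load_model(model_path)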

tensorflow serving produces wrong answer when I export keras model

I tried to export my keras model to TensorFlow Serving, and the export itself works. What I'm trying to do is accept a b64-encoded image string from the client and output a True/False value. My keras model outputs 3 values, where the first indicates the degree predicted by the model; I compare that to another fixed value, and I export the whole algorithm, from taking in the image string to outputting the True/False value, to TensorFlow Serving via its RESTful API. However, I do not get the correct output from my client program. Long story short, let me show the code.
My program to export the saved model:
import tensorflow as tf
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants, signature_constants, signature_def_utils_impl
from keras.models import load_model
from keras.layers import Input
import os

tf.app.flags.DEFINE_string('model_dir', './keras_models',
                           '''Directory which contains keras models''')
tf.app.flags.DEFINE_string('output_dir', './model_output',
                           '''Directory where to export the model''')
tf.app.flags.DEFINE_string('model_version', '1',
                           '''version number of the model''')
tf.app.flags.DEFINE_string('model_file', 'pointer_model.json',
                           '''json file which contains model architecture''')
tf.app.flags.DEFINE_string('weights_file', 'pointer_model.h5',
                           '''h5 file that contains model weights''')

FLAGS = tf.app.flags.FLAGS

def preprocess_image(image_buffer):
    '''
    Preprocess JPEG encoded bytes to 3D float tensor
    :param image_buffer:
    :return: 4D image tensor (1, width, height, channels)
    '''
    image = tf.image.decode_jpeg(image_buffer, channels=3)
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    return image

def main(_):
    with tf.Graph().as_default():
        serialized_tf_example = tf.placeholder(tf.string, name='input_image')
        feature_configs = {
            'image/encoded': tf.FixedLenFeature(
                shape=[], dtype=tf.string),
        }
        tf_example = tf.parse_example(serialized_tf_example, feature_configs)
        jpegs = tf_example['image/encoded']
        images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)
        images = tf.squeeze(images, [0])
        images = tf.expand_dims(images, axis=0)
        # now the image shape is [1, ?, ?, 3]
        images = tf.image.resize_images(images, tf.constant([224, 224]))

        model = load_model('./keras_models/my_model.h5')
        x = Input(tensor=images)
        y = model(x)
        model.summary()

        compare_value = tf.Variable(100.0)
        bool_out = tf.math.greater(y, compare_value)
        bool_out = bool_out[:, 0]
        bool_out = tf.cast(bool_out, tf.float32)
        bool_out = tf.expand_dims(bool_out, axis=0)
        final_out = tf.concat([tf.transpose(y), bool_out], axis=0)

        init = tf.global_variables_initializer()

        with tf.Session() as sess:
            sess.run(init)
            # predict_tensor_input_info = tf.saved_model.utils.build_tensor_info(jpegs)
            # predict_tensor_score_info = tf.saved_model.utils.build_tensor_info(bool_out)
            prediction_signature = \
                tf.saved_model.signature_def_utils.predict_signature_def(
                    inputs={'images': jpegs},
                    outputs={'scores': final_out}
                )
            export_path = os.path.join(
                tf.compat.as_bytes(FLAGS.output_dir),
                tf.compat.as_bytes(FLAGS.model_version)
            )
            builder = saved_model_builder.SavedModelBuilder(export_path)
            legacy_init_op = tf.group(tf.tables_initializer(),
                                      name='legacy_init_op')
            builder.add_meta_graph_and_variables(
                sess, [tag_constants.SERVING],
                signature_def_map={
                    signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: prediction_signature,
                },
                legacy_init_op=legacy_init_op
            )
            builder.save()

if __name__ == "__main__":
    tf.app.run()
And this is my client program:
import base64
import requests
import json
import argparse
import time
from glob import glob

image_path = glob('./segmented_image/*.jpg')

for i in range(len(image_path)):
    input_image = open(image_path[i], 'rb').read()
    encoded_input_string = base64.b64encode(input_image)
    input_string = encoded_input_string.decode('utf-8')

    # input_image_recover = base64.b64decode(input_string)
    # with open('recovered_image.jpg', 'wb') as output_file:
    #     output_file.write(input_image_recover)
    #
    # print('Base64 encoded string: ' + input_string[:10] + '...' + input_string[-10:])

    instance = [{"b64": input_string}]
    data = json.dumps({"instances": instance})
    print(data[:30] + '...' + data[-10:])

    json_response = requests.post('http://localhost:8501/v1/models/pointer_model:predict',
                                  data=data)
    print(json_response.text)

end_time = time.time()
The output from json_response.text is like:
{"instances": [{"b64": "/9j/4A...Y//9k="}]}
{
"predictions": [[-0.00015692], [-0.000967527], [0.000567942], [0.0]
]
}
{"instances": [{"b64": "/9j/4A...if/9k="}]}
{
"predictions": [[-0.000157582], [-0.000998327], [0.000598866], [0.0]
]
}
......
The first 3 values under the predictions key are supposed to be the degree and the x, y coordinates in the image, which should be values in the hundreds; the last value is the True/False value cast to float32 after comparison with 100.0.
Also, I have tested my model with model.predict, and it gives the correct answer.
Now I'm completely confused. Can someone tell me where my code is wrong?
Use my script to export in the TensorFlow Serving format:
import sys
from keras.models import load_model
import tensorflow as tf
from keras import backend as K
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import graph_io
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants

K.set_learning_phase(0)
K.set_image_data_format('channels_last')

INPUT_MODEL = sys.argv[1]
NUMBER_OF_OUTPUTS = 1
OUTPUT_NODE_PREFIX = 'output_node'
OUTPUT_FOLDER = 'frozen'
OUTPUT_GRAPH = 'frozen_model.pb'
OUTPUT_SERVABLE_FOLDER = sys.argv[2]
INPUT_TENSOR = sys.argv[3]

try:
    model = load_model(INPUT_MODEL)
except ValueError as err:
    print('Please check the input saved model file')
    raise err

output = [None] * NUMBER_OF_OUTPUTS
output_node_names = [None] * NUMBER_OF_OUTPUTS
for i in range(NUMBER_OF_OUTPUTS):
    output_node_names[i] = OUTPUT_NODE_PREFIX + str(i)
    output[i] = tf.identity(model.outputs[i], name=output_node_names[i])
print('Output Tensor names: ', output_node_names)

sess = K.get_session()
try:
    frozen_graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), output_node_names)
    graph_io.write_graph(frozen_graph, OUTPUT_FOLDER, OUTPUT_GRAPH, as_text=False)
    print(f'Frozen graph ready for inference/serving at {OUTPUT_FOLDER}/{OUTPUT_GRAPH}')
except:
    print('Error occurred')

builder = tf.saved_model.builder.SavedModelBuilder(OUTPUT_SERVABLE_FOLDER)
with tf.gfile.GFile(f'{OUTPUT_FOLDER}/{OUTPUT_GRAPH}', "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

sigs = {}
OUTPUT_TENSOR = output_node_names
with tf.Session(graph=tf.Graph()) as sess:
    tf.import_graph_def(graph_def, name="")
    g = tf.get_default_graph()
    inp = g.get_tensor_by_name(INPUT_TENSOR)
    out = g.get_tensor_by_name(OUTPUT_TENSOR[0] + ':0')
    sigs[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = \
        tf.saved_model.signature_def_utils.predict_signature_def(
            {"input": inp}, {"output": out})
    builder.add_meta_graph_and_variables(sess,
                                         [tag_constants.SERVING],
                                         signature_def_map=sigs)

try:
    builder.save()
    print(f'Model ready for deployment at {OUTPUT_SERVABLE_FOLDER}/saved_model.pb')
    print('Prediction signature : ')
    print(sigs['serving_default'])
except:
    print('Error occurred, please check the frozen graph')
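A likely culprit in the original export, for what it's worth: sess.run(init) executes tf.global_variables_initializer() after load_model, which re-initializes the freshly loaded Keras weights to random values in the very session that gets saved, so the servable predicts with an untrained network. The script above sidesteps this by freezing the weights into graph constants first. Assuming it is saved as export_for_serving.py and the Keras input layer has the default name input_1 (both assumptions, adjust to your model), it would be invoked like:

python export_for_serving.py pointer_model.h5 ./serving_model input_1:0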

Cannot freeze Tensorflow models into frozen(.pb) file

I am referring (here) to freeze models into a .pb file. My model is a CNN for text classification; I am using this (GitHub) link to train the CNN for text classification and export it in the form of models. I have trained the model for 4 epochs and my checkpoints folder looks as follows:
I want to freeze this model into a .pb file. For that I am using the following script:
import os, argparse
import tensorflow as tf

# The original freeze_graph function
# from tensorflow.python.tools.freeze_graph import freeze_graph

dir = os.path.dirname(os.path.realpath(__file__))

def freeze_graph(model_dir, output_node_names):
    """Extract the sub graph defined by the output nodes and convert
    all its variables into constants
    Args:
        model_dir: the root folder containing the checkpoint state file
        output_node_names: a string, containing all the output node's names,
                           comma separated
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exist. Please specify an export "
            "directory: %s" % model_dir)

    if not output_node_names:
        print("You need to supply the name of a node to --output_node_names.")
        return -1

    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path

    # We specify the full name of our frozen graph file
    absolute_model_dir = "/".join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_dir + "/frozen_model.pb"

    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True

    # We start a session using a temporary fresh Graph
    with tf.Session(graph=tf.Graph()) as sess:
        # We import the meta graph in the current default Graph
        saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)

        # We restore the weights
        saver.restore(sess, input_checkpoint)

        # We use a built-in TF helper to export variables to constants
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,  # The session is used to retrieve the weights
            tf.get_default_graph().as_graph_def(),  # The graph_def is used to retrieve the nodes
            output_node_names.split(",")  # The output node names are used to select the useful nodes
        )

        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))

    return output_graph_def

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_dir", type=str, default="", help="Model folder to export")
    parser.add_argument("--output_node_names", type=str, default="", help="The name of the output nodes, comma separated.")
    args = parser.parse_args()

    freeze_graph(args.model_dir, args.output_node_names)
I am using the following command to run the above code:
python3 freeze_graph.py --model_dir /Users/path_to_checkpoints/ --output_node_names softmax
It gives the error:
assert d in name_to_node_map, "%s is not in graph" % d
AssertionError: softmax is not in graph
My model is a CNN for text classification. What should I put in output_node_names to produce a valid .pb file?
Use the script below to print the tensors; the last tensor would be the output tensor.
Original author: https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc
import argparse
import tensorflow as tf

def print_tensors(pb_file):
    print('Model File: {}\n'.format(pb_file))
    # read pb into graph_def
    with tf.gfile.GFile(pb_file, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())

    # import graph_def
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def)

    # print operations
    for op in graph.get_operations():
        print(op.name + '\t' + str(op.values()))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--pb_file", type=str, required=True, help="Pb file")
    args = parser.parse_args()
    print_tensors(args.pb_file)
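If freezing is what fails, there is no .pb to inspect yet; in that case you can list the operation names straight from the checkpoint's meta graph instead. A small sketch (the .meta filename is illustrative, use whichever your checkpoints folder contains):

import tensorflow as tf

# Rebuild the graph structure from the .meta file and print every op name;
# plausible --output_node_names candidates are usually near the end of the list.
tf.train.import_meta_graph('/Users/path_to_checkpoints/model.meta')
for op in tf.get_default_graph().get_operations():
    print(op.name)

Retrained graphs often put the softmax inside a name scope, so the node may be something like output/predictions rather than a bare softmax, which would explain the assertion.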

tf.gfile.FastGFile(imagePath, 'rb').read()

I am trying the inception_v3 example.
I am using the code below, but I want to pass the image itself, not the image path. Is there a way?
def run_inference_on_image():
    answer = None

    if not tf.gfile.Exists(imagePath):
        tf.logging.fatal('File does not exist %s', imagePath)
        return answer

    image_data = tf.gfile.FastGFile(imagePath, 'rb').read()

    create_graph()

    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        predictions = sess.run(softmax_tensor,
                               {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)

        top_k = predictions.argsort()[-2:][::-1]
        f = open(labelsFullPath, 'rb')
        lines = f.readlines()
        labels = [str(w).replace("\n", "") for w in lines]
        for node_id in top_k:
            human_string = labels[node_id]
            score = predictions[node_id]
            print('%s (score = %.5f)' % (human_string, score))

        answer = labels[top_k[0]]
        return answer
The image is one that I want to classify with the learned .pb file.

image_data holds the actual image bytes in your code, and that is what gets used inside sess.run(). The line below returns the image bytes as a string.
tf.gfile.FastGFile(imagePath, 'rb').read()
So all you have to do is pass a string of image bytes to sess.run(), which can be done in several ways:
# method 1: encode an in-memory numpy image (e.g. from cv2.imread) to JPEG bytes
import cv2
img_str = cv2.imencode('.jpg', img)[1].tostring()

# method 2: read the image into a numpy array and take its raw bytes
from scipy.ndimage import imread
imgbytes = imread(img_path)
img_str = imgbytes.tostring()

Check what works for you.
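One caveat, assuming the graph above: method 1 produces JPEG-encoded bytes, which is what the 'DecodeJpeg/contents:0' feed expects, while method 2's tostring() returns the raw array bytes, which the DecodeJpeg op cannot parse. A decoded numpy array can instead be fed one node later, to the decoded-image tensor (and mind that cv2.imread returns BGR channel order). Sketch, reusing sess, softmax_tensor, img and img_str from the snippets above:

# JPEG bytes (method 1) replace the file read directly:
predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': img_str})

# A decoded numpy array is fed past the decode op instead:
predictions = sess.run(softmax_tensor, {'DecodeJpeg:0': img})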

Precision and recall from text file

I have this chunk of code which takes in a directory, spits out the top 5 predictions in descending order, and stores them in a text file.
Any suggestions as to how I may edit this to calculate precision and recall for the directory?
Thanks in advance.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# change this as you see fit
image_path = sys.argv[1]
extension = ['*.jpeg', '*.jpg']
files = []
for e in extension:
    directory = os.path.join(image_path, e)
    fileList = glob.glob(directory)
    for f in fileList:
        files.append(f)

# Loads label file, strips off carriage return
label_lines = [line.rstrip() for line
               in tf.gfile.GFile("/tf_files/retrained_labels.txt")]

# Unpersists graph from file
with tf.gfile.FastGFile("/tf_files/retrained_graph.pb", 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def, name='')

with tf.Session() as sess:
    # Feed the image_data as input to the graph and get first prediction
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')

    # Read in the image_data
    for file in files:
        image_data = tf.gfile.FastGFile(file, 'rb').read()
        predictions = sess.run(softmax_tensor,
                               {'DecodeJpeg/contents:0': image_data})

        # Sort to show labels of first prediction in order of confidence
        top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
        print("Image Name: " + file)
        for node_id in top_k:
            human_string = label_lines[node_id]
            score = predictions[0][node_id]
            print('%s (score = %.5f)' % (human_string, score))
Consider using precision_recall_fscore_support or confusion_matrix from scikit-learn.
For both of these you need the actual labels and the labels predicted by the model.
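A minimal sketch with scikit-learn, assuming you collect the ground-truth label of each file into y_true and keep the top-1 prediction per file (label_lines[top_k[0]] in the loop above) in y_pred; the label names here are illustrative:

from sklearn.metrics import precision_recall_fscore_support, confusion_matrix

y_true = ['daisy', 'roses', 'daisy']  # actual label per image
y_pred = ['daisy', 'daisy', 'daisy']  # top-1 predicted label per image

precision, recall, f1, support = precision_recall_fscore_support(
    y_true, y_pred, labels=['daisy', 'roses'])
print('precision:', precision, 'recall:', recall)
print(confusion_matrix(y_true, y_pred, labels=['daisy', 'roses']))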
