tf slim inceptionv3 gives wrong output - python

I want to run predictions on images with the networks from TF-Slim,
but I get random results for Inception v3.
For ResNet-50 everything works fine.
resnet50:
import tensorflow as tf
import cv2
import numpy as np
import tensorflow.contrib.slim.nets as nets

slim = tf.contrib.slim

with tf.device('/gpu:1'):
    inputs = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
    with slim.arg_scope(nets.resnet_v1.resnet_arg_scope()):
        features, net = nets.resnet_v1.resnet_v1_50(inputs=inputs, num_classes=1000)

saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True

with tf.Session(config=config) as sess:
    saver.restore(sess, 'weights/resnet_v1_50.ckpt')
    img = cv2.imread('images/dog_ball.jpg')
    img = cv2.resize(img, (299, 299))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img / 255.0
    curr_features, curr_net = sess.run([features, net], feed_dict={inputs: [img, img, img]})
    for curr_feature in curr_features:
        f_ind = np.argsort(curr_feature[0][0])[-4:]  # resnet50v1
        for i in f_ind:
            print i
        print ' '
But if I try inception_v3, it doesn't work.
The results aren't even the same for identical input images.
At first I thought the weights hadn't loaded properly, but everything looks fine.
inceptionv3:
import tensorflow as tf
import cv2
import numpy as np
import tensorflow.contrib.slim.nets as nets

slim = tf.contrib.slim

with tf.device('/gpu:1'):
    inputs = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])
    with slim.arg_scope(nets.inception.inception_v3_arg_scope()):
        features, net = nets.inception.inception_v3(inputs=inputs, num_classes=1001)

saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True

with tf.Session(config=config) as sess:
    saver.restore(sess, 'weights/inception_v3.ckpt')
    img = cv2.imread('images/dog_ball.jpg')
    img = cv2.resize(img, (299, 299))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img / 255.0
    curr_features, curr_net = sess.run([features, net], feed_dict={inputs: [img, img, img]})
    for curr_feature in curr_features:
        f_ind = np.argsort(curr_feature)[-4:]  # inceptionv3
        for i in f_ind:
            print i
        print ' '
Do you know where my mistake is?

Found the answer. If you have the same problem, pass is_training=False when building the network:
features, net = nets.inception.inception_v3(inputs=inputs, num_classes=1001, is_training=False)
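A short note on why this helps: with is_training=True (the default), the batch-norm layers use the statistics of the current batch rather than the moving averages restored from the checkpoint, and dropout stays active, so the logits change from run to run even for identical inputs. Separately, the released slim Inception checkpoints were trained on inputs scaled to [-1, 1], so the img/255.0 scaling above is likely off as well; a minimal sketch of that preprocessing (an assumption based on the slim inception_preprocessing code, not part of the original question):
# Scale to [-1, 1] as slim's inception_preprocessing does, instead of [0, 1].
# (Assumed preprocessing; adjust if your checkpoint was trained differently.)
img = cv2.imread('images/dog_ball.jpg')
img = cv2.resize(img, (299, 299))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)
img = (img / 255.0 - 0.5) * 2.0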

Related

Tensorflow object detection slow inference time

I need to measure the inference time for a TF1 object detection model. I developed the following code by looking at various tutorials and at the official TensorFlow GitHub repository.
import numpy as np
import os
import sys
import tensorflow as tf
import time
from collections import defaultdict
from matplotlib import pyplot as plt
from PIL import Image

from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

%matplotlib inline

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # Suppress TensorFlow logging (1)
import pathlib
tf.get_logger().setLevel('ERROR')  # Suppress TensorFlow logging (2)

# Enable GPU dynamic memory allocation
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

# Initialize tf.Graph()
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile('/home/luigi/Downloads/SSD_MobileNet_300000/frozen_inference_graph.pb', 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

# Load labels
label_map = label_map_util.load_labelmap('/home/luigi/Downloads/DOTA-3/train/Objects_label_map.pbtxt')
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=15, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

# Run inference and populate results in a dict.
def run_inference(graph, image):
    with graph.as_default():
        with tf.Session() as sess:
            ops = tf.get_default_graph().get_operations()
            all_tensor_names = [output.name for op in ops for output in op.outputs]
            tensor_dict = {}
            tensor_keys = ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes']
            for key in tensor_keys:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
            # Actual inference.
            image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
            output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
    return output_dict

path = '/home/luigi/Downloads/test/'
# [os.path.join('/home/luigi/Downloads/test/', image_path)]
for image_path in os.listdir(path):
    input_path = os.path.join(path, image_path)
    print('Evaluating:', input_path)
    image = Image.open(input_path)
    img_width, img_height = image.size
    image_np = np.array(image.getdata()).reshape((img_height, img_width, 3)).astype(np.uint8)
    # Run inference.
    start = time.time()
    output_dict = run_inference(detection_graph, image_np)
    end = time.time()
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8)
    plt.figure(figsize=(12, 8))
    plt.imshow(image_np)
    print("Prediction time for: " + image_path + " " + str(end - start))
I think that, for some reason related to the code above, the inference times I get are slower than they should be. I have been looking online but haven't found anything useful. Any tips?
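One observation on the snippet above (a hedged suggestion, not a verified answer): run_inference opens a brand-new tf.Session for every image, and in TF1 session creation plus the first run (graph optimization, GPU memory setup) usually dwarf the actual inference. A sketch that builds the session once and reuses it across images, using the same tensor names and variables as the code above:
# Sketch: hoist session creation out of the per-image loop (TF1 style).
# Timing then measures only sess.run for each image.
with detection_graph.as_default():
    with tf.Session() as sess:
        image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
        tensor_dict = {key: tf.get_default_graph().get_tensor_by_name(key + ':0')
                       for key in ['num_detections', 'detection_boxes',
                                   'detection_scores', 'detection_classes']}
        for image_path in os.listdir(path):
            image_np = np.array(Image.open(os.path.join(path, image_path))).astype(np.uint8)
            start = time.time()
            output_dict = sess.run(tensor_dict,
                                   feed_dict={image_tensor: np.expand_dims(image_np, 0)})
            print("Prediction time for %s: %.3fs" % (image_path, time.time() - start))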

Error: 'All inputs to the layer should be tensors' when trying to use VGG19 model

While trying to import the VGG19 model, the code below generates an error about non-tensor inputs, although I am following this code snippet from another answer.
Code:
from keras.applications.vgg19 import VGG19
import keras.backend as K
from keras.models import Model
import imageio as iio
image_shape = (384,384,3)
vgg19 = VGG19(include_top=False, weights='imagenet', input_shape=image_shape)
vgg19.trainable = False
# Make trainable as False
for l in vgg19.layers:
    l.trainable = False
model = Model(inputs=vgg19.input, outputs=vgg19.get_layer('block5_conv4').output)
model.trainable = False
img1 = iio.imread('img1.jpg')
img2 = iio.imread('img2.jpg')
mean = K.mean(K.square(model(img1) - model(img2)))
Error:
...,
[164, 90, 0, 255],
[164, 90, 0, 255],
[164, 90, 0, 255]]]], dtype=uint8)]. All inputs to the layer should be tensors.
I am unable to figure out why.
Maybe try converting your images to tensors:
import numpy
import tensorflow as tf
from PIL import Image
from keras.applications.vgg19 import VGG19
import keras.backend as K
from keras.models import Model
import imageio as iio

# Create random test images
for n in range(2):
    a = numpy.random.rand(384, 384, 3) * 255
    im = Image.fromarray(a.astype('uint8')).convert('RGB')
    im.save('test%0d.jpg' % n)

image_shape = (384, 384, 3)
vgg19 = VGG19(include_top=False, weights='imagenet', input_shape=image_shape)
vgg19.trainable = False
# Freeze all layers
for l in vgg19.layers:
    l.trainable = False
model = Model(inputs=vgg19.input, outputs=vgg19.get_layer('block5_conv4').output)
model.trainable = False

img1 = iio.imread('test0.jpg')
img2 = iio.imread('test1.jpg')

# Convert to tensors and add a batch dimension
img1 = tf.expand_dims(tf.constant(img1), axis=0)
img2 = tf.expand_dims(tf.constant(img2), axis=0)

mean = K.mean(K.square(model(img1) - model(img2)))
print(mean)
tf.Tensor(5.283036, shape=(), dtype=float32)
Instead of tf.expand_dims, you could also just do this:
img1 = tf.constant([img1])
img2 = tf.constant([img2])
There is also an option to load your images with tf.keras.preprocessing.image.load_img:
img1 = tf.keras.preprocessing.image.load_img('test0.jpg')
img2 = tf.keras.preprocessing.image.load_img('test1.jpg')
img1 = tf.constant([tf.keras.preprocessing.image.img_to_array(img1)])
img2 = tf.constant([tf.keras.preprocessing.image.img_to_array(img2)])
mean = K.mean(K.square(model(img1) - model(img2)))
print(mean)
The code from this page works fine; I changed it a little bit. (Note: normalize was not defined in the original post; simple [0, 1] scaling is assumed below.)
import numpy as np
import imageio as iio
from keras.applications.vgg19 import VGG19
from keras.models import Model

def normalize(img):
    # Assumed helper: the original post did not show its normalize()
    return img / 255.0

image_shape = (384, 384, 3)
base_model = VGG19(include_top=False, weights='imagenet', input_shape=image_shape)
model = Model(inputs=base_model.input, outputs=base_model.get_layer('block5_conv4').output)

img01 = iio.imread('test0.jpg').astype('float32')
img11 = iio.imread('test1.jpg').astype('float32')
imgx1 = normalize(img01)
imgx2 = normalize(img11)
img1 = np.expand_dims(imgx1, axis=0)
img2 = np.expand_dims(imgx2, axis=0)

mean = np.mean((model.predict(img1) - model.predict(img2)) ** 2)
print(mean)
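As a side note on the two styles used in these answers: calling model(x) runs the model eagerly and returns a tf.Tensor, while model.predict(x) returns a NumPy array; both expect a leading batch dimension, which is why expand_dims appears throughout. A small self-contained sketch (the model construction mirrors the answers above):
import numpy as np
from keras.applications.vgg19 import VGG19
from keras.models import Model

base_model = VGG19(include_top=False, weights='imagenet', input_shape=(384, 384, 3))
model = Model(inputs=base_model.input, outputs=base_model.get_layer('block5_conv4').output)

x = np.zeros((1, 384, 384, 3), dtype='float32')  # batch of one dummy image
print(type(model(x)))          # tf.Tensor (eager call)
print(type(model.predict(x)))  # numpy.ndarray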

Python: Keras package (ValueError: No such layer: fc1)

When I try to execute my Python code, I get this error: ValueError: No such layer: fc1.
My code uses the TensorFlow and Keras packages to detect objects in an image and return similar images from a custom dataset.
It works perfectly locally, but when I try it on the OVH server I always get this error.
(I tried changing the layer to 'block5_pool', but that doesn't work with my code.)
My code:
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.models import Model
import numpy as np
from PIL import Image
from datetime import datetime
from flask import Flask, request, render_template
from pathlib import Path

class FeatureExtractor:
    def __init__(self):
        base_model = VGG16(weights='imagenet', include_top=False)
        self.model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)

    def extract(self, img):
        """
        Extract a deep feature from an input image

        Args:
            img: from PIL.Image.open(path) or tensorflow.keras.preprocessing.image.load_img(path)

        Returns:
            feature (np.ndarray): deep feature with the shape=(4096, )
        """
        img = img.resize((224, 224))
        img = img.convert('RGB')
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        feature = self.model.predict(x)[0]
        return feature / np.linalg.norm(feature)

path = "/home/virtuag/www/storage/searchSCB.jpg"
img = Image.open(path)
app = Flask(__name__)
fe = FeatureExtractor()
features = []
img_paths = []
for feature_path in Path("/home/virtuag/www/storage/images_article").glob("*.npy"):
    features.append(np.load(feature_path))
    img_paths.append(Path("/home/virtuag/www/storage/images_article") / (feature_path.stem + ".jpg"))
features = np.array(features)
query = fe.extract(img)
dists = np.linalg.norm(features - query, axis=1)  # L2 distances to features
ids = np.argsort(dists)[:30]  # Top 30 results
scores = [img_paths[id] for id in ids]
print(scores)
Thank you.
Here is the updated code I tried:
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
from tensorflow.keras.models import Model
import numpy as np
from PIL import Image
# from feature_extractor import FeatureExtractor
from datetime import datetime
from flask import Flask, request, render_template
from pathlib import Path
from keras.optimizers import Adam
from tensorflow.keras.layers import Dropout, Dense, Activation, Flatten

class FeatureExtractor:
    def __init__(self):
        input_shape = (224, 224, 3)
        base_model = VGG16(weights='imagenet', include_top=False, input_shape=input_shape)
        for layer in base_model.layers:
            layer.trainable = False
        last = base_model.layers[-1].output
        x = Flatten()(last)
        x = Dense(1000, activation='relu', name='fc1')(x)
        x = Dropout(0.3)(x)
        x = Dense(10, activation='softmax', name='predictions')(x)
        model = Model(base_model.input, x)
        model.compile(optimizer=Adam(lr=0.001),
                      loss='categorical_crossentropy', metrics=['accuracy'])
        self.model = Model(inputs=base_model.input, outputs=base_model.layers[-1].output)

    def extract(self, img):
        """
        Extract a deep feature from an input image

        Args:
            img: from PIL.Image.open(path) or tensorflow.keras.preprocessing.image.load_img(path)

        Returns:
            feature (np.ndarray): deep feature with the shape=(4096, )
        """
        img = img.resize((224, 224))
        img = img.convert('RGB')
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        feature = self.model.predict(x)[0]
        return feature / np.linalg.norm(feature)

path = "/home/virtuag/www/storage/searchSCB.jpg"
# path = "c:/xamppp/htdocs/projet/V-stock/PWA/public/storage/searchSCB.jpg"
img = Image.open(path)
app = Flask(__name__)
fe = FeatureExtractor()
features = []
img_paths = []
for feature_path in Path("/home/virtuag/www/storage/images_article").glob("*.npy"):
    # for feature_path in Path("c:/xamppp/htdocs/projet/V-stock/PWA/public/storage/images_article").glob("*.npy"):
    features.append(np.load(feature_path))
    # img_paths.append(Path("c:/xamppp/htdocs/projet/V-stock/PWA/public/storage/images_article") / (feature_path.stem + ".jpg"))
    img_paths.append(Path("/home/virtuag/www/storage/images_article") / (feature_path.stem + ".jpg"))
features = np.array(features)
query = fe.extract(img)
dists = np.linalg.norm(features - query, axis=1)
ids = np.argsort(dists)[:30]
scores = [img_paths[id] for id in ids]
# print(img_paths)
# print(query)
And the error:
Traceback (most recent call last):
  File "server.py", line 71, in <module>
    scores = [img_paths[id] for id in ids]
  File "server.py", line 71, in <listcomp>
    scores = [img_paths[id] for id in ids]
TypeError: only integer scalar arrays can be converted to a scalar index
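A hedged diagnosis, since no accepted answer is shown here: in Keras, the fully connected layers fc1, fc2 and predictions only exist when VGG16 is built with include_top=True; with include_top=False the model ends at block5_pool, so get_layer('fc1') raises No such layer: fc1. And in the updated code, self.model outputs 4D block5_pool feature maps, so np.linalg.norm(features - query, axis=1) yields a multi-dimensional array; np.argsort then produces array-valued indices, which is exactly what triggers the TypeError above. A sketch of both fixes:
import numpy as np
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Model

# Fix 1: keep the classifier head so the 4096-unit 'fc1' layer exists.
base_model = VGG16(weights='imagenet', include_top=True)
model = Model(inputs=base_model.input, outputs=base_model.get_layer('fc1').output)
print(model.output_shape)  # (None, 4096)

# Fix 2 (for the updated code): flatten the stored features so the distance
# is a single number per image. `features`, `query` and `img_paths` are the
# variables from the question above.
flat = features.reshape(len(features), -1)                # (N, D)
dists = np.linalg.norm(flat - query.reshape(-1), axis=1)  # (N,)
ids = np.argsort(dists)[:30]                              # scalar indices now
scores = [img_paths[int(i)] for i in ids]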

Read and preprocess image for tensorflow pretrained model

I don't have much experience with TensorFlow. I am trying to use a pretrained ResNet152 model to get the activations of the last layer as output. The images I use as input are stored on my hard drive, so I need to load them, preprocess them, and then get the output from the pretrained model. I found examples that use image URLs, but when I try it with image paths I can't get it to work. This is what I have so far (only one image for now):
with tf.Graph().as_default():
    filename_queue = tf.train.string_input_producer(['./testimg/A_008.jpg'])
    reader = tf.WholeFileReader()
    key, value = reader.read(filename_queue)
    image = tf.image.decode_jpeg(value, channels=3)
    preprocessing = preprocessing_factory.get_preprocessing('resnet_v2_152', is_training=False)
    processed_image = preprocessing(image, 299, 299)
    processed_images = tf.expand_dims(processed_image, 0)
    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
        logits, end_points = resnet_v2.resnet_v2_152(processed_images, is_training=False)
    checkpoints_dir = './models/resnet_v2_152'
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'resnet_v2_152.ckpt'),
        slim.get_variables_to_restore())
    with tf.Session() as sess:
        init_fn(sess)
        np_image, fv = sess.run([image, logits])
I am doing this in a Jupyter notebook. When I execute the code, I don't get an error message; it just keeps running until I restart the kernel.
Any ideas what I did wrong? And how would I do it for multiple images?
I found the solution: replace tf.WholeFileReader() with tf.read_file():
graph = tf.Graph()
with graph.as_default():
    image_path = tf.placeholder(tf.string)
    image = tf.image.decode_jpeg(tf.read_file(image_path), channels=3)
    preprocessing = preprocessing_factory.get_preprocessing('resnet_v2_152', is_training=False)
    processed_image = preprocessing(image, image_size, image_size)  # image_size defined elsewhere, e.g. 299
    processed_images = tf.expand_dims(processed_image, 0)
    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
        logits, end_points = resnet_v2.resnet_v2_152(processed_images, is_training=False)
    checkpoints_dir = './models/resnet_v2_152'
    init_fn = slim.assign_from_checkpoint_fn(
        os.path.join(checkpoints_dir, 'resnet_v2_152.ckpt'),
        slim.get_variables_to_restore())

images = ['./testimg/A_008.jpg', './testimg/logo.jpg']
with tf.Session(graph=graph) as sess:
    init_fn(sess)
    for img in images:
        fv = sess.run(logits, feed_dict={image_path: img})
        print(fv)
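For reference (an aside, not from the original post): the first version most likely hung because tf.train.string_input_producer only fills its queue once queue runners are started; without tf.train.start_queue_runners, reader.read blocks forever. A minimal sketch of that alternative fix for the original queue-based code, reusing init_fn, image and logits from above:
# Alternative fix: start the queue runners so string_input_producer
# actually enqueues filenames (TF1 queue API).
with tf.Session() as sess:
    init_fn(sess)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        np_image, fv = sess.run([image, logits])
    finally:
        coord.request_stop()
        coord.join(threads)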

Tensorflow pretrained model import error

I am working on an object detection model in TensorFlow. I have a file model.py:
from PIL import Image
import cv2
import numpy as np
import tensorflow as tf
from .squeezenet import SqueezeNet

save_path = "sqnet/squeezenet.ckpt"
sess = tf.Session()
model = SqueezeNet(save_path=save_path, sess=sess)

class Finder(object):
    def __init__(self, image_path):
        self.image_path = image_path

    def predict(self):
        image = process(self.image_path)
        ans = sess.run(model.classifier, feed_dict={model.image: image})
        return ans

def process(path):
    image = Image.open(path)
    # image.show()
    image = np.array(image)
    image = cv2.resize(image, dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
    image = image.reshape((1, 224, 224, 3))
    # print(image.shape)
    # img = Image.fromarray(image, 'RGB')
    return image

image_path = "/home/jatin/ai.jpeg"
object_detector = Finder(image_path)
ans = object_detector.predict()
print(np.argmax(ans))
sess.close()
I have a folder named sqnet alongside model.py, and inside it the squeezenet.ckpt file. But running this gives the error:
InvalidArgumentError (see above for traceback): Unsuccessful TensorSliceReader constructor: Failed to get matching files on sqnet/squeezenet.ckpt: Not found: sqnet; No such file or directory.
What could be the issue?
Seems like a simple IO error to me: a relative path like sqnet/squeezenet.ckpt is resolved against the process's current working directory, not against model.py's location. Have you tried using an absolute path?
import os

save_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'sqnet', 'squeezenet.ckpt')
