I have an SSD TFLite detection model that I am running with Python on a desktop computer. For now, my script below takes a single image as input for inference, and it works fine:
# Load TFLite model and allocate tensors.
interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()
img_resized = Image.open(file_name)
input_data = np.expand_dims(img_resized, axis=0)
input_data = (np.float32(input_data) - input_mean) / input_std
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
How can I run inference on an .mp4 video as input?
Is it also possible to draw bounding boxes for the detected objects on that video?
To answer your first question about running inference on a video, here is code you can use. I wrote this code for inference with a classification model, so in your case the output_data variable will contain bounding boxes; you have to map them onto the frames using OpenCV, which also answers your second question (drawing bounding boxes on the video).
import cv2
from PIL import Image
import numpy as np
import tensorflow as tf
def read_tensor_from_frame(frame, input_height=224, input_width=224,
                           input_mean=0, input_std=255):
    float_caster = tf.cast(frame, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    # Note: this builds a new graph and session for every frame; for speed you
    # may want to create them once outside the loop.
    sess = tf.Session()
    result = sess.run(normalized)
    return result

def load_labels(label_file):
    label = []
    proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()
    for l in proto_as_ascii_lines:
        label.append(l.rstrip())
    return label

def VideoSrcInit(path):
    cap = cv2.VideoCapture(path)
    flag, image = cap.read()
    if flag:
        print("Valid video path. Let's move to detection!")
    else:
        raise ValueError("Video initialization failed. Please make sure the video path is valid.")
    return cap

def main():
    Labels_Path = "labels.txt"
    Model_Path = "model.tflite"
    input_path = "video.mp4"

    ## Loading labels
    labels = load_labels(Labels_Path)

    ## Load the tflite model and allocate tensors
    interpreter = tf.lite.Interpreter(model_path=Model_Path)
    interpreter.allocate_tensors()

    # Get input and output tensors.
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    input_shape = input_details[0]['shape']

    ## Open the video
    cap = VideoSrcInit(input_path)

    while True:
        ok, cv_image = cap.read()
        if not ok:
            break

        ## Convert the frame to RGB, since OpenCV reads frames in BGR order
        image = Image.fromarray(cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB))

        ## Convert the image into a tensor
        image_tensor = read_tensor_from_frame(image, 224, 224)

        ## Run the model
        interpreter.set_tensor(input_details[0]['index'], image_tensor)
        interpreter.invoke()
        output_data = interpreter.get_tensor(output_details[0]['index'])

        ## You need to check the output of the output_data variable and
        ## map it on the frame in order to draw the bounding boxes.

        cv2.namedWindow("cv_image", cv2.WINDOW_NORMAL)
        cv2.imshow("cv_image", cv_image)

        ## Press p to pause the video and q to terminate the program
        key = cv2.waitKey(10) & 0xFF
        if key == ord("q"):
            break
        elif key == ord("p"):
            cv2.waitKey(0)
            continue

    cap.release()

if __name__ == '__main__':
    main()
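Regarding the mapping step: most SSD detection .tflite models exported the standard way have four output tensors (normalized boxes, class indices, scores, and the number of detections), but check your own output_details to confirm the order and shapes. Under that assumption, a minimal sketch of drawing the boxes on a frame could look like this (draw_detections is a hypothetical helper, not part of any library):

def draw_detections(frame, interpreter, output_details, labels, threshold=0.5):
    # Assumed output order: boxes, classes, scores; boxes are
    # [ymin, xmin, ymax, xmax] normalized to 0..1 -- verify for your model.
    boxes = interpreter.get_tensor(output_details[0]['index'])[0]
    classes = interpreter.get_tensor(output_details[1]['index'])[0]
    scores = interpreter.get_tensor(output_details[2]['index'])[0]
    height, width, _ = frame.shape
    for box, cls, score in zip(boxes, classes, scores):
        if score < threshold:
            continue
        ymin, xmin, ymax, xmax = box
        top_left = (int(xmin * width), int(ymin * height))
        bottom_right = (int(xmax * width), int(ymax * height))
        cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 2)
        # Label lookup assumes labels.txt is ordered by class index.
        cv2.putText(frame, labels[int(cls)], (top_left[0], top_left[1] - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    return frame

You would call it right after interpreter.invoke() and before cv2.imshow, e.g. cv_image = draw_detections(cv_image, interpreter, output_details, labels).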
For a current project, I am trying to set up a video recognition program leveraging TensorFlow 2 and OpenCV (macOS Catalina).
When running the script below with Python 3 through the terminal or via Jupyter, the green webcam light indicates that the camera is switched on and no error messages appear. However, no video image/window shows on my screen. I have tried various solutions, including adding camera screen frame data, none of which worked.
Does anyone know a smart tweak to make the camera image/window visible?
import os
import numpy as np
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import cv2
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
from utils import label_map_util
from utils import visualization_utils as vis_util
# Define the video stream
cap = cv2.VideoCapture(0)  # Change only if you have more than one webcam
# What model to download.
# Models can be found here: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md
MODEL_NAME = 'ssd_inception_v2_coco_2017_11_17'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')
# Number of classes to detect
NUM_CLASSES = 90
# Download Model
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
    file_name = os.path.basename(file.name)
    if 'frozen_inference_graph.pb' in file_name:
        tar_file.extract(file, os.getcwd())

# Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

# Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(
    label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)

# Helper code
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape(
        (im_height, im_width, 3)).astype(np.uint8)
# Detection
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        while True:
            # Read frame from camera
            ret, image_np = cap.read()
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Extract image tensor
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Extract detection boxes
            boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Extract detection scores
            scores = detection_graph.get_tensor_by_name('detection_scores:0')
            # Extract detection classes
            classes = detection_graph.get_tensor_by_name('detection_classes:0')
            # Extract number of detections
            num_detections = detection_graph.get_tensor_by_name(
                'num_detections:0')
            # Actual detection.
            (boxes, scores, classes, num_detections) = sess.run(
                [boxes, scores, classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
                image_np,
                np.squeeze(boxes),
                np.squeeze(classes).astype(np.int32),
                np.squeeze(scores),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8)
            # Display output
            cv2.imshow('object detection', cv2.resize(image_np, (800, 600)))
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break
Have you tried passing -1 or 1 as the device index of the VideoCapture? Just in case you haven't tried it yet.
But first of all, you should figure out where it went wrong: we should verify that the system reads the frames properly.
You can try implementing this to test if your camera is running and being read properly:
import cv2 as cv

cap = cv.VideoCapture(0)
if not cap.isOpened():
    print("Cannot open camera")
    exit()
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # if frame is read correctly, ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    # Our operations on the frame come here
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # Display the resulting frame
    cv.imshow('frame', gray)
    if cv.waitKey(1) == ord('q'):
        break
# When everything is done, release the capture
cap.release()
cv.destroyAllWindows()
cap.read() returns a bool (True/False). If the frame is read correctly, it will be True. So you can check for the end of the video by checking this returned value.
Sometimes, cap may not have initialized the capture. In that case, this code shows an error. You can check whether it is initialized or not by the method cap.isOpened(). If it is True, OK. Otherwise open it using cap.open().
With this, it will help you and us determine which part has gone wrong, and we can suggest further solutions.
After this, if the test shows no error, this link will be a little bit related. You can check it out.
Provide us the result from this so we can investigate further.
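On the device-index point: a quick, self-contained sketch like the one below (assuming nothing beyond OpenCV being installed) probes a few indices and reports which one actually opens and delivers frames, which is often the fastest way to find the right camera on a Mac:

import cv2

# Probe a few device indices; -1 lets some backends pick a default camera.
for index in (-1, 0, 1, 2):
    cap = cv2.VideoCapture(index)
    if not cap.isOpened():
        print("index", index, ": could not be opened")
        continue
    ok, frame = cap.read()
    print("index", index, ":", "frame received" if ok else "opened but no frame")
    cap.release()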
I'm working on multiple object tracking and I'm using the TensorFlow Object Detection API to generate detections. I have managed to modify it a bit to make it return the coordinates of the detected objects, and now I want to feed those coordinates (bounding boxes) to an object tracker (CSRT or KCF).
However, running both detection and tracking simultaneously would be too computationally expensive.
Is there any other method to pass the coordinates or pause the detection?
Below is the detection code.
And in this link is the tracking code https://github.com/spmallick/learnopencv/blob/master/MultiObjectTracker/multiTracker.py
import numpy as np
import os
import six.moves.urllib as urllib
import sys
sys.path.insert(0,r'C:\Users\Ahmed.DESKTOP-KJ6U1BJ\.spyder-py3\TensorFlow\models\research\object_detection')
import tarfile
import tensorflow as tf
import zipfile
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import cv2
import imutils
from protos import string_int_label_map_pb2
from utils import visualization_utils2 as vis_util
def scale(bbox, width, height):
    x = int(bbox[0]*width)
    y = int(bbox[1]*height)
    w = int(bbox[2]*width)
    h = int(bbox[3]*height)
    return (x, y, w, h)
W = 800
H = 600
videopath = "file:///C:/Users/Ahmed.DESKTOP-KJ6U1BJ/.spyder-py3/soccer4.mp4"
cap = cv2.VideoCapture(videopath)
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# # Model preparation
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = r'C:\Users\Ahmed.DESKTOP-KJ6U1BJ\.spyder-py3\TensorFlow\models\research\object_detection\data\mscoco_label_map.pbtxt'
NUM_CLASSES = 90
# ## Download Model (uncomment if the model isn't downloaded / comment if you already have the model)
"""
opener = urllib.request.URLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
    file_name = os.path.basename(file.name)
    if 'frozen_inference_graph.pb' in file_name:
        tar_file.extract(file, os.getcwd())
"""
# ## Load a (frozen) Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
import label_map_util
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# # Detection
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        while True:
            ret, image_np = cap.read()
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            # Definite input and output Tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the level of confidence for each of the objects.
            # The score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            # Actual detection.
            (boxes, scores, classes, num) = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: image_np_expanded})
            # Filter the detections and print their scaled coordinates.
            boxes2 = np.squeeze(boxes)
            max_boxes_to_draw = boxes2.shape[0]
            scores2 = np.squeeze(scores)
            min_score_thresh = 0.7
            classes2 = np.squeeze(classes).astype(np.int32)
            for i in range(min(max_boxes_to_draw, boxes2.shape[0])):
                if boxes2 is None or scores2[i] > min_score_thresh:
                    class_name = category_index[classes2[i]]['name']
                    print("This box is gonna get used", scale(boxes2[i], W, H), class_name)
            cv2.imshow('Object Detection', cv2.resize(image_np, (800, 600)))
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                break

cap.release()
cv2.destroyAllWindows()
You could count frames with a simple counter in the while True loop and "pause" the detection with an if statement before sess.run, like this:
frame_count = 0
with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        while True:
            ret, image_np = cap.read()
            # On the first frame and then every 10 frames, do the detection
            if frame_count == 0:
                ### detection here
                # restart the counter (it climbs from -10 back up to 0)
                frame_count = -10
            ## do tracking here
            frame_count += 1
This way the actual detection is done for the first frame and then every 10th frame, so in the other 9 frames you can do whatever you want.
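If it helps, here is a rough sketch of what the "do tracking here" part could look like with OpenCV's built-in trackers, re-initialized from the detections on every detection frame. It assumes opencv-contrib-python is installed and that detections is a list of (x, y, w, h) pixel boxes you built from the sess.run output; depending on your OpenCV version the MultiTracker and tracker constructors live either directly on cv2 or under cv2.legacy (the names below match recent contrib builds), so adjust accordingly.

import cv2

def init_trackers(frame, detections):
    # detections: list of (x, y, w, h) boxes in pixels for the objects to track
    multi_tracker = cv2.legacy.MultiTracker_create()
    for box in detections:
        multi_tracker.add(cv2.legacy.TrackerCSRT_create(), frame, box)
    return multi_tracker

# Inside the while loop:
#   if frame_count == 0:
#       ... run sess.run(...) and build `detections` ...
#       trackers = init_trackers(image_np, detections)
#       frame_count = -10
#   else:
#       ok, tracked_boxes = trackers.update(image_np)
#       for (x, y, w, h) in tracked_boxes:
#           cv2.rectangle(image_np, (int(x), int(y)),
#                         (int(x + w), int(y + h)), (0, 255, 0), 2)
#   frame_count += 1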
I want to run a model locally. I'm trying to train and predict models from a web course:
https://github.com/GoogleCloudPlatform/tensorflow-without-a-phd/blob/master/tensorflow-planespotting/trainer_yolo/main.py
A model was trained with the code above. It is a YOLO object detection model that detects airplanes, built with tf.estimator. Training finished successfully with the provided code, but I don't know how to run inference with the model.
import tensorflow as tf

# DATA
DATA = './samples/airplane_sample.png'

# Model: This directory contains saved_model.pb and variables
SAVED_MODEL_DIR = './1559196417/'

def decode_image():
    img_bytes = tf.read_file(DATA)
    decoded = tf.image.decode_image(img_bytes, channels=3)
    return tf.cast(decoded, dtype=tf.uint8)

def main1():
    with tf.Session(graph=tf.Graph()) as sess:
        tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], SAVED_MODEL_DIR)
        img = decode_image()
        result = sess.run(['classes'], feed_dict={'input': img})
        print(result)

def main2():
    model = tf.contrib.predictor.from_saved_model(SAVED_MODEL_DIR)
    pred = model({'image_bytes': [decode_image()], 'square_size': [tf.placeholder(tf.int32)]})
    print(pred)

if __name__ == "__main__":
    main2()
Above is code I wrote, but it doesn't work, and I don't even know what the problem is. Incorrect input type? Improper API usage? Could you give me some advice?
First run saved_model_cli show --all --dir SAVED_MODEL_DIR in the terminal outside of python to inspect the saved model and check that it has the right tags, inputs and outputs. From there it takes a bit of wrangling to get the necessary info out of the API.
def extract_tensors(signature_def, graph):
    output = dict()
    for key in signature_def:
        value = signature_def[key]
        if isinstance(value, tf.TensorInfo):
            output[key] = graph.get_tensor_by_name(value.name)
    return output

def extract_tags(signature_def, graph):
    output = dict()
    for key in signature_def:
        output[key] = dict()
        output[key]['inputs'] = extract_tensors(
            signature_def[key].inputs, graph)
        output[key]['outputs'] = extract_tensors(
            signature_def[key].outputs, graph)
    return output

with tf.Session(graph=tf.Graph()) as session:
    serve = tf.saved_model.load(
        session, tags=['serve'], export_dir=SAVED_MODEL_DIR)
    tags = extract_tags(serve.signature_def, session.graph)
    model = tags['serving_default']
From there you can try print(model['inputs'], model['outputs']) to see which inputs and outputs were exported and whether they agree with saved_model_cli; if you need another signature, just replace serving_default with that.
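As a follow-on sketch (with hypothetical key names -- substitute whatever model['inputs'] and saved_model_cli actually report, e.g. the image_bytes and square_size keys from your main2), running inference inside the same with tf.Session(...) block would then look roughly like:

    # Still inside the `with tf.Session(...) as session:` block above.
    with open(DATA, 'rb') as f:          # DATA = './samples/airplane_sample.png'
        png_bytes = f.read()

    feed = {model['inputs']['image_bytes']: [png_bytes]}  # hypothetical input key
    outputs = session.run(model['outputs'], feed_dict=feed)
    print(outputs)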
Maybe this will work:
import tensorflow as tf
import numpy as np
import cv2

detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile('./1559196417/saved_model.pb', 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')

with detection_graph.as_default():
    with tf.Session(graph=detection_graph) as sess:
        image = cv2.imread('./samples/airplane_sample.png')
        rgb_img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        rgb_img_expanded = np.expand_dims(rgb_img, axis=0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
        classes = detection_graph.get_tensor_by_name('classes:0')
        result = sess.run([classes], feed_dict={image_tensor: rgb_img_expanded})
I am trying the inception_v3 example.
I use the code below, but I want to pass the image itself, not the image path. Is there a way?
def run_inference_on_image():
    answer = None

    if not tf.gfile.Exists(imagePath):
        tf.logging.fatal('File does not exist %s', imagePath)
        return answer

    image_data = tf.gfile.FastGFile(imagePath, 'rb').read()

    create_graph()

    with tf.Session() as sess:
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        predictions = sess.run(softmax_tensor,
                               {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)

        top_k = predictions.argsort()[-2:][::-1]
        f = open(labelsFullPath, 'rb')
        lines = f.readlines()
        labels = [str(w).replace("\n", "") for w in lines]
        for node_id in top_k:
            human_string = labels[node_id]
            score = predictions[node_id]
            print('%s (score = %.5f)' % (human_string, score))

        answer = labels[top_k[0]]
        return answer
The image is one that I want to classify with the trained .pb file.
In your code, image_data holds the actual image bytes, and they are used inside the sess.run() call. The line below returns the image bytes as a string.
tf.gfile.FastGFile(imagePath, 'rb').read()
So, all you have to do is pass a string of image bytes to sess.run(), which can be done in several ways:
# method 1: encode the in-memory image to JPEG bytes
# (these are encoded bytes, suitable for a DecodeJpeg/contents:0 input)
import cv2
img_str = cv2.imencode('.jpg', img)[1].tostring()

# method 2: read the image with scipy and take its raw pixel bytes
# (note: scipy.ndimage.imread is deprecated in newer SciPy, and this yields raw
# pixel data rather than an encoded JPEG, so it only fits graphs expecting raw data)
from scipy.ndimage import imread
imgbytes = imread(img_path)
img_str = imgbytes.tostring()
Check what works for you.
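For example, to plug method 1 into your existing run_inference_on_image logic with an in-memory image, a sketch would look like this (it assumes the graph really does expect encoded JPEG bytes on DecodeJpeg/contents:0, as in your code):

import cv2

# img is an in-memory image (numpy array in BGR order, e.g. from cv2.imread or cam.read())
image_data = cv2.imencode('.jpg', img)[1].tobytes()  # tobytes() == the older tostring()

with tf.Session() as sess:
    softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
    predictions = sess.run(softmax_tensor,
                           {'DecodeJpeg/contents:0': image_data})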
Hello to all senior programmers! I have an error in the eigenfaces image training part.
The error is : OpenCV Error: Unsupported format or combination of formats (In the Eigenfaces method all input samples (training images) must be of equal size! Expected 27889 pixels, but was 27556 pixels.) in cv::face::Eigenfaces::train, file C:\projects\opencv-python\opencv_contrib\modules\face\src\eigen_faces.cpp, line 68
Which means my pictures are not of equal size. I tried cv2.resize() when capturing pictures from the camera, but it still doesn't work.
Here is my capture code:
import cv2

cam = cv2.VideoCapture(0)
detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
Id = input('enter your id: ')
sampleNum = 0

while(True):
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        sampleNum = sampleNum + 1
        cv2.imwrite("dataSet/user." + Id + '.' + str(sampleNum) + ".jpg",
                    cv2.resize(gray[y:y+h, x:x+w], (70, 70)))
    cv2.imshow('frame', img)
    if cv2.waitKey(100) & 0xFF == ord('q'):  # waitKey is for delay in video capture
        break
    elif sampleNum >= 50:  # how many pictures to capture?
        break

cam.release()
cv2.destroyAllWindows()
And here is the training part:
import cv2, os
import numpy as np
from PIL import Image

recognizer = cv2.face.EigenFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

def getImagesAndLabels(path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    Ids = []
    for imagePath in imagePaths:
        pilImage = Image.open(imagePath).convert('L')
        imageNp = np.array(pilImage, 'uint8')
        Id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(imageNp)
        for (x, y, w, h) in faces:
            faceSamples.append(imageNp[y:y+h, x:x+w])
            Ids.append(Id)
    return faceSamples, Ids

faces, Ids = getImagesAndLabels('dataSet')
recognizer.train(faces, np.array(Ids))
recognizer.write('trainner/trainnerEi.yml')
PS. I adapted this code from the LBPHFaceRecognizer example.
Thank you!
Okay, so EigenFaces only works if the dimensions of all the images are the same in pixel space.
Which means if one image used in training is of size 28x28, then every other image in training as well as in testing has to be of size 28x28.
If the image size is not the same, OpenCV will throw you that error.
The error simply says that one of the images had 27889 pixels and another had 27556 pixels.
I would recommend that you use the cv2.resize() function to make all images the same size.
Use the code below as a reference for your training part:
import cv2, os
import numpy as np
from PIL import Image

recognizer = cv2.face.EigenFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

def getImagesAndLabels(path):
    width_d, height_d = 280, 280  # Declare your own width and height
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    Ids = []
    for imagePath in imagePaths:
        pilImage = Image.open(imagePath).convert('L')
        imageNp = np.array(pilImage, 'uint8')
        Id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(imageNp)
        for (x, y, w, h) in faces:
            ########################################
            # The line changed to use cv2.resize()
            ########################################
            faceSamples.append(cv2.resize(imageNp[y:y+h, x:x+w], (width_d, height_d)))
            Ids.append(Id)
    return faceSamples, Ids

faces, Ids = getImagesAndLabels('dataSet')
recognizer.train(faces, np.array(Ids))
recognizer.write('trainner/trainnerEi.yml')
Keep in mind that even the test images have to be the same size.
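For example, at prediction time you would resize the detected face to the same 280x280 before calling predict. A minimal sketch (the test image path is hypothetical; it assumes the trainner/trainnerEi.yml produced above):

import cv2

width_d, height_d = 280, 280  # must match the size used during training

recognizer = cv2.face.EigenFaceRecognizer_create()
recognizer.read('trainner/trainnerEi.yml')
detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

test = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical test image
for (x, y, w, h) in detector.detectMultiScale(test):
    face = cv2.resize(test[y:y+h, x:x+w], (width_d, height_d))
    label, confidence = recognizer.predict(face)
    print(label, confidence)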