I am trying to feed my image ROI into the TensorFlow classifier I took from here. The idea is to first run a simple filter, get rectangle candidates, and then check (using the network) whether each rectangle (ROI) is actually what I am looking for.
class ScrewDetector:
    def __init__(self):
        self.session = None  # an internal variable needed for the Inception network
        # to keep the screw data in
        self.screw_data = dict()
        # load the labels of the classification: screw / non-screw
        self.class_labels = [line.rstrip() for line in tf.gfile.GFile(home + "/imagine_weights/screw_detector/retrained_labels.txt")]
        # prepare the network
        with tf.gfile.FastGFile(home + "/weights/screw_detector/retrained_graph.pb", 'rb') as f:
            graph_def = tf.GraphDef()  # graph_def is a saved copy of a TensorFlow graph; object initialization
            graph_def.ParseFromString(f.read())  # parse serialized protocol buffer data into the variable
            _ = tf.import_graph_def(graph_def, name='')  # import a serialized TensorFlow GraphDef protocol buffer, extract objects in the GraphDef as tf.Tensor
        # start the session
        with tf.Session() as self.session:
            self.softmax_tensor = self.session.graph.get_tensor_by_name('final_result:0')
    def detect_screw(self):
        # get a copy and resize it
        img_raw = self.cv_image.copy()
        resized_img = cv2.resize(img_raw, (0,0), fx=RESIZE_FACTOR, fy=RESIZE_FACTOR)
        # grayscale it
        gray = cv2.cvtColor(resized_img, cv2.COLOR_BGR2GRAY)
        # detect circles in the image
        circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 100, param1=50,param2=35,minRadius=15,maxRadius=30)
        # ensure at least some circles were found
        if circles is not None:
            # convert the (x, y) coordinates and radius of the circles to integers
            circles = np.round(circles[0, :]).astype("int")
            # get a counter
            screw_id = 0
            # loop over the (x, y) coordinates and radius of the circles
            for (x, y, r) in circles:
                # draw the circle in the output image, then draw a rectangle corresponding to the center of the circle
                #cv2.circle(resized_img, (x, y), r, (0, 255, 0), 4)
                cv2.rectangle(resized_img, (x - r, y - r), (x + r, y + r), (0, 0, 255), 5)
                # get the above rectangle as ROI
                screw_roi = resized_img[y:y+r, x:x+r]
                # feed it into the network
                #import IPython; IPython.embed()
                predictions = self.session.run(self.softmax_tensor, feed_dict={screw_id: [screw_roi.flatten()]})
                # get prediction values in array back
                top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]
                # output
                for node_id in top_k:
                    human_string = self.class_labels[node_id]
                    score = predictions[0][node_id]
                    print('%s (score = %.5f)' % (human_string, score))
                # if it is a screw, go on, save its coordinates and append into the network
                # remap in the original image
                scaled_point = (round(x * (1/RESIZE_FACTOR)), round(y * (1/RESIZE_FACTOR)))
                # append to the dict
                self.screw_data[scaled_point] = r * RESIZE_FACTOR
                # iterate the counter
                screw_id += screw_id
            # publish the result, which is an image (scaled)
            result_image_msg = Image()
            try:
                result_image_msg = self.bridge.cv2_to_imgmsg(resized_img, "bgr8")
                #print(self.screw_data)
            except CvBridgeError as e:
                print("Could not make it through the cv bridge of death.")
            self.result_image_pub.publish(result_image_msg)
        else:
            print("No detection of circles.")
but I get:
TypeError: Cannot interpret feed_dict key as Tensor: Can not convert a int into a Tensor.
I do know that the variables screw_id and screw_roi are not empty, and I do know that one needs to feed a dictionary in, which is why I was trying to do that in the first place. But I can't get it running because of the error above.
Any thoughts?
EDIT: So normally, this code loads the image and conducts the prediction as follows:
image_data = tf.gfile.FastGFile(image_path, 'rb').read()
softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
predictions = sess.run(softmax_tensor, {'DecodeJpeg/contents:0': image_data})
All I want is to turn this into a form that operates on the image ROI obtained at runtime. It can't be too complicated.
It's not rocket science, it turns out.
You need to convert the image so that you can pass a string of image bytes, because that's what sess.run() expects.
If you don't have a file on disk that you want to load, then this is the way:
image_data = cv2.imencode('.jpg', screw_roi)[1].tostring() # pass a string of image bytes
After this, you can simply run:
predictions = self.session.run(self.softmax_tensor, {'DecodeJpeg/contents:0': image_data})
That's it.
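One small note: on newer NumPy versions tostring() is just a deprecated alias of tobytes(), so the same line can be written as (a minor variant, same bytes):
image_data = cv2.imencode('.jpg', screw_roi)[1].tobytes()  # identical result, non-deprecated name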
feed_dict expects a dictionary with tensors as keys, to populate the placeholders with the specified values. Your snippet doesn't show how screw_id is initialized, but I bet it's not a tensor of any kind, hence your error.
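For example, a minimal sketch of a valid feed for this kind of retrained Inception graph (assuming the usual DecodeJpeg/contents:0 input node from the retraining example) would look like this:
# look up the input tensor by name instead of using a plain int as the key
input_tensor = self.session.graph.get_tensor_by_name('DecodeJpeg/contents:0')
image_data = cv2.imencode('.jpg', screw_roi)[1].tobytes()  # ROI as JPEG bytes
predictions = self.session.run(self.softmax_tensor, feed_dict={input_tensor: image_data})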
Related
This code generates the error:
IndexError: invalid index to scalar variable.
at the line: results.append(RMSPE(np.expm1(y_train[testcv]), [y[1] for y in y_test]))
How do I fix it?
import pandas as pd
import numpy as np
from sklearn import ensemble
from sklearn import cross_validation
def ToWeight(y):
    w = np.zeros(y.shape, dtype=float)
    ind = y != 0
    w[ind] = 1./(y[ind]**2)
    return w

def RMSPE(y, yhat):
    w = ToWeight(y)
    rmspe = np.sqrt(np.mean( w * (y - yhat)**2 ))
    return rmspe
forest = ensemble.RandomForestRegressor(n_estimators=10, min_samples_split=2, n_jobs=-1)
print ("Cross validations")
cv = cross_validation.KFold(len(train), n_folds=5)
results = []
for traincv, testcv in cv:
    y_test = np.expm1(forest.fit(X_train[traincv], y_train[traincv]).predict(X_train[testcv]))
    results.append(RMSPE(np.expm1(y_train[testcv]), [y[1] for y in y_test]))
testcv is:
[False False False ..., True True True]
You are trying to index into a scalar (non-iterable) value:
[y[1] for y in y_test]
# ^ this is the problem
When you write [y for y in y_test] you are already iterating over the values, so y is a single value.
Your code is the same as trying to do the following:
y_test = [1, 2, 3]
y = y_test[0] # y = 1
print(y[0]) # this line will fail
I'm not sure what you're trying to get into your results array, but you need to get rid of [y[1] for y in y_test].
If you want to append each y in y_test to results, you'll need to expand your list comprehension out further to something like this:
[results.append(..., y) for y in y_test]
Or just use a for loop:
for y in y_test:
    results.append(..., y)
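In this particular case, the y_test returned by predict() is already a 1-D array, so it is probably enough to hand it to RMSPE directly; a sketch of the loop with that change, using the same names as in the question:
for traincv, testcv in cv:
    y_test = np.expm1(forest.fit(X_train[traincv], y_train[traincv]).predict(X_train[testcv]))
    # RMSPE works elementwise on two arrays of the same shape
    results.append(RMSPE(np.expm1(y_train[testcv]), y_test))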
YOLO Object Detection
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
You don't need to index i in layer_names[i[0] - 1]. Just remove the [0] and use layer_names[i - 1]:
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]
It worked for me.
YOLO Object Detection
python <= 3.7
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
python >3.7
ln = net.getLayerNames()
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]
Basically, 1 is not a valid index of y. If you arrived here from your own code, check whether your y actually contains the index you are trying to access (in this case the index is 1).
In the for loop you iterate over the values, and each element of that loop is probably a scalar, which has no index. When an element is an empty array, a single variable, or a scalar rather than a list or array, you cannot index into it.
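If you want a single snippet that copes with both return shapes of getUnconnectedOutLayers() (a flat array or an Nx1 array), flattening the result first is a safe option; a sketch, assuming numpy is imported as np:
ln = net.getLayerNames()
# works whether getUnconnectedOutLayers() returns [1, 2, 3] or [[1], [2], [3]]
output_layers = [ln[i - 1] for i in np.array(net.getUnconnectedOutLayers()).flatten()]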
Those who are using the darknet code need to edit the yolo_video.py file in the repo. This file works, with the required edits already applied:
# import the necessary packages
import numpy as np
import argparse
import imutils
import time
import cv2
import os
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True,
help="path to input video")
ap.add_argument("-o", "--output", required=True,
help="path to output video")
ap.add_argument("-y", "--yolo", required=True,
help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
help="threshold when applyong non-maxima suppression")
args = vars(ap.parse_args())
# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["yolo"], "biscuits.names"])
LABELS = open(labelsPath).read().strip().split("\n")
# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
dtype="uint8")
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["yolo"], "yolov4-custom_best.weights"])
configPath = os.path.sep.join([args["yolo"], "yolov4-custom.cfg"])
# load our YOLO object detector trained on COCO dataset (80 classes)
# and determine only the *output* layer names that we need from YOLO
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
print("ln",net)
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]
# initialize the video stream, pointer to output video file, and
# frame dimensions
vs = cv2.VideoCapture(args["input"])
writer = None
(W, H) = (None, None)
# try to determine the total number of frames in the video file
try:
prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2()\
else cv2.CAP_PROP_FRAME_COUNT
total = int(vs.get(prop))
print("[INFO] {} total frames in video".format(total))
# an error occurred while trying to determine the total
# number of frames in the video file
except:
print("[INFO] could not determine # of frames in video")
print("[INFO] no approx. completion time can be provided")
total = -1
# loop over frames from the video file stream
while True:
# read the next frame from the file
(grabbed, frame) = vs.read()
# if the frame was not grabbed, then we have reached the end
# of the stream
if not grabbed:
break
# if the frame dimensions are empty, grab them
if W is None or H is None:
(H, W) = frame.shape[:2]
# construct a blob from the input frame and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes
# and associated probabilities
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# initialize our lists of detected bounding boxes, confidences,
# and class IDs, respectively
boxes = []
confidences = []
classIDs = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability)
# of the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > args["confidence"]:
# scale the bounding box coordinates back relative to
# the size of the image, keeping in mind that YOLO
# actually returns the center (x, y)-coordinates of
# the bounding box followed by the boxes' width and
# height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top
# and and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates,
# confidences, and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
# apply non-maxima suppression to suppress weak, overlapping
# bounding boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"],
args["threshold"])
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# draw a bounding box rectangle and label on the frame
color = [int(c) for c in COLORS[classIDs[i]]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
text = "{}: {:.4f}".format(LABELS[classIDs[i]],
confidences[i])
cv2.putText(frame, text, (x, y - 5),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
# check if the video writer is None
if writer is None:
# initialize our video writer
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(args["output"], fourcc, 30,
(frame.shape[1], frame.shape[0]), True)
# some information on processing single frame
if total > 0:
elap = (end - start)
print("[INFO] single frame took {:.4f} seconds".format(elap))
print("[INFO] estimated total time to finish: {:.4f}".format(
elap * total))
# write the output frame to disk
writer.write(frame)
# release the file pointers
print("[INFO] cleaning up...")
writer.release()
vs.release()
I am a novice in computer vision. I am trying to implement real-time face recognition with Local Binary Patterns, with the face detection part based on the deep learning dnn module. I am using the caltech_faces dataset and have added a folder with my 20 photos to it.
So, this is my code. I basically transformed face recognition code for sample images into real-time face recognition by making some changes and additions.
I get the following error when executing the below code:
predName = le.inverse_transform([predictions[i]])[0]
^
TabError: inconsistent use of tabs and spaces in indentation
I checked all the tabs and indentation, and can't find what to fix or where. I would appreciate a hint on what to do. Thank you very much!
# import the necessary packages
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils.video import VideoStream
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import imutils
import time
import cv2
import os
#Creating our face detector
def detect_faces(net, frame, minConfidence=0.5):
    # grab the dimensions of the image and then construct a blob
    # from it
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
        (104.0, 177.0, 123.0))
    # pass the blob through the network to obtain the face detections,
    # then initialize a list to store the predicted bounding boxes
    net.setInput(blob)
    detections = net.forward()
    boxes = []
    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]
        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > minConfidence:
            # compute the (x, y)-coordinates of the bounding box for
            # the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            # update our bounding box results list
            boxes.append((startX, startY, endX, endY))
    # return the face detection bounding boxes
    return boxes
#Loading the CALTECH Faces dataset
def load_face_dataset(inputPath, net, minConfidence=0.5,
        minSamples=15):
    # grab the paths to all images in our input directory, extract
    # the name of the person (i.e., class label) from the directory
    # structure, and count the number of example images we have per
    # face
    imagePaths = list(paths.list_images(inputPath))
    names = [p.split(os.path.sep)[-2] for p in imagePaths]
    (names, counts) = np.unique(names, return_counts=True)
    names = names.tolist()
    # initialize lists to store our extracted faces and associated
    # labels
    faces = []
    labels = []
    # loop over the image paths
    for imagePath in imagePaths:
        # load the image from disk and extract the name of the person
        # from the subdirectory structure
        frame = cv2.imread(imagePath)
        name = imagePath.split(os.path.sep)[-2]
        # only process images that have a sufficient number of
        # examples belonging to the class
        if counts[names.index(name)] < minSamples:
            continue
        # perform face detection
        boxes = detect_faces(net, frame, minConfidence)
        # loop over the bounding boxes
        for (startX, startY, endX, endY) in boxes:
            # extract the face ROI, resize it, and convert it to
            # grayscale
            faceROI = frame[startY:endY, startX:endX]
            faceROI = cv2.resize(faceROI, (47, 62))
            faceROI = cv2.cvtColor(faceROI, cv2.COLOR_BGR2GRAY)
            # update our faces and labels lists
            faces.append(faceROI)
            labels.append(name)
    # convert our faces and labels lists to NumPy arrays
    faces = np.array(faces)
    labels = np.array(labels)
    # return a 2-tuple of the faces and labels
    return (faces, labels)
#Implementing Local Binary Patterns for face recognition
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--input", type=str, required=True,
# help="path to input directory of images")
# ap.add_argument("-f", "--face", type=str,
# default="face_detector",
# help="path to face detector model directory")
# ap.add_argument("-c", "--confidence", type=float, default=0.5,
# help="minimum probability to filter weak detections")
# args = vars(ap.parse_args())
# since we are using Jupyter Notebooks we can replace our argument
# parsing code with *hard coded* arguments and values
args = {
    "input": "caltech_faces",
    "face": "face_detector",
    "confidence": 0.5,
}
# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
weightsPath = os.path.sep.join([args["face"],
"res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the CALTECH faces dataset
print("[INFO] loading dataset...")
(faces, labels) = load_face_dataset(args["input"], net,
minConfidence=0.5, minSamples=20)
print("[INFO] {} images in dataset".format(len(faces)))
# encode the string labels as integers
le = LabelEncoder()
labels = le.fit_transform(labels)
# construct our training and testing split
(trainX, testX, trainY, testY) = train_test_split(faces,
labels, test_size=0.25, stratify=labels, random_state=42)
# train our LBP face recognizer
print("[INFO] training face recognizer...")
recognizer = cv2.face.LBPHFaceRecognizer_create(
radius=2, neighbors=16, grid_x=8, grid_y=8)
start = time.time()
recognizer.train(trainX, trainY)
end = time.time()
print("[INFO] training took {:.4f} seconds".format(end - start))
# initialize the list of predictions and confidence scores
print("[INFO] gathering predictions...")
predictions = []
confidence = []
start = time.time()
# loop over the test data
for i in range(0, len(testX)):
    # classify the face and update the list of predictions and
    # confidence scores
    (prediction, conf) = recognizer.predict(testX[i])
    predictions.append(prediction)
    confidence.append(conf)
# measure how long making predictions took
end = time.time()
print("[INFO] inference took {:.4f} seconds".format(end - start))
# show the classification report
print(classification_report(testY, predictions,
target_names=le.classes_))
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
face = vs.read()
face = imutils.resize(face, width=400)
# loop over the detections
for i in range(0, detections.shape[2]):
# grab the predicted name and actual name
predName = le.inverse_transform([predictions[i]])[0]
actualName = le.classes_[testY[i]]
# draw the predicted name and actual name on the image
cv2.putText(face, "pred: {}".format(predName), (5, 25),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
cv2.putText(face, "actual: {}".format(actualName), (5, 60),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
# display the predicted name, actual name, and confidence of the
# prediction (i.e., chi-squared distance; the *lower* the distance
# is the *more confident* the prediction is)
print("[INFO] prediction: {}, actual: {}, confidence: {:.2f}".format(predName, actualName, confidence[i]))
# show the output frame
cv2.imshow("Face", face)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
You have a for loop that contains nothing but a comment, just before the line that causes the problem:
# loop over the detections
for i in range(0, detections.shape[2]):
# grab the predicted name and actual name
predName = le.inverse_transform([predictions[i]])[0]
actualName = le.classes_[testY[i]]
The problem comes from this empty loop; if you have a loop, you must have at least one line of code inside. So delete it or add the pass keyword inside.
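For example, a minimal placeholder keeps the block syntactically valid until real code goes inside (pass here is just filler):
# loop over the detections
for i in range(0, detections.shape[2]):
    pass  # placeholder statement so the loop body is not empty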
I am using Google Colab for this. First of all, make sure you have OpenCV installed. You can install it using pip:
pip install opencv-python
Before detecting the face, we first have to open the web camera in Google Colab.
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode
def take_photo(filename='photo.jpg', quality=0.8):
  js = Javascript('''
    async function takePhoto(quality) {
      const div = document.createElement('div');
      const capture = document.createElement('button');
      capture.textContent = 'Capture';
      div.appendChild(capture);
      const video = document.createElement('video');
      video.style.display = 'block';
      const stream = await navigator.mediaDevices.getUserMedia({video: true});
      document.body.appendChild(div);
      div.appendChild(video);
      video.srcObject = stream;
      await video.play();
      // Resize the output to fit the video element.
      google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
      // Wait for Capture to be clicked.
      await new Promise((resolve) => capture.onclick = resolve);
      const canvas = document.createElement('canvas');
      canvas.width = video.videoWidth;
      canvas.height = video.videoHeight;
      canvas.getContext('2d').drawImage(video, 0, 0);
      stream.getVideoTracks()[0].stop();
      div.remove();
      return canvas.toDataURL('image/jpeg', quality);
    }
    ''')
  display(js)
  data = eval_js('takePhoto({})'.format(quality))
  binary = b64decode(data.split(',')[1])
  with open(filename, 'wb') as f:
    f.write(binary)
  return filename
You then have to run the code below as the second step.
from IPython.display import Image
try:
  filename = take_photo()
  print('Saved to {}'.format(filename))
  # Show the image which was just taken.
  display(Image(filename))
except Exception as err:
  # Errors will be thrown if the user does not have a webcam or if they do not
  # grant the page permission to access it.
  print(str(err))
After running these two cells, the web camera opens and you can capture a photo.
The photo is saved as photo.jpg.
Face detection using Haar cascades is a machine learning-based approach where a cascade function is trained with a set of input data. OpenCV already contains many pre-trained classifiers for face, eyes, smiles, etc. Today we will be using the face classifier. You can experiment with other classifiers as well.
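A minimal sketch of that detection step, assuming the bundled haarcascade_frontalface_default.xml and the photo.jpg captured above:
import cv2

# load OpenCV's pre-trained frontal face classifier
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# read the captured photo and convert it to grayscale for detection
img = cv2.imread('photo.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# detect faces and draw a rectangle around each one
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

# save the annotated result (cv2.imshow is not available in Colab)
cv2.imwrite('faces_detected.jpg', img)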
I am using PyTorch for object detection and refining an existing model (transfer learning) as described in the following link -
https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
While different transformations are used for image augmentation (horizontal flip in this tutorial), the tutorial doesn't mention anything about transforming the bounding boxes/annotations to ensure they stay in line with the transformed image. Am I missing something basic here?
In the training phase, the transforms are indeed applied to both the images and the targets while loading the data. In the PennFudanDataset class, we have these two lines:
if self.transforms is not None:
    img, target = self.transforms(img, target)
where target is a dictionary containing:
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["masks"] = masks
target["image_id"] = image_id
target["area"] = area
target["iscrowd"] = iscrowd
self.transforms in the PennFudanDataset class is set to the composition of transforms returned by get_transform() (ToTensor plus, for training, RandomHorizontalFlip) when the dataset is instantiated with:
dataset = PennFudanDataset('PennFudanPed', get_transform(train=True))
The Compose() here comes from T, a custom transforms module written for the object detection task. Specifically, in the __call__ of RandomHorizontalFlip(), both the image and the target (e.g., boxes, masks, keypoints) are processed.
For the sake of completeness, I borrow the code from the GitHub repo:
def __call__(self, image, target):
    if random.random() < self.prob:
        height, width = image.shape[-2:]
        image = image.flip(-1)
        bbox = target["boxes"]
        bbox[:, [0, 2]] = width - bbox[:, [2, 0]]
        target["boxes"] = bbox
        if "masks" in target:
            target["masks"] = target["masks"].flip(-1)
        if "keypoints" in target:
            keypoints = target["keypoints"]
            keypoints = _flip_coco_person_keypoints(keypoints, width)
            target["keypoints"] = keypoints
    return image, target
Here we can see how the boxes, masks, and keypoints are flipped in accordance with the image.
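For reference, the get_transform() helper that wires these together in the tutorial looks roughly like this (T being the detection transforms module from the references folder):
def get_transform(train):
    transforms = []
    # converts the PIL image to a tensor; the target dict is passed through
    transforms.append(T.ToTensor())
    if train:
        # during training, randomly flip the image together with its boxes/masks/keypoints
        transforms.append(T.RandomHorizontalFlip(0.5))
    return T.Compose(transforms)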
I am trying to use human pose estimation through a Keras implementation. I am using this source: https://github.com/michalfaber/keras_Realtime_Multi-Person_Pose_Estimation. My problem is: how can I generate the skeleton view of the following image, the one on the left? I can already generate the one on the right.
** Source Photograph taken from Pexels
Below is the code I am using to achieve this.
# vgg normalization (subtracting mean) on input images
model = get_testing_model()
model.load_weights(keras_weights_file)
# load config
params, model_params = config_reader()
input_image = cv2.imread(image_path) # B,G,R order
body_parts, all_peaks, subset, candidate = extract_parts(input_image, params, model, model_params)
canvas = draw(input_image, all_peaks, subset, candidate)
toc = time.time()
print('processing time is %.5f' % (toc - tic))
cv2.imwrite(output, canvas)
cv2.destroyAllWindows()
For your requirement, you need to draw over a black image, not the input image. Below is the updated code.
# vgg normalization (subtracting mean) on input images
model = get_testing_model()
model.load_weights(keras_weights_file)
# load config
params, model_params = config_reader()
input_image = cv2.imread(image_path) # B,G,R order
body_parts, all_peaks, subset, candidate = extract_parts(input_image, params, model, model_params)
black_img = np.zeros_like(input_image, np.uint8)
canvas = draw(black_img, all_peaks, subset, candidate)
toc = time.time()
print('processing time is %.5f' % (toc - tic))
cv2.imwrite(output, canvas)
cv2.destroyAllWindows()
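The draw() function simply draws on whichever image you pass as the canvas, so if you ever want the skeleton blended over the original photograph rather than shown on black, one rough option (an assumption on my part, not part of the repo) is to mix the two results:
# blend the skeleton-only canvas back onto the original photograph
overlay = cv2.addWeighted(input_image, 0.6, canvas, 0.4, 0)
cv2.imwrite('overlay.jpg', overlay)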
After using TensorFlow's retrain.py
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/image_retraining/retrain.py
I've successfully generated the "retrained_labels.txt" and "retrained_graph.pb" files. For anybody not familiar with this process, I'm essentially following this tutorial:
https://codelabs.developers.google.com/codelabs/tensorflow-for-poets/#0
which is effectively the same steps as this popular video:
https://www.youtube.com/watch?v=QfNvhPx5Px8
After the retraining process, I'm attempting to write a Python script that opens all the images in a test images directory, and successively shows each image in an OpenCV window and also runs TensorFlow to classify the image.
Problem is, I can't seem to work out how to open the image as a NumPy array (this is the format that the Python OpenCV wrapper uses) and then convert it into a format I can pass into TensorFlow's sess.run().
Currently I'm opening the image with cv2.imread() and then opening it again with tf.gfile.FastGFile(). This is a very poor practice; I'd much rather open the image once and then convert it.
Here is the relevant portion of the code where I'm stuck:
# open the image with OpenCV
openCVImage = cv2.imread(imageFileWithPath)
# show the OpenCV image
cv2.imshow(fileName, openCVImage)
# get the final tensor from the graph
finalTensor = sess.graph.get_tensor_by_name('final_result:0')
# open the image in TensorFlow
tfImage = tf.gfile.FastGFile(imageFileWithPath, 'rb').read()
# run the network to get the predictions
predictions = sess.run(finalTensor, {'DecodeJpeg/contents:0': tfImage})
After reading these posts:
How to convert numpy arrays to standard TensorFlow format?
Feeding image data in tensorflow for transfer learning
I've tried the following:
# show the OpenCV image
cv2.imshow(fileName, openCVImage)
# get the final tensor from the graph
finalTensor = sess.graph.get_tensor_by_name('final_result:0')
# convert the NumPy array / OpenCV image to a TensorFlow image
openCVImageAsArray = np.asarray(openCVImage, np.float32)
tfImage = tf.convert_to_tensor(openCVImageAsArray, np.float32)
# run the network to get the predictions
predictions = sess.run(finalTensor, {'DecodeJpeg/contents:0': tfImage})
This results in this error on the sess.run() line:
TypeError: The value of a feed cannot be a tf.Tensor object. Acceptable feed values include Python scalars, strings, lists, numpy ndarrays, or TensorHandles.
I've also tried this:
# show the OpenCV image
cv2.imshow(fileName, openCVImage)
# get the final tensor from the graph
finalTensor = sess.graph.get_tensor_by_name('final_result:0')
# convert the NumPy array / OpenCV image to a TensorFlow image
tfImage = np.array(openCVImage)[:, :, 0:3]
# run the network to get the predictions
predictions = sess.run(finalTensor, {'DecodeJpeg/contents:0': tfImage})
which results in this error:
ValueError: Cannot feed value of shape (257, 320, 3) for Tensor 'DecodeJpeg/contents:0', which has shape '()'
--- EDIT ---
I've also tried this:
# show the OpenCV image
cv2.imshow(fileName, openCVImage)
# get the final tensor from the graph
finalTensor = sess.graph.get_tensor_by_name('final_result:0')
# convert the NumPy array / OpenCV image to a TensorFlow image
tfImage = np.expand_dims(openCVImage, axis=0)
# run the network to get the predictions
predictions = sess.run(finalTensor, feed_dict={finalTensor: tfImage})
which results in this error:
ValueError: Cannot feed value of shape (1, 669, 1157, 3) for Tensor 'final_result:0', which has shape '(?, 2)'
and I've also tried this:
# show the OpenCV image
cv2.imshow(fileName, openCVImage)
# get the final tensor from the graph
finalTensor = sess.graph.get_tensor_by_name('final_result:0')
# convert the NumPy array / OpenCV image to a TensorFlow image
tfImage = np.expand_dims(openCVImage, axis=0)
# run the network to get the predictions
predictions = sess.run(finalTensor, feed_dict={'DecodeJpeg/contents:0': tfImage})
which results in this error:
ValueError: Cannot feed value of shape (1, 669, 1157, 3) for Tensor 'DecodeJpeg/contents:0', which has shape '()'
I'm not sure if this is necessary, but if anyone is curious here is the entire script. Note that this works great except for having to open the image twice:
# test.py
import os
import tensorflow as tf
import numpy as np
import cv2
# module-level variables ##############################################################################################
RETRAINED_LABELS_TXT_FILE_LOC = os.getcwd() + "/" + "retrained_labels.txt"
RETRAINED_GRAPH_PB_FILE_LOC = os.getcwd() + "/" + "retrained_graph.pb"
TEST_IMAGES_DIR = os.getcwd() + "/test_images"
#######################################################################################################################
def main():
# get a list of classifications from the labels file
classifications = []
# for each line in the label file . . .
for currentLine in tf.gfile.GFile(RETRAINED_LABELS_TXT_FILE_LOC):
# remove the carriage return
classification = currentLine.rstrip()
# and append to the list
classifications.append(classification)
# end for
# show the classifications to prove out that we were able to read the label file successfully
print("classifications = " + str(classifications))
# load the graph from file
with tf.gfile.FastGFile(RETRAINED_GRAPH_PB_FILE_LOC, 'rb') as retrainedGraphFile:
# instantiate a GraphDef object
graphDef = tf.GraphDef()
# read in retrained graph into the GraphDef object
graphDef.ParseFromString(retrainedGraphFile.read())
# import the graph into the current default Graph, note that we don't need to be concerned with the return value
_ = tf.import_graph_def(graphDef, name='')
# end with
# if the test image directory listed above is not valid, show an error message and bail
if not os.path.isdir(TEST_IMAGES_DIR):
print("the test image directory does not seem to be a valid directory, check file / directory paths")
return
# end if
with tf.Session() as sess:
# for each file in the test images directory . . .
for fileName in os.listdir(TEST_IMAGES_DIR):
# if the file does not end in .jpg or .jpeg (case-insensitive), continue with the next iteration of the for loop
if not (fileName.lower().endswith(".jpg") or fileName.lower().endswith(".jpeg")):
continue
# end if
# show the file name on std out
print(fileName)
# get the file name and full path of the current image file
imageFileWithPath = os.path.join(TEST_IMAGES_DIR, fileName)
# attempt to open the image with OpenCV
openCVImage = cv2.imread(imageFileWithPath)
# if we were not able to successfully open the image, continue with the next iteration of the for loop
if openCVImage is None:
print("unable to open " + fileName + " as an OpenCV image")
continue
# end if
# show the OpenCV image
cv2.imshow(fileName, openCVImage)
# get the final tensor from the graph
finalTensor = sess.graph.get_tensor_by_name('final_result:0')
# ToDo: find a way to convert from a NumPy array / OpenCV image to a TensorFlow image
# instead of opening the file twice, these attempts don't work
# attempt 1:
# openCVImageAsArray = np.asarray(openCVImage, np.float32)
# tfImage = tf.convert_to_tensor(openCVImageAsArray, np.float32)
# attempt 2:
# tfImage = np.array(openCVImage)[:, :, 0:3]
# open the image in TensorFlow
tfImage = tf.gfile.FastGFile(imageFileWithPath, 'rb').read()
# run the network to get the predictions
predictions = sess.run(finalTensor, {'DecodeJpeg/contents:0': tfImage})
# sort predictions from most confidence to least confidence
sortedPredictions = predictions[0].argsort()[-len(predictions[0]):][::-1]
print("---------------------------------------")
# keep track of if we're going through the next for loop for the first time so we can show more info about
# the first prediction, which is the most likely prediction (they were sorted descending above)
onMostLikelyPrediction = True
# for each prediction . . .
for prediction in sortedPredictions:
strClassification = classifications[prediction]
# if the classification (obtained from the directory name) ends with the letter "s", remove the "s" to change from plural to singular
if strClassification.endswith("s"):
strClassification = strClassification[:-1]
# end if
# get confidence, then get confidence rounded to 2 places after the decimal
confidence = predictions[0][prediction]
# if we're on the first (most likely) prediction, state what the object appears to be and show a % confidence to two decimal places
if onMostLikelyPrediction:
scoreAsAPercent = confidence * 100.0
print("the object appears to be a " + strClassification + ", " + "{0:.2f}".format(scoreAsAPercent) + "% confidence")
onMostLikelyPrediction = False
# end if
# for any prediction, show the confidence as a ratio to five decimal places
print(strClassification + " (" + "{0:.5f}".format(confidence) + ")")
# end for
# pause until a key is pressed so the user can see the current image (shown above) and the prediction info
cv2.waitKey()
# after a key is pressed, close the current window to prep for the next time around
cv2.destroyAllWindows()
# end for
# end with
# write the graph to file so we can view with TensorBoard
tfFileWriter = tf.summary.FileWriter(os.getcwd())
tfFileWriter.add_graph(sess.graph)
tfFileWriter.close()
# end main
#######################################################################################################################
if __name__ == "__main__":
main()
You were pretty close:
{'DecodeJpeg/contents:0': tfImage} decodes a binary jpeg image.
You need to use {'DecodeJpeg:0': tfImage} instead if the image is already decoded.
Read more here
So your code should look like this:
tfImage = np.array(openCVImage)[:, :, 0:3]
# run the network to get the predictions
predictions = sess.run(finalTensor, {'DecodeJpeg:0': tfImage})
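One extra caveat: cv2.imread() returns the channels in BGR order, while the DecodeJpeg node produces RGB, so depending on the model you may want to convert before feeding; a small variation of the same snippet:
# convert BGR (OpenCV) to RGB, since DecodeJpeg:0 is an RGB image tensor
rgbImage = cv2.cvtColor(openCVImage, cv2.COLOR_BGR2RGB)
tfImage = np.array(rgbImage)[:, :, 0:3]
predictions = sess.run(finalTensor, {'DecodeJpeg:0': tfImage})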