I am a novice in computer vision. I am trying to implement real-time face recognition with Local Binary Patterns, with the face detection part based on the deep learning dnn module. I am using the caltech_faces dataset and have added a folder with 20 photos of myself to it.
So, this is my code. I basically transformed the face recognition code for sample images into real-time face recognition by making some changes and additions.
I get the following error when executing the code below:
predName = le.inverse_transform([predictions[i]])[0]
^
TabError: inconsistent use of tabs and spaces in indentation
I checked all the tabs and indentation and can't find what to fix or where. I would appreciate a hint on what to do. Thank you very much!
# import the necessary packages
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils.video import VideoStream
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import imutils
import time
import cv2
import os
#Creating our face detector
def detect_faces(net, frame, minConfidence=0.5):
# grab the dimensions of the image and then construct a blob
# from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
(104.0, 177.0, 123.0))
# pass the blob through the network to obtain the face detections,
# then initialize a list to store the predicted bounding boxes
net.setInput(blob)
detections = net.forward()
boxes = []
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > minConfidence:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# update our bounding box results list
boxes.append((startX, startY, endX, endY))
# return the face detection bounding boxes
return boxes
#Loading the CALTECH Faces dataset
def load_face_dataset(inputPath, net, minConfidence=0.5,
minSamples=15):
# grab the paths to all images in our input directory, extract
# the name of the person (i.e., class label) from the directory
# structure, and count the number of example images we have per
# face
imagePaths = list(paths.list_images(inputPath))
names = [p.split(os.path.sep)[-2] for p in imagePaths]
(names, counts) = np.unique(names, return_counts=True)
names = names.tolist()
# initialize lists to store our extracted faces and associated
# labels
faces = []
labels = []
# loop over the image paths
for imagePath in imagePaths:
# load the image from disk and extract the name of the person
# from the subdirectory structure
frame = cv2.imread(imagePath)
name = imagePath.split(os.path.sep)[-2]
# only process images that have a sufficient number of
# examples belonging to the class
if counts[names.index(name)] < minSamples:
continue
# perform face detection
boxes = detect_faces(net, frame, minConfidence)
# loop over the bounding boxes
for (startX, startY, endX, endY) in boxes:
# extract the face ROI, resize it, and convert it to
# grayscale
faceROI = frame[startY:endY, startX:endX]
faceROI = cv2.resize(faceROI, (47, 62))
faceROI = cv2.cvtColor(faceROI, cv2.COLOR_BGR2GRAY)
# update our faces and labels lists
faces.append(faceROI)
labels.append(name)
# convert our faces and labels lists to NumPy arrays
faces = np.array(faces)
labels = np.array(labels)
# return a 2-tuple of the faces and labels
return (faces, labels)
#Implementing Local Binary Patterns for face recognition
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--input", type=str, required=True,
# help="path to input directory of images")
# ap.add_argument("-f", "--face", type=str,
# default="face_detector",
# help="path to face detector model directory")
# ap.add_argument("-c", "--confidence", type=float, default=0.5,
# help="minimum probability to filter weak detections")
# args = vars(ap.parse_args())
# since we are using Jupyter Notebooks we can replace our argument
# parsing code with *hard coded* arguments and values
args = {
"input": "caltech_faces",
"face": "face_detector",
"confidence": 0.5,
}
# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
weightsPath = os.path.sep.join([args["face"],
"res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the CALTECH faces dataset
print("[INFO] loading dataset...")
(faces, labels) = load_face_dataset(args["input"], net,
minConfidence=0.5, minSamples=20)
print("[INFO] {} images in dataset".format(len(faces)))
# encode the string labels as integers
le = LabelEncoder()
labels = le.fit_transform(labels)
# construct our training and testing split
(trainX, testX, trainY, testY) = train_test_split(faces,
labels, test_size=0.25, stratify=labels, random_state=42)
# train our LBP face recognizer
print("[INFO] training face recognizer...")
recognizer = cv2.face.LBPHFaceRecognizer_create(
radius=2, neighbors=16, grid_x=8, grid_y=8)
start = time.time()
recognizer.train(trainX, trainY)
end = time.time()
print("[INFO] training took {:.4f} seconds".format(end - start))
# initialize the list of predictions and confidence scores
print("[INFO] gathering predictions...")
predictions = []
confidence = []
start = time.time()
# loop over the test data
for i in range(0, len(testX)):
# classify the face and update the list of predictions and
# confidence scores
(prediction, conf) = recognizer.predict(testX[i])
predictions.append(prediction)
confidence.append(conf)
# measure how long making predictions took
end = time.time()
print("[INFO] inference took {:.4f} seconds".format(end - start))
# show the classification report
print(classification_report(testY, predictions,
target_names=le.classes_))
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
face = vs.read()
face = imutils.resize(face, width=400)
# loop over the detections
for i in range(0, detections.shape[2]):
# grab the predicted name and actual name
predName = le.inverse_transform([predictions[i]])[0]
actualName = le.classes_[testY[i]]
# draw the predicted name and actual name on the image
cv2.putText(face, "pred: {}".format(predName), (5, 25),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
cv2.putText(face, "actual: {}".format(actualName), (5, 60),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)
# display the predicted name, actual name, and confidence of the
# prediction (i.e., chi-squared distance; the *lower* the distance
# is the *more confident* the prediction is)
print("[INFO] prediction: {}, actual: {}, confidence: {:.2f}".format(predName, actualName, confidence[i]))
# show the output frame
cv2.imshow("Face", face)
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
You have a for loop that contains nothing but a comment, just before the line that causes the problem:
# loop over the detections
for i in range(0, detections.shape[2]):
# grab the predicted name and actual name
predName = le.inverse_transform([predictions[i]])[0]
actualName = le.classes_[testY[i]]
The problem comes from this effectively empty loop: if you write a loop, it must contain at least one line of actual code. So either delete the loop or add the pass keyword inside it.
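To illustrate both fixes (the names here are reused from the question's code):
# Option 1: if you do not need the loop body yet, keep it syntactically
# valid with a placeholder statement
for i in range(0, detections.shape[2]):
    # grab the predicted name and actual name
    pass

# Option 2: keep the real body, but re-indent it consistently
# (spaces only, no tabs) so Python does not raise a TabError
for i in range(0, detections.shape[2]):
    predName = le.inverse_transform([predictions[i]])[0]
    actualName = le.classes_[testY[i]]
Running python -m tabnanny your_script.py (the file name here is a placeholder) will report the lines where tabs and spaces are mixed ambiguously.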
I am using Google Colab for this. First of all, make sure you have OpenCV installed; you can install it using pip (note that the cv2.face module used by the LBPH recognizer above requires the contrib build, opencv-contrib-python):
pip install opencv-python
Before detecting faces, we first have to open the web camera from Google Colab.
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode
def take_photo(filename='photo.jpg', quality=0.8):
js = Javascript('''
async function takePhoto(quality) {
const div = document.createElement('div');
const capture = document.createElement('button');
capture.textContent = 'Capture';
div.appendChild(capture);
const video = document.createElement('video');
video.style.display = 'block';
const stream = await navigator.mediaDevices.getUserMedia({video: true});
document.body.appendChild(div);
div.appendChild(video);
video.srcObject = stream;
await video.play();
// Resize the output to fit the video element.
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
// Wait for Capture to be clicked.
await new Promise((resolve) => capture.onclick = resolve);
const canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0);
stream.getVideoTracks()[0].stop();
div.remove();
return canvas.toDataURL('image/jpeg', quality);
}
''')
display(js)
data = eval_js('takePhoto({})'.format(quality))
binary = b64decode(data.split(',')[1])
with open(filename, 'wb') as f:
f.write(binary)
return filename
You have to run the code below as the second step.
from IPython.display import Image
try:
filename = take_photo()
print('Saved to {}'.format(filename))
# Show the image which was just taken.
display(Image(filename))
except Exception as err:
# Errors will be thrown if the user does not have a webcam or if they do
# not grant the page permission to access it.
print(str(err))
After running these two cells, the web camera is opened and you can capture a photo.
The photo is saved as photo.jpg.
Face detection using Haar cascades is a machine learning based approach where a cascade function is trained on a set of input data. OpenCV already contains many pre-trained classifiers for faces, eyes, smiles, etc. Here we use the face classifier; you can experiment with other classifiers as well.
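As a minimal sketch, assuming the default frontal-face cascade that ships with opencv-python, the captured photo.jpg can be run through the classifier like this:
import cv2

# load the captured photo and the bundled frontal-face Haar cascade
image = cv2.imread('photo.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cascade_path = cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'
face_cascade = cv2.CascadeClassifier(cascade_path)
# scaleFactor and minNeighbors below are typical starting values
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite('faces_detected.jpg', image)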
Related
This code generates an error:
IndexError: invalid index to scalar variable.
at the line: results.append(RMSPE(np.expm1(y_train[testcv]), [y[1] for y in y_test]))
How to fix it?
import pandas as pd
import numpy as np
from sklearn import ensemble
from sklearn import cross_validation
def ToWeight(y):
w = np.zeros(y.shape, dtype=float)
ind = y != 0
w[ind] = 1./(y[ind]**2)
return w
def RMSPE(y, yhat):
w = ToWeight(y)
rmspe = np.sqrt(np.mean( w * (y - yhat)**2 ))
return rmspe
forest = ensemble.RandomForestRegressor(n_estimators=10, min_samples_split=2, n_jobs=-1)
print ("Cross validations")
cv = cross_validation.KFold(len(train), n_folds=5)
results = []
for traincv, testcv in cv:
y_test = np.expm1(forest.fit(X_train[traincv], y_train[traincv]).predict(X_train[testcv]))
results.append(RMSPE(np.expm1(y_train[testcv]), [y[1] for y in y_test]))
testcv is:
[False False False ..., True True True]
You are trying to index into a scalar (non-iterable) value:
[y[1] for y in y_test]
# ^ this is the problem
When you write [y for y in y_test] you are already iterating over the values, so each y is a single value.
Your code is the same as trying to do the following:
y_test = [1, 2, 3]
y = y_test[0] # y = 1
print(y[0]) # this line will fail
I'm not sure what you're trying to get into your results array, but you need to get rid of [y[1] for y in y_test].
If you want to append each y in y_test to results, you'll need to expand your list comprehension out further to something like this:
[results.append(..., y) for y in y_test]
Or just use a for loop:
for y in y_test:
results.append(..., y)
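If the goal is simply to record each fold's RMSPE, a minimal sketch of the loop (assuming y_test already holds the back-transformed predictions, as in the code above) would be:
for traincv, testcv in cv:
    # predict on the held-out fold and undo the log1p transform
    y_test = np.expm1(forest.fit(X_train[traincv], y_train[traincv]).predict(X_train[testcv]))
    # pass the prediction array directly instead of indexing into each scalar
    results.append(RMSPE(np.expm1(y_train[testcv]), y_test))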
YOLO Object Detection
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
You don't need to index into i in layer_names[i[0] - 1]. Just remove the indexing and use layer_names[i - 1]:
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]
It works for me.
YOLO Object Detection
With older OpenCV versions, where getUnconnectedOutLayers() returns an array of one-element arrays:
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
With newer OpenCV versions, where it returns a flat array of indices:
ln = net.getLayerNames()
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]
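If you would rather not branch on the version, a small sketch that handles both return shapes:
import numpy as np

ln = net.getLayerNames()
# getUnconnectedOutLayers() returns either a flat array of indices or an
# array of one-element arrays depending on the OpenCV build, so flatten
# before indexing into the layer names
out_idxs = np.array(net.getUnconnectedOutLayers()).flatten()
ln = [ln[i - 1] for i in out_idxs]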
Basically, 1 is not a valid index of y. If you are coming here from your own code, check whether your y actually contains the index you are trying to access (in this case, index 1).
Inside the for loop you iterate over y_test, so each element is most likely a scalar and has no index. When an element is a scalar (or an empty array, or a single variable) rather than a list or array, you cannot index into it.
Editing the yolo_video.py file in the repo is required for those who are using the darknet code. This file works, with the required edits applied:
# import the necessary packages
import numpy as np
import argparse
import imutils
import time
import cv2
import os
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True,
help="path to input video")
ap.add_argument("-o", "--output", required=True,
help="path to output video")
ap.add_argument("-y", "--yolo", required=True,
help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
help="threshold when applyong non-maxima suppression")
args = vars(ap.parse_args())
# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["yolo"], "biscuits.names"])
LABELS = open(labelsPath).read().strip().split("\n")
# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
dtype="uint8")
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["yolo"], "yolov4-custom_best.weights"])
configPath = os.path.sep.join([args["yolo"], "yolov4-custom.cfg"])
# load our YOLO object detector trained on COCO dataset (80 classes)
# and determine only the *output* layer names that we need from YOLO
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
print("ln",net)
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]
# initialize the video stream, pointer to output video file, and
# frame dimensions
vs = cv2.VideoCapture(args["input"])
writer = None
(W, H) = (None, None)
# try to determine the total number of frames in the video file
try:
prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2()\
else cv2.CAP_PROP_FRAME_COUNT
total = int(vs.get(prop))
print("[INFO] {} total frames in video".format(total))
# an error occurred while trying to determine the total
# number of frames in the video file
except:
print("[INFO] could not determine # of frames in video")
print("[INFO] no approx. completion time can be provided")
total = -1
# loop over frames from the video file stream
while True:
# read the next frame from the file
(grabbed, frame) = vs.read()
# if the frame was not grabbed, then we have reached the end
# of the stream
if not grabbed:
break
# if the frame dimensions are empty, grab them
if W is None or H is None:
(H, W) = frame.shape[:2]
# construct a blob from the input frame and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes
# and associated probabilities
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# initialize our lists of detected bounding boxes, confidences,
# and class IDs, respectively
boxes = []
confidences = []
classIDs = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability)
# of the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > args["confidence"]:
# scale the bounding box coordinates back relative to
# the size of the image, keeping in mind that YOLO
# actually returns the center (x, y)-coordinates of
# the bounding box followed by the boxes' width and
# height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top
# and and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates,
# confidences, and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
# apply non-maxima suppression to suppress weak, overlapping
# bounding boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"],
args["threshold"])
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# draw a bounding box rectangle and label on the frame
color = [int(c) for c in COLORS[classIDs[i]]]
cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
text = "{}: {:.4f}".format(LABELS[classIDs[i]],
confidences[i])
cv2.putText(frame, text, (x, y - 5),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
# check if the video writer is None
if writer is None:
# initialize our video writer
fourcc = cv2.VideoWriter_fourcc(*"MJPG")
writer = cv2.VideoWriter(args["output"], fourcc, 30,
(frame.shape[1], frame.shape[0]), True)
# some information on processing single frame
if total > 0:
elap = (end - start)
print("[INFO] single frame took {:.4f} seconds".format(elap))
print("[INFO] estimated total time to finish: {:.4f}".format(
elap * total))
# write the output frame to disk
writer.write(frame)
# release the file pointers
print("[INFO] cleaning up...")
writer.release()
vs.release()
I have a face recognition project using feature extraction.
I want to generate the embedding vector using DeepFace or VGGFace instead of ResNet-34, without changing the whole script. Any help will be appreciated.
I've written a script (encoding.py) that does feature extraction based on ResNet-34. The script loops through the images in the train set, extracts the encoding (a 128-d vector) and the name of each image, and puts them in a dictionary.
When this script is executed, it generates a file (encodings.pickle) that contains our dictionary.
The script is below:
from imutils import paths
import face_recognition
import argparse
import pickle
import cv2
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--dataset", required=True,
help="path to input directory of faces + images")
ap.add_argument("-e", "--encodings", required=True,
help="path to serialized db of facial encodings")
ap.add_argument("-d", "--detection-method", type=str, default="cnn",
help="face detection model to use: either `hog` or `cnn`")
args = vars(ap.parse_args())
# grab the paths to the input images in our dataset
print("[INFO] quantifying faces...")
imagePaths = list(paths.list_images(args["dataset"]))
# initialize the list of known encodings and known names
knownEncodings = []
knownNames = []
# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
# extract the person name from the image path
print("[INFO] processing image {}/{}".format(i + 1,
len(imagePaths)))
name = imagePath.split(os.path.sep)[-2]
# load the input image and convert it from BGR (OpenCV ordering)
# to dlib ordering (RGB)
image = cv2.imread(imagePath)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# detect the (x, y)-coordinates of the bounding boxes
# corresponding to each face in the input image
boxes = face_recognition.face_locations(rgb,
model=args["detection_method"])
# compute the facial embedding for the face
encodings = face_recognition.face_encodings(rgb, boxes)
# loop over the encodings
for encoding in encodings:
# add each encoding + name to our set of known names and
# encodings
knownEncodings.append(encoding)
knownNames.append(name)
# dump the facial encodings + names to disk
print("[INFO] serializing encodings...")
data = {"encodings": knownEncodings, "names": knownNames}
f = open(args["encodings"], "wb")
f.write(pickle.dumps(data))
f.close()
To make predictions I've used another script that loads a single image, does the encoding part, and compares its encoding with the file generated before.
The second script is below:
# import the necessary packages
import face_recognition
import argparse
import pickle
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--encodings", required=True,
help="path to serialized db of facial encodings")
ap.add_argument("-i", "--image", required=True,
help="path to input image")
ap.add_argument("-d", "--detection-method", type=str, default="hog",
help="face detection model to use: either `hog` or `cnn`")
args = vars(ap.parse_args())
# load the known faces and embeddings
print("[INFO] loading encodings...")
data = pickle.loads(open(args["encodings"], "rb").read())
# load the input image and convert it from BGR to RGB
image = cv2.imread(args["image"])
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# detect the (x, y)-coordinates of the bounding boxes corresponding
# to each face in the input image, then compute the facial embeddings
# for each face
print("[INFO] recognizing faces...")
boxes = face_recognition.face_locations(rgb,
model=args["detection_method"])
encodings = face_recognition.face_encodings(rgb, boxes)
# initialize the list of names for each face detected
names = []
# loop over the facial embeddings
for encoding in encodings:
# attempt to match each face in the input image to our known
# encodings
matches = face_recognition.compare_faces(data["encodings"],
encoding)
name = "Unknown"
# check to see if we have found a match
if True in matches:
# find the indexes of all matched faces then initialize a
# dictionary to count the total number of times each face
# was matched
matchedIdxs = [i for (i, b) in enumerate(matches) if b]
counts = {}
# loop over the matched indexes and maintain a count for
# each recognized face
for i in matchedIdxs:
name = data["names"][i]
counts[name] = counts.get(name, 0) + 1
# determine the recognized face with the largest number of
# votes (note: in the event of an unlikely tie Python will
# select first entry in the dictionary)
name = max(counts, key=counts.get)
# update the list of names
names.append(name)
print(names)
# loop over the recognized faces
for ((top, right, bottom, left), name) in zip(boxes, names):
# draw the predicted face name on the image
cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
0.75, (0, 255, 0), 2)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0)
DeepFace has a represent function in its interface.
from deepface import DeepFace
img_path = "img.jpg"
models = ["VGG-Face", "Facenet", "OpenFace", "DeepFace", "Dlib", "ArcFace"]
embedding = DeepFace.represent(img_path, model_name = models[0])
If you have already read the image, you can pass it directly to DeepFace instead of passing its exact path.
image = cv2.imread(imagePath)
embedding = DeepFace.represent(image, model_name = models[0])
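As a rough sketch of how this could replace the face_recognition.face_encodings call inside the loop of encoding.py (imagePath, knownEncodings, knownNames and name come from that script; note that, depending on the DeepFace version, represent may return the raw vector or a list of dicts with an "embedding" key, so check what your version returns):
from deepface import DeepFace
import cv2

models = ["VGG-Face", "Facenet", "OpenFace", "DeepFace", "Dlib", "ArcFace"]

# inside the loop over imagePaths: let DeepFace detect the face and
# compute its embedding instead of dlib's 128-d encoding
image = cv2.imread(imagePath)
embedding = DeepFace.represent(image, model_name=models[0])
knownEncodings.append(embedding)
knownNames.append(name)
Keep in mind that face_recognition.compare_faces in the second script expects dlib's 128-d encodings, so the comparison step would then need a distance measure (for example cosine or Euclidean distance with a threshold) suited to the chosen model's embeddings.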
I made a convolutional neural network that predicts faces and returns coordinates (y1, x1, y2, x2). I am able to create a rectangle that serves as a mask covering the desired coordinates. I need a way to cover the images in real time. Is there a way to get a live image sequence without saving the frames, just overwriting them, and how do I extract the coordinates in OpenCV? I was using pyplot and saving the images, which is slow and ineffective.
Yeah, so I managed to come up with a solution, but I found out that one frame takes about 0.54 s to compute, so roughly 2 FPS, which is not great for live streaming, so I am switching to a Haar cascade.
The code below is used to configure and call the model.
from numpy import expand_dims
from mrcnn.config import Config
from mrcnn.model import MaskRCNN
from mrcnn.model import mold_image
import cv2
import time
# define the prediction configuration
class PredictionConfig(Config):
# define the name of the configuration
NAME = "face_cfg"
# number of classes (background + face)
NUM_CLASSES = 1 + 1
# simplify GPU config
GPU_COUNT = 1
IMAGES_PER_GPU = 1
def classify_image(image,model,cfg):
# convert pixel values (e.g. center)
scaled_image = mold_image(image, cfg)
# convert image into one sample
sample = expand_dims(scaled_image, 0)
# make prediction
tic = time.time()
yhat = model.detect(sample, verbose=0)[0]
print(time.time() - tic)
return yhat['rois']
def image_bnd_highlight(image,coordinates):
for box in coordinates:
# get coordinates
y1, x1, y2, x2 = box
# create the shape
new_img = cv2.rectangle(image,(x1,y1),(x2,y2),(255,255,255),5)
return new_img
# create config
cfg: PredictionConfig = PredictionConfig()
# define the model
model = MaskRCNN(mode='inference', model_dir='./', config=cfg)
# load model weights
model_path = 'mask_rcnn_face_cfg_0029.h5'
model.load_weights(model_path, by_name=True)
definitive_model = model
Then I call the functions that I created above.
import cv2 as cv
import acapture
from RealTime import definitive_model
from RealTime import cfg
from RealTime import classify_image
from RealTime import image_bnd_highlight
import time
# cap = acapture.open(0)
cap = cv.VideoCapture(0)
cap.set(3,128) #set frame width
cap.set(4,128) #set frame height
cap.set(cv.CAP_PROP_FPS, 2) #adjusting fps to 2
# cap.set(cv.CAP_PROP_BUFFERSIZE,3)
# if not cap.isOpened():
# print("Cannot open camera")
# exit()
while True:
# Capture frame-by-frame
ret, frame = cap.read()
# if frame is read correctly ret is True
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
# let's resize our image to be 150 pixels wide, but in order to
# prevent our resized image from being skewed/distorted, we must
# first calculate the ratio of the *new* width to the *old* width
r = 150.0 / frame.shape[1]
dim = (150, int(frame.shape[0] * r))
# perform the actual resizing of the image
resized = cv.resize(frame, dim, interpolation=cv.INTER_AREA)
# tic = time.time()
coords = classify_image(resized,definitive_model,cfg)
# print(time.time() - tic)
image = image_bnd_highlight(resized,coords)
# Display the resulting frame
cv.imshow('frame', image)
if cv.waitKey(1) == ord('q'):
break
# When everything done, release the capture
cap.release()
cv.destroyAllWindows()
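For reference, a minimal sketch of the Haar cascade route mentioned above, assuming the frontal-face cascade bundled with OpenCV; it replaces the Mask R-CNN call in the loop and is typically fast enough for a live stream:
import cv2 as cv

# load the frontal-face Haar cascade that ships with OpenCV
cascade = cv.CascadeClassifier(cv.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # draw a white rectangle over every detected face, as image_bnd_highlight does
    for (x, y, w, h) in cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5):
        cv.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255), 5)
    cv.imshow('frame', frame)
    if cv.waitKey(1) == ord('q'):
        break
cap.release()
cv.destroyAllWindows()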
I am trying to use human pose estimation through a Keras implementation. I am using this source: https://github.com/michalfaber/keras_Realtime_Multi-Person_Pose_Estimation. My problem is how to generate the skeleton view in the following image, the one on the left; I can already generate the one on the right.
** Source Photograph taken from Pexels
Below is the code I am using to achieve this.
# vgg normalization (subtracting mean) on input images
model = get_testing_model()
model.load_weights(keras_weights_file)
# load config
params, model_params = config_reader()
input_image = cv2.imread(image_path) # B,G,R order
body_parts, all_peaks, subset, candidate = extract_parts(input_image, params, model, model_params)
canvas = draw(input_image, all_peaks, subset, candidate)
toc = time.time()
print('processing time is %.5f' % (toc - tic))
cv2.imwrite(output, canvas)
cv2.destroyAllWindows()
You need to draw over a black image, not the input image, for your requirement. Below is the updated code.
# vgg normalization (subtracting mean) on input images
model = get_testing_model()
model.load_weights(keras_weights_file)
# load config
params, model_params = config_reader()
input_image = cv2.imread(image_path) # B,G,R order
body_parts, all_peaks, subset, candidate = extract_parts(input_image, params, model, model_params)
black_img = np.zeros_like(input_image, np.uint8)
canvas = draw(black_img, all_peaks, subset, candidate)
toc = time.time()
print('processing time is %.5f' % (toc - tic))
cv2.imwrite(output, canvas)
cv2.destroyAllWindows()
I'm using OpenCV + Python to apply a deep learning model and classify objects into 8 categories (animal types), namely cat, dog, horse, deer, bear, lizard, monkey, and "no object detected" (when no object is detected in the image).
I have a folder that has images of all types of animals in it. I read all the images from the folder and then apply the deep learning model to extract the bounding box coordinates of each object in each image.
I want to first categorize each image by putting each type of animal image in the related folder, and second, save the coordinates of the bounding box of that image in the same folder. For example, if the network detected a cat, I want to save that image and the corresponding coordinates (as a .txt text file) in the cat folder, and if it did not find any of those objects in the image, just put it in the "no object detected" folder.
My question is: how can I save the original image and the bounding box coordinates of that object inside the 8 category folders?
Here is my code:
import cv2
import numpy as np
import os
import glob
import argparse
import time
img_dir="/path/imgt/"
data_path=os.path.join(img_dir,'*g')
files=glob.glob(data_path)
data=[]
i = 0
for f1 in files:
image=cv2.imread(f1)
data.append(image)
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
help="path to input image")
ap.add_argument("-y", "--yolo", required=True,
help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
help="threshold when applyong non-maxima suppression")
args = vars(ap.parse_args())
# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["yolo"], "obj.names"])
LABELS = open(labelsPath).read().strip().split("\n")
# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
dtype="uint8")
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["yolo"], "yolo-obj_last.weights"])
configPath = os.path.sep.join([args["yolo"], "yolo-obj.cfg"])
# load our YOLO object detector trained on COCO dataset (80 classes)
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
# load our input image and grab its spatial dimensions
# image = cv2.imread(args["image"])
(H, W) = image.shape[:2]
# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# construct a blob from the input image and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes and
# associated probabilities
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()
# show timing information on YOLO
print("[INFO] YOLO took {:.6f} seconds".format(end - start))
# initialize our lists of detected bounding boxes, confidences, and
# class IDs, respectively
boxes = []
confidences = []
classIDs = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > args["confidence"]:
# scale the bounding box coordinates back relative to the
# size of the image, keeping in mind that YOLO actually
# returns the center (x, y)-coordinates of the bounding
# box followed by the boxes' width and height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top and
# and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates, confidences,
# and class IDs
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
classIDs.append(classID)
# apply non-maxima suppression to suppress weak, overlapping bounding
# boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"],
args["threshold"])
# ensure at least one detection exists
if len(idxs) > 0:
# loop over the indexes we are keeping
for i in idxs.flatten():
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# draw a bounding box rectangle and label on the image
color = [int(c) for c in COLORS[classIDs[i]]]
cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
cv2.putText(image, text, (x, y - 7), cv2.FONT_HERSHEY_SIMPLEX,0.6, color, 2)
path = '/path/imgr/' + LABELS[classIDs[i]] + '/'
cv2.imwrite(os.path.join(path, 'image' + str(i) + '.jpg'), image)
with open(os.path.join(path, 'image' + str(i) + '.txt'), 'a+') as f:
f.write(str(classIDs[i]) + ' ' + str(x) + ' ' + str(y) + ' ' + str(w) + ' ' + str(h))
How does the text file look?
A .txt file is created for each .jpg image file, in the same directory and with the same name but with a .txt extension. For each object in the image, the file contains one line with the object number and object coordinates: <object-class> <x> <y> <width> <height>
Where:
<object-class> - integer number of the object, from 0 to (classes-1)
<x> <y> <width> <height> - float values relative to the width and height of the image; they can range over (0.0 to 1.0]
for example: <x> = <absolute_x> / <image_width> or <height> = <absolute_height> / <image_height>
attention: <x> <y> are the center of the rectangle (not the top-left corner)
For example, for img1.jpg a file img1.txt will be created containing:
1 0.716797 0.395833 0.216406 0.147222
0 0.687109 0.379167 0.255469 0.158333
1 0.420312 0.395833 0.140625 0.166667
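If you are starting from the absolute boxes computed earlier (top-left x, y plus width and height in pixels, as in the question's code), a small helper like the following (to_yolo_line is just an illustrative name) produces a line in that format:
def to_yolo_line(class_id, x, y, w, h, image_w, image_h):
    # convert a top-left-corner pixel box into the normalized,
    # center-based <object-class> <x> <y> <width> <height> format
    x_center = (x + w / 2) / image_w
    y_center = (y + h / 2) / image_h
    return "{} {:.6f} {:.6f} {:.6f} {:.6f}".format(
        class_id, x_center, y_center, w / image_w, h / image_h)
For example, the write in the question's loop could become f.write(to_yolo_line(classIDs[i], x, y, w, h, W, H) + '\n').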
Maybe something like this:
path = os.path.join('/path/imgr/', LABELS[classID], image_name)
cv2.imwrite(path + '.jpg', image)
with open(path + '.txt', 'a+') as f:
f.write(str(classID) + ' ' + str(detection[0]) + ' ' + str(detection[1]) + ' ' + str(detection[2]) + ' ' + str(detection[3]) + '\n')
You may have multiple objects in an image, in which case the code should write to each of the relevant folders and append to the text file if it already exists.
image_name will be something you generate; you can use the name of the file you are reading in, or a counter.
This snippet should go somewhere under the if statement:
if confidence > args["confidence"]:
I would put it at the end. You may need to make minor adjustments, but that is the gist.
More explicitly:
import cv2
import numpy as np
import os
import glob
import argparse
import time
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
#ap.add_argument("-i", "--image", required=True,
# help="path to input image")
ap.add_argument("-y", "--yolo", required=True,
help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
help="threshold when applyong non-maxima suppression")
args = vars(ap.parse_args())
# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["yolo"], "obj.names"])
LABELS = open(labelsPath).read().strip().split("\n")
# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["yolo"], "yolo-obj_last.weights"])
configPath = os.path.sep.join([args["yolo"], "yolo-obj.cfg"])
# load our YOLO object detector trained on COCO dataset (80 classes)
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
img_dir="/path/imgt/"
data_path=os.path.join(img_dir,'*g')
files=glob.glob(data_path)
for f1 in files:
# load our input image and grab its spatial dimensions
image=cv2.imread(f1)
# construct a blob from the input image and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes and
# associated probabilities
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
layerOutputs = net.forward(ln)
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability) of
# the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
box = detection[0:4]
# get upper left corner
box[0] = box[0] - box[2]/2
box[1] = box[1] - box[3]/2
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > args["confidence"]:
# write output files
class_dir = os.path.join('/path/imgr/', LABELS[classID])
if not os.path.exists(class_dir):
os.makedirs(class_dir)
path = os.path.join(class_dir, f1.split('/')[-1][:-4])
cv2.imwrite(path + '.jpg', image)
with open(path + '.txt', 'a+') as f:
f.write(str(classID) + ' ' + str(box[0]) + ' ' + str(box[1]) + ' ' + str(box[2]) + ' ' + str(box[3]) + '\n')
Read through it and make sure you understand what each part of the for loop is doing. Once you are comfortable with this minimal example, you could add back in the non-maximum suppression and the drawing of the bounding boxes if you like.
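If you do add those back, here is a short sketch; it assumes the boxes, confidences, and classIDs lists plus the COLORS array are built exactly as in the original script from the question, so treat it as an illustration rather than a drop-in:
# apply non-maximum suppression to the collected boxes, then draw the survivors
idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"], args["threshold"])
if len(idxs) > 0:
    for i in np.array(idxs).flatten():
        (x, y, w, h) = boxes[i]
        color = [int(c) for c in COLORS[classIDs[i]]]
        cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
        text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
        cv2.putText(image, text, (x, y - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)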