IndexError: invalid index to Scalar Variable. YOLO Project [duplicate] - python

This code generates the following error:
IndexError: invalid index to scalar variable.
at the line: results.append(RMSPE(np.expm1(y_train[testcv]), [y[1] for y in y_test]))
How can I fix it?
import pandas as pd
import numpy as np
from sklearn import ensemble
from sklearn import cross_validation

def ToWeight(y):
    w = np.zeros(y.shape, dtype=float)
    ind = y != 0
    w[ind] = 1./(y[ind]**2)
    return w

def RMSPE(y, yhat):
    w = ToWeight(y)
    rmspe = np.sqrt(np.mean( w * (y - yhat)**2 ))
    return rmspe

forest = ensemble.RandomForestRegressor(n_estimators=10, min_samples_split=2, n_jobs=-1)

print("Cross validations")
cv = cross_validation.KFold(len(train), n_folds=5)
results = []
for traincv, testcv in cv:
    y_test = np.expm1(forest.fit(X_train[traincv], y_train[traincv]).predict(X_train[testcv]))
    results.append(RMSPE(np.expm1(y_train[testcv]), [y[1] for y in y_test]))
testcv is:
[False False False ..., True True True]

You are trying to index into a scalar (non-iterable) value:
[y[1] for y in y_test]
# ^ this is the problem
When you write [y for y in y_test] you are already iterating over the values, so y is a single value on each pass.
Your code is the same as trying to do the following:
y_test = [1, 2, 3]
y = y_test[0] # y = 1
print(y[0]) # this line will fail
I'm not sure what you're trying to get into your results array, but you need to get rid of [y[1] for y in y_test].
If you want to append each y in y_test to results, you'll need to expand your list comprehension out further to something like this:
[results.append(..., y) for y in y_test]
Or just use a for loop:
for y in y_test:
    results.append(..., y)
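For the question's actual goal (scoring each fold with RMSPE), the list comprehension can be dropped entirely, since the predictions are already a 1-D array. A minimal sketch of the corrected loop, assuming X_train and y_train are NumPy arrays as in the question, and using sklearn.model_selection.KFold (the cross_validation module was removed in newer scikit-learn releases):

import numpy as np
from sklearn.model_selection import KFold

results = []
for traincv, testcv in KFold(n_splits=5).split(X_train):
    forest.fit(X_train[traincv], y_train[traincv])
    # predict() already returns a 1-D array, so pass it straight to RMSPE
    y_pred = np.expm1(forest.predict(X_train[testcv]))
    results.append(RMSPE(np.expm1(y_train[testcv]), y_pred))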

YOLO Object Detection
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
You don't need to index i in layer_names[i[0] - 1]. Just remove the [0] and use layer_names[i - 1]:
layer_names = net.getLayerNames()
output_layers = [layer_names[i - 1] for i in net.getUnconnectedOutLayers()]
It works for me.

YOLO Object Detection
With older OpenCV builds (often seen in Python <= 3.7 environments), getUnconnectedOutLayers() returns a 2-D array of indices:
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
With newer OpenCV builds, it returns a flat array of indices:
ln = net.getLayerNames()
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]
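If you would rather not special-case versions at all, flattening the return value handles both shapes. A small sketch, assuming net is an already-loaded cv2.dnn network:

import numpy as np

ln = net.getLayerNames()
# works whether getUnconnectedOutLayers() returns [[200], [227], ...] or [200, 227, ...]
out_idx = np.asarray(net.getUnconnectedOutLayers()).flatten()
ln = [ln[i - 1] for i in out_idx]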

Basically, 1 is not a valid index of y. If you arrived here from your own code, check whether your y actually contains the index you are trying to access (in this case, index 1).

In the for loop you are already iterating over the elements, and each element is probably a scalar, which has no index. When an element is a single value or scalar rather than a list or array, you cannot index into it.
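A minimal reproduction of the error, mirroring the YOLO case where i is already a plain index (the numbers are just example layer indices):

import numpy as np

idx = np.array([200, 227, 254])   # e.g. what getUnconnectedOutLayers() returns in newer OpenCV
i = idx[0]                        # i is a NumPy scalar such as np.int64(200)
print(i - 1)                      # fine
print(i[0] - 1)                   # IndexError: invalid index to scalar variable.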

For those using the darknet code, the yolo_video.py file in the repo needs to be edited. This version works, with the required edits applied:
# import the necessary packages
import numpy as np
import argparse
import imutils
import time
import cv2
import os

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input", required=True,
    help="path to input video")
ap.add_argument("-o", "--output", required=True,
    help="path to output video")
ap.add_argument("-y", "--yolo", required=True,
    help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
    help="threshold when applyong non-maxima suppression")
args = vars(ap.parse_args())

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["yolo"], "biscuits.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
    dtype="uint8")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["yolo"], "yolov4-custom_best.weights"])
configPath = os.path.sep.join([args["yolo"], "yolov4-custom.cfg"])

# load our YOLO object detector trained on COCO dataset (80 classes)
# and determine only the *output* layer names that we need from YOLO
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
print("ln",net)
ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]

# initialize the video stream, pointer to output video file, and
# frame dimensions
vs = cv2.VideoCapture(args["input"])
writer = None
(W, H) = (None, None)

# try to determine the total number of frames in the video file
try:
    prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
        else cv2.CAP_PROP_FRAME_COUNT
    total = int(vs.get(prop))
    print("[INFO] {} total frames in video".format(total))
# an error occurred while trying to determine the total
# number of frames in the video file
except:
    print("[INFO] could not determine # of frames in video")
    print("[INFO] no approx. completion time can be provided")
    total = -1

# loop over frames from the video file stream
while True:
    # read the next frame from the file
    (grabbed, frame) = vs.read()

    # if the frame was not grabbed, then we have reached the end
    # of the stream
    if not grabbed:
        break

    # if the frame dimensions are empty, grab them
    if W is None or H is None:
        (H, W) = frame.shape[:2]

    # construct a blob from the input frame and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes
    # and associated probabilities
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    layerOutputs = net.forward(ln)
    end = time.time()

    # initialize our lists of detected bounding boxes, confidences,
    # and class IDs, respectively
    boxes = []
    confidences = []
    classIDs = []

    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability)
            # of the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > args["confidence"]:
                # scale the bounding box coordinates back relative to
                # the size of the image, keeping in mind that YOLO
                # actually returns the center (x, y)-coordinates of
                # the bounding box followed by the boxes' width and
                # height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")

                # use the center (x, y)-coordinates to derive the top
                # and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                # update our list of bounding box coordinates,
                # confidences, and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                classIDs.append(classID)

    # apply non-maxima suppression to suppress weak, overlapping
    # bounding boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"],
        args["threshold"])

    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])

            # draw a bounding box rectangle and label on the frame
            color = [int(c) for c in COLORS[classIDs[i]]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            text = "{}: {:.4f}".format(LABELS[classIDs[i]],
                confidences[i])
            cv2.putText(frame, text, (x, y - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    # check if the video writer is None
    if writer is None:
        # initialize our video writer
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter(args["output"], fourcc, 30,
            (frame.shape[1], frame.shape[0]), True)

        # some information on processing single frame
        if total > 0:
            elap = (end - start)
            print("[INFO] single frame took {:.4f} seconds".format(elap))
            print("[INFO] estimated total time to finish: {:.4f}".format(
                elap * total))

    # write the output frame to disk
    writer.write(frame)

# release the file pointers
print("[INFO] cleaning up...")
writer.release()
vs.release()

Related

I am getting the error "ZeroDivisionError: division by zero" while using yolov3

I am new to yolov3 and trying to do object detection with it, but I am getting the error shown below. If possible, please tell me what I am doing wrong.
This is the traceback from the screenshot:
Traceback (most recent call last):
  File "c:\Yolo\YOLO-3-OpenCV\YOLO-3-OpenCV\yolo-3-video.py", line 359, in <module>
    print('FPS:', round((f / t), 1))
ZeroDivisionError: division by zero
# Importing needed libraries
import numpy as np
import cv2
import time

video = cv2.VideoCapture(r'videos\traffic-cars.mp4')

# Preparing variable for writer
# that we will use to write processed frames
writer = None

# Preparing variables for spatial dimensions of the frames
h, w = None, None

with open(r'C:\Yolo\YOLO-3-OpenCV\YOLO-3-OpenCV\yolo-coco-data\coco.names') as f:
    # Getting labels reading every line
    # and putting them into the list
    labels = [line.strip() for line in f]

network = cv2.dnn.readNetFromDarknet(r'C:\Yolo\YOLO-3-OpenCV\YOLO-3-OpenCV\yolo-coco-data\yolov3.cfg',
                                     r'C:\Yolo\YOLO-3-OpenCV\YOLO-3-OpenCV\yolo-coco-data\yolov3.weights')

layers_names_all = network.getLayerNames()

# Getting only output layers' names that we need from YOLO v3 algorithm
# with function that returns indexes of layers with unconnected outputs
layers_names_output = \
    [layers_names_all[i - 1] for i in network.getUnconnectedOutLayers()]

# Setting minimum probability to eliminate weak predictions
probability_minimum = 0.5

colours = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')

f = 0

# Defining variable for counting total time
# At the end we will show time spent for processing all frames
t = 0

# Defining loop for catching frames
while True:
    # Capturing frame-by-frame
    ret, frame = video.read()
    if not ret:
        break

    if w is None or h is None:
        # Slicing from tuple only first two elements
        h, w = frame.shape[:2]

    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
                                 swapRB=True, crop=False)

    network.setInput(blob)  # setting blob as input to the network
    start = time.time()
    output_from_network = network.forward(layers_names_output)
    end = time.time()

    f += 1
    t += end - start

    print('Frame number {0} took {1:.5f} seconds'.format(f, end - start))

    bounding_boxes = []
    confidences = []
    class_numbers = []

    # Going through all output layers after feed forward pass
    for result in output_from_network:
        # Going through all detections from current output layer
        for detected_objects in result:
            # Getting 80 classes' probabilities for current detected object
            scores = detected_objects[5:]
            # Getting index of the class with the maximum value of probability
            class_current = np.argmax(scores)
            # Getting value of probability for defined class
            confidence_current = scores[class_current]

            # Eliminating weak predictions with minimum probability
            if confidence_current > probability_minimum:
                # Scaling bounding box coordinates to the initial frame size
                # YOLO data format keeps coordinates for center of bounding box
                # and its current width and height
                # That is why we can just multiply them elementwise
                # to the width and height
                # of the original frame and in this way get coordinates for center
                # of bounding box, its width and height for original frame
                box_current = detected_objects[0:4] * np.array([w, h, w, h])

                # Now, from YOLO data format, we can get top left corner coordinates
                # that are x_min and y_min
                x_center, y_center, box_width, box_height = box_current
                x_min = int(x_center - (box_width / 2))
                y_min = int(y_center - (box_height / 2))

                # Adding results into prepared lists
                bounding_boxes.append([x_min, y_min,
                                       int(box_width), int(box_height)])
                confidences.append(float(confidence_current))
                class_numbers.append(class_current)

    results = cv2.dnn.NMSBoxes(bounding_boxes, confidences,
                               probability_minimum, threshold)

    if len(results) > 0:
        # Going through indexes of results
        for i in results.flatten():
            # Getting current bounding box coordinates,
            # its width and height
            x_min, y_min = bounding_boxes[i][0], bounding_boxes[i][1]
            box_width, box_height = bounding_boxes[i][2], bounding_boxes[i][3]

            # Preparing colour for current bounding box
            # and converting from numpy array to list
            colour_box_current = colours[class_numbers[i]].tolist()

            # # # Check point
            # print(type(colour_box_current))  # <class 'list'>
            # print(colour_box_current)  # [172 , 10, 127]

            # Drawing bounding box on the original current frame
            cv2.rectangle(frame, (x_min, y_min),
                          (x_min + box_width, y_min + box_height),
                          colour_box_current, 2)

            # Preparing text with label and confidence for current bounding box
            text_box_current = '{}: {:.4f}'.format(labels[int(class_numbers[i])],
                                                   confidences[i])

            # Putting text with label and confidence on the original image
            cv2.putText(frame, text_box_current, (x_min, y_min - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, colour_box_current, 2)

    if writer is None:
        # Constructing code of the codec
        # to be used in the function VideoWriter
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')

        # Writing current processed frame into the video file
        # Pay attention! If you're using Windows, yours path might looks like:
        # r'videos\result-traffic-cars.mp4'
        # or:
        # 'videos\\result-traffic-cars.mp4'
        writer = cv2.VideoWriter('videos/result-traffic-cars.mp4', fourcc, 30,
                                 (frame.shape[1], frame.shape[0]), True)

    # Write processed current frame to the file
    writer.write(frame)

    """
    End of:
    Writing processed frame into the file
    """

"""
End of:
Reading frames in the loop
"""

# Printing final results
print()
print('Total number of frames', f)
print('Total amount of time {:.5f} seconds'.format(t))
print('FPS:', round((f / t), 1))

# Releasing video reader and writer
video.release()
writer.release()
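The traceback points at f / t: both variables stay 0 when the while loop never runs, which typically means cv2.VideoCapture could not open the video path, so the very first read() returns ret = False and the loop exits immediately. A small defensive sketch of how the final report could be guarded (an illustration, not part of the original course code):

# Guard the FPS report so an unreadable video does not crash the script.
if t > 0:
    print('FPS:', round(f / t, 1))
else:
    print('No frames were processed -- check that the video path is correct '
          'and that cv2.VideoCapture could actually open the file.')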

idxs = cv2.dnn.NMSBoxes(boxes, confidence, MIN_CORP, NMS_THRESH) TypeError: Can't parse 'scores'. Input argument doesn't provide sequence protocol

Help me, please. I received an error in my code for a social distancing detection system using a webcam. I searched for the error, but I found nothing that differs from my code. I write my code in Notepad++ and run it from the command prompt. Below is my error:
C:\Users\User\Downloads\Social_Distancing_Detection_Real_Time>python Run.py
[INFO] loading YOLO from disk...
[INFO] setting preferable backend and target to CUDA...
[INFO] accessing video stream...
[ WARN:0] global D:\a\opencv-python\opencv-python\opencv\modules\dnn\src\dnn.cpp (1447) cv::dnn::dnn4_v20211004::Net::Impl::setUpNet DNN module was not built with CUDA backend; switching to CPU
Traceback (most recent call last):
File "C:\Users\User\Downloads\Social_Distancing_Detection_Real_Time\Run.py", line 77, in <module>
results = detect_people(frame, net, ln,
File "C:\Users\User\Downloads\Social_Distancing_Detection_Real_Time\mylib\detection.py", line 58, in detect_people
idxs = cv2.dnn.NMSBoxes(boxes, confidence, MIN_CORP, NMS_THRESH)
TypeError: Can't parse 'scores'. Input argument doesn't provide sequence protocol
[ WARN:1] global D:\a\opencv-python\opencv-python\opencv\modules\videoio\src\cap_msmf.cpp (438) `anonymous-namespace'::SourceReaderCB::~SourceReaderCB terminating async callback
Below is the full code of my detection.py file:
#import the necessary packages
from .config import NMS_THRESH, MIN_CORP, People_Counter
import numpy as np
import cv2

def detect_people(frame, net, In, personIdx = 0):
    #grab the dimensions of the frame and initialize the list of results
    (H, W) = frame.shape[:2]
    results = []

    #construct a blob from the input frame and then perform a forward
    #pass of the YOLO object detector, giving us our boarding boxes
    #add associated probabilities
    blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(In)

    #initialize out lists of detected bounding boxes, centroids and
    #confidence, respectively
    boxes = []
    centroids = []
    confidences = []

    #loop over each of the layer outputs
    for output in layerOutputs:
        #for detection in output;
        for detection in output:
            #extract the class ID and confidence[i.e., probability)
            #of the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]

            #filter detections by (1) ensuring that the object
            #detected was a person and (2) that the minimum
            #confidence is met
            if classID == personIdx and confidence > MIN_CORP:
                #scale the bounding box coordinates back relative to
                #the size of the image, keeping in mind that YOLO
                #actually returns the center (x,y) -coordinates of
                #the bounding box followed by the boxes' width and height
                box = detection[0:4] * np.array([W, H, W, H])
                (centerX, centerY, width, height) = box.astype("int")

                #use the center (x,y) -coordinates to derive the top
                #and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                #update our list of bounding box coordinates,
                #centroids and confidences
                boxes.append([x, y, int(width), int(height)])
                centroids.append((centerX, centerY))
                confidences.append(float(confidence))

    #apply non-maxim suppression to suppress weak, overlapping bounding boxes
    idxs = cv2.dnn.NMSBoxes(boxes, confidence, MIN_CORP, NMS_THRESH)
    #print('Total people count:', len(idxs))
    #compute the total people counter
    #if People_Counter:
    #    human_count = "Human count: {}".format(len(idxs))
    #    cv2.putText(frame, human_count, (470, frame.shape[0] - 75), cv2.FONT_HERSHEY_SIMPLEX, 0.70, (0, 0, 0), 2)

    #ensure at least one detection exists
    if len(idxs) > 0:
        #loop over the indexes we are keeping
        for i in idxs.flatten():
            #extract the bounding box coordinates
            (x, y) = (boxes[i][0], boxes[i][1])
            (w, h) = (boxes[i][2], boxes[i][3])

            #update our results list to consist of the person
            #prediction probability, bounding box coordinates,
            #and the centroids
            r = (confidences[i], (x, y, x + w, y + h), centroids[i])
            results.append(r)

    #return the list of the results
    return results
The answer to your problem (as usual) lies in the response from the interpreter:
TypeError: Can't parse 'scores'. Input argument doesn't provide sequence protocol
scores is the second argument to cv2.dnn.NMSBoxes, which in your case is confidence. confidence is a single number; you can't iterate over it. You've made a typo: you probably wanted to pass confidences, which is a list.
Change your code to:
idxs = cv2.dnn.NMSBoxes(boxes, confidences, MIN_CORP, NMS_THRESH)

Real Time Face Recognition with LBP, Deep Learning and OpenCV

I am a novice in computer vision. I am trying to implement real-time face recognition with Local Binary Patterns, with the face detection part based on the deep learning dnn module. I am using the caltech_faces dataset and have added a folder with 20 photos of myself to it.
So, this is my code. I basically transformed the face recognition code for sample images into real-time face recognition by making some changes and additions.
I get the following error when executing the below code:
predName = le.inverse_transform([predictions[i]])[0]
^
TabError: inconsistent use of tabs and spaces in indentation
I checked all the tabs and indentation, and can't find what or where to fix. I kindly ask you to give me a hint on what to do. Thank you very much!
# import the necessary packages
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils.video import VideoStream
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import imutils
import time
import cv2
import os

# Creating our face detector
def detect_faces(net, frame, minConfidence=0.5):
    # grab the dimensions of the image and then construct a blob
    # from it
    (h, w) = frame.shape[:2]
    blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
        (104.0, 177.0, 123.0))

    # pass the blob through the network to obtain the face detections,
    # then initialize a list to store the predicted bounding boxes
    net.setInput(blob)
    detections = net.forward()
    boxes = []

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # extract the confidence (i.e., probability) associated with
        # the detection
        confidence = detections[0, 0, i, 2]

        # filter out weak detections by ensuring the confidence is
        # greater than the minimum confidence
        if confidence > minConfidence:
            # compute the (x, y)-coordinates of the bounding box for
            # the object
            box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")

            # update our bounding box results list
            boxes.append((startX, startY, endX, endY))

    # return the face detection bounding boxes
    return boxes

# Loading the CALTECH Faces dataset
def load_face_dataset(inputPath, net, minConfidence=0.5,
    minSamples=15):
    # grab the paths to all images in our input directory, extract
    # the name of the person (i.e., class label) from the directory
    # structure, and count the number of example images we have per
    # face
    imagePaths = list(paths.list_images(inputPath))
    names = [p.split(os.path.sep)[-2] for p in imagePaths]
    (names, counts) = np.unique(names, return_counts=True)
    names = names.tolist()

    # initialize lists to store our extracted faces and associated
    # labels
    faces = []
    labels = []

    # loop over the image paths
    for imagePath in imagePaths:
        # load the image from disk and extract the name of the person
        # from the subdirectory structure
        frame = cv2.imread(imagePath)
        name = imagePath.split(os.path.sep)[-2]

        # only process images that have a sufficient number of
        # examples belonging to the class
        if counts[names.index(name)] < minSamples:
            continue

        # perform face detection
        boxes = detect_faces(net, frame, minConfidence)

        # loop over the bounding boxes
        for (startX, startY, endX, endY) in boxes:
            # extract the face ROI, resize it, and convert it to
            # grayscale
            faceROI = frame[startY:endY, startX:endX]
            faceROI = cv2.resize(faceROI, (47, 62))
            faceROI = cv2.cvtColor(faceROI, cv2.COLOR_BGR2GRAY)

            # update our faces and labels lists
            faces.append(faceROI)
            labels.append(name)

    # convert our faces and labels lists to NumPy arrays
    faces = np.array(faces)
    labels = np.array(labels)

    # return a 2-tuple of the faces and labels
    return (faces, labels)

# Implementing Local Binary Patterns for face recognition
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--input", type=str, required=True,
#     help="path to input directory of images")
# ap.add_argument("-f", "--face", type=str,
#     default="face_detector",
#     help="path to face detector model directory")
# ap.add_argument("-c", "--confidence", type=float, default=0.5,
#     help="minimum probability to filter weak detections")
# args = vars(ap.parse_args())

# since we are using Jupyter Notebooks we can replace our argument
# parsing code with *hard coded* arguments and values
args = {
    "input": "caltech_faces",
    "face": "face_detector",
    "confidence": 0.5,
}

# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
weightsPath = os.path.sep.join([args["face"],
    "res10_300x300_ssd_iter_140000.caffemodel"])
net = cv2.dnn.readNet(prototxtPath, weightsPath)

# load the CALTECH faces dataset
print("[INFO] loading dataset...")
(faces, labels) = load_face_dataset(args["input"], net,
    minConfidence=0.5, minSamples=20)
print("[INFO] {} images in dataset".format(len(faces)))

# encode the string labels as integers
le = LabelEncoder()
labels = le.fit_transform(labels)

# construct our training and testing split
(trainX, testX, trainY, testY) = train_test_split(faces,
    labels, test_size=0.25, stratify=labels, random_state=42)

# train our LBP face recognizer
print("[INFO] training face recognizer...")
recognizer = cv2.face.LBPHFaceRecognizer_create(
    radius=2, neighbors=16, grid_x=8, grid_y=8)
start = time.time()
recognizer.train(trainX, trainY)
end = time.time()
print("[INFO] training took {:.4f} seconds".format(end - start))

# initialize the list of predictions and confidence scores
print("[INFO] gathering predictions...")
predictions = []
confidence = []
start = time.time()

# loop over the test data
for i in range(0, len(testX)):
    # classify the face and update the list of predictions and
    # confidence scores
    (prediction, conf) = recognizer.predict(testX[i])
    predictions.append(prediction)
    confidence.append(conf)

# measure how long making predictions took
end = time.time()
print("[INFO] inference took {:.4f} seconds".format(end - start))

# show the classification report
print(classification_report(testY, predictions,
    target_names=le.classes_))

# initialize the video stream and allow the cammera sensor to warmup
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)

# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    face = vs.read()
    face = imutils.resize(face, width=400)

    # loop over the detections
    for i in range(0, detections.shape[2]):
        # grab the predicted name and actual name
        predName = le.inverse_transform([predictions[i]])[0]
        actualName = le.classes_[testY[i]]

        # draw the predicted name and actual name on the image
        cv2.putText(face, "pred: {}".format(predName), (5, 25),
            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
        cv2.putText(face, "actual: {}".format(actualName), (5, 60),
            cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)

        # display the predicted name, actual name, and confidence of the
        # prediction (i.e., chi-squared distance; the *lower* the distance
        # is the *more confident* the prediction is)
        print("[INFO] prediction: {}, actual: {}, confidence: {:.2f}".format(predName, actualName, confidence[i]))

    # show the output frame
    cv2.imshow("Face", face)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
You have a for loop that contains nothing but a comment just before the line that causes the problem:
# loop over the detections
for i in range(0, detections.shape[2]):
    # grab the predicted name and actual name
    predName = le.inverse_transform([predictions[i]])[0]
    actualName = le.classes_[testY[i]]
The problem comes from this empty loop; if you have a loop, it must contain at least one line of actual code. So delete the loop or add the pass keyword inside it.
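A minimal illustration of that suggestion, reusing the question's loop:

# A block whose body is only a comment is an indentation error;
# 'pass' gives it a valid (empty) body until the real code is added.
for i in range(0, detections.shape[2]):
    # grab the predicted name and actual name
    pass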
I am using Google Colab for this. First of all, make sure you have OpenCV installed. You can install it using pip:
pip install opencv-python
Before detecting the face, we have to open the web camera using Google Colab.
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode

def take_photo(filename='photo.jpg', quality=0.8):
    js = Javascript('''
        async function takePhoto(quality) {
            const div = document.createElement('div');
            const capture = document.createElement('button');
            capture.textContent = 'Capture';
            div.appendChild(capture);

            const video = document.createElement('video');
            video.style.display = 'block';
            const stream = await navigator.mediaDevices.getUserMedia({video: true});

            document.body.appendChild(div);
            div.appendChild(video);
            video.srcObject = stream;
            await video.play();

            // Resize the output to fit the video element.
            google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);

            // Wait for Capture to be clicked.
            await new Promise((resolve) => capture.onclick = resolve);

            const canvas = document.createElement('canvas');
            canvas.width = video.videoWidth;
            canvas.height = video.videoHeight;
            canvas.getContext('2d').drawImage(video, 0, 0);
            stream.getVideoTracks()[0].stop();
            div.remove();
            return canvas.toDataURL('image/jpeg', quality);
        }
    ''')
    display(js)
    data = eval_js('takePhoto({})'.format(quality))
    binary = b64decode(data.split(',')[1])
    with open(filename, 'wb') as f:
        f.write(binary)
    return filename
You have to run the below code as the second step.
from IPython.display import Image

try:
    filename = take_photo()
    print('Saved to {}'.format(filename))
    # Show the image which was just taken.
    display(Image(filename))
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do not
    # grant the page permission to access it.
    print(str(err))
After running these two code cells, the web camera opens and you can capture a photo.
The photo is saved as photo.jpg.
Face detection using Haar cascades is a machine learning-based approach where a cascade function is trained with a set of input data. OpenCV already contains many pre-trained classifiers for face, eyes, smiles, etc. Today we will be using the face classifier. You can experiment with other classifiers as well.
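A small sketch of that idea, assuming the photo captured above was saved as photo.jpg (the cascade XML file ships with the opencv-python package under cv2.data.haarcascades):

import cv2

# load the pre-trained frontal-face Haar cascade bundled with OpenCV
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

img = cv2.imread('photo.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# detectMultiScale returns one (x, y, w, h) rectangle per detected face
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
for (x, y, w, h) in faces:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imwrite('faces.jpg', img)
print('Detected {} face(s)'.format(len(faces)))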

How to apply a YOLO model to a directory of images at the same time

I am new to object detection. I am using a pretrained YOLO model from Google and I want to detect a single object class from the COCO dataset (in this case, person).
The program works if I give the photo name as input. But how can this be applied to multiple images at the same time, for example if there are 100 images in a directory?
The code I use:
import cv2
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os

image = plt.imread('images/kite.jpg')

classes = None
with open('coco.names', 'r') as f:
    classes = [line.strip() for line in f.readlines()]

Width = image.shape[1]
Height = image.shape[0]

# read pre-trained model and config file
net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')

# create input blob
# set input blob for the network
net.setInput(cv2.dnn.blobFromImage(image, 0.00392, (416,416), (0,0,0), True, crop=False))

# run inference through the network
# and gather predictions from output layers
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
outs = net.forward(output_layers)

class_ids = []
confidences = []
boxes = []

#create bounding box
for out in outs:
    for detection in out:
        scores = detection[5:]
        class_id = np.argmax(scores)
        confidence = scores[class_id]
        if confidence > 0.4:
            center_x = int(detection[0] * Width)
            center_y = int(detection[1] * Height)
            w = int(detection[2] * Width)
            h = int(detection[3] * Height)
            x = center_x - w / 2
            y = center_y - h / 2
            class_ids.append(class_id)
            confidences.append(float(confidence))
            boxes.append([x, y, w, h])

indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.1, 0.1)

#check if is people detection
for i in indices:
    i = i[0]
    box = boxes[i]
    if class_ids[i]==0:
        label = str(classes[class_id])
        cv2.rectangle(image, (round(box[0]),round(box[1])), (round(box[0]+box[2]),round(box[1]+box[3])), (0, 0, 0), 2)
        cv2.putText(image, label, (round(box[0])-10,round(box[1])-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
        print(label)

cv2.imshow('My window',image)
cv2.waitKey(0)
cv2.destroyAllWindows()
#!/usr/bin/env python
# coding: utf-8
# ## Create test command
# Creates a test command, stringing together many sub commands using test image folder and output image folder as inputs
# In[1]:
import os
# ### Set input folder, output folder
# - input folder should contain all the .jpg files you want to predict
# - output folder will save your predictions
# - both folders should exist before you run this script
# In[2]:
full_command = ''
skeleton = './darknet detector test cfg/coco.data cfg/yolo.cfg yolo.weights {};cp predictions.png {};'
input_image_path = './predict_these'
output_image_path = './predict_results'
# ### Generate command
# In[4]:
for input_filename in os.listdir(input_image_path):
    if '.jpg' in input_filename:
        test_full_path = os.path.join(input_image_path, input_filename)
        output_filename = 'prediction_' + input_filename
        command_partial = skeleton.format(test_full_path, os.path.join(output_image_path, output_filename))
        full_command += command_partial
# ### Preview command
# In[5]:
print(full_command)
# ### Write to txt file in same directory as this notebook
# In[6]:
with open('command.txt', 'w') as f:
    f.write(full_command)
    f.close()
# ### Run terminal command from this notebook (optional)
# - Alternatively can copy and paste from .txt file created above and run in terminal.
# In[ ]:
# os.system(full_command)
# In[ ]:
https://danielcorcoranssql.wordpress.com/2018/12/23/yolov3-introduction-and-training-our-own-model/
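If you would rather stay inside OpenCV instead of shelling out to darknet, the question's own pipeline can simply be wrapped in a loop over the folder. A sketch reusing the question's setup (the folder names here are assumptions):

import glob
import os
import cv2

input_dir = 'images'
output_dir = 'predictions'   # assumed output folder
os.makedirs(output_dir, exist_ok=True)

for path in glob.glob(os.path.join(input_dir, '*.jpg')):
    image = cv2.imread(path)
    # ... run the detection / NMS / drawing code from the question here,
    # operating on `image` instead of the hard-coded kite.jpg ...
    out_path = os.path.join(output_dir, 'pred_' + os.path.basename(path))
    cv2.imwrite(out_path, image)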

Saving bounding box coordinates and images based on the class type into a different folder

I'm using OpenCV + Python to apply a deep learning model and classify objects into 8 categories (animal types), namely cat, dog, horse, deer, bear, lizard, monkey, and no object detected (when there is no object detected in the image).
I have a folder with images of all types of animals in it. I read all the images from that folder and then apply the deep learning model to extract the bounding box coordinates of each object in each image.
I want to first categorize each image by putting each type of animal image in the related folder, and second, save the bounding box coordinates of that image in the same folder. For example, if the network detected a cat, I want to save that image and the corresponding coordinates (as a .txt text file) in the cat folder, and if it did not find any of those objects in the image, just put it in the no-object-detected folder.
My question is how can I save the original image and the bounding box coordinates of that object inside the 8 category folder?
here is my code:
import cv2
import numpy as np
import os
import glob
import argparse
import time

img_dir="/path/imgt/"
data_path=os.path.join(img_dir,'*g')
files=glob.glob(data_path)
data=[]
i = 0
for f1 in files:
    image=cv2.imread(f1)
    data.append(image)

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to input image")
ap.add_argument("-y", "--yolo", required=True,
    help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
    help="threshold when applyong non-maxima suppression")
args = vars(ap.parse_args())

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["yolo"], "obj.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# initialize a list of colors to represent each possible class label
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
    dtype="uint8")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["yolo"], "yolo-obj_last.weights"])
configPath = os.path.sep.join([args["yolo"], "yolo-obj.cfg"])

# load our YOLO object detector trained on COCO dataset (80 classes)
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# load our input image and grab its spatial dimensions
# image = cv2.imread(args["image"])
(H, W) = image.shape[:2]

# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

# construct a blob from the input image and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes and
# associated probabilities
blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
    swapRB=True, crop=False)
net.setInput(blob)
start = time.time()
layerOutputs = net.forward(ln)
end = time.time()

# show timing information on YOLO
print("[INFO] YOLO took {:.6f} seconds".format(end - start))

# initialize our lists of detected bounding boxes, confidences, and
# class IDs, respectively
boxes = []
confidences = []
classIDs = []

# loop over each of the layer outputs
for output in layerOutputs:
    # loop over each of the detections
    for detection in output:
        # extract the class ID and confidence (i.e., probability) of
        # the current object detection
        scores = detection[5:]
        classID = np.argmax(scores)
        confidence = scores[classID]

        # filter out weak predictions by ensuring the detected
        # probability is greater than the minimum probability
        if confidence > args["confidence"]:
            # scale the bounding box coordinates back relative to the
            # size of the image, keeping in mind that YOLO actually
            # returns the center (x, y)-coordinates of the bounding
            # box followed by the boxes' width and height
            box = detection[0:4] * np.array([W, H, W, H])
            (centerX, centerY, width, height) = box.astype("int")

            # use the center (x, y)-coordinates to derive the top and
            # and left corner of the bounding box
            x = int(centerX - (width / 2))
            y = int(centerY - (height / 2))

            # update our list of bounding box coordinates, confidences,
            # and class IDs
            boxes.append([x, y, int(width), int(height)])
            confidences.append(float(confidence))
            classIDs.append(classID)

# apply non-maxima suppression to suppress weak, overlapping bounding
# boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, args["confidence"],
    args["threshold"])

# ensure at least one detection exists
if len(idxs) > 0:
    # loop over the indexes we are keeping
    for i in idxs.flatten():
        # extract the bounding box coordinates
        (x, y) = (boxes[i][0], boxes[i][1])
        (w, h) = (boxes[i][2], boxes[i][3])

        # draw a bounding box rectangle and label on the image
        color = [int(c) for c in COLORS[classIDs[i]]]
        cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
        text = "{}: {:.4f}".format(LABELS[classIDs[i]], confidences[i])
        cv2.putText(image, text, (x, y - 7), cv2.FONT_HERSHEY_SIMPLEX,0.6, color, 2)

        path = '/path/imgr/' + LABELS[classIDs[i]] + '/'
        cv2.imwrite(os.path.join(path, 'image' + str(i) + '.jpg'), image)
        with open(os.path.join(path, 'image' + str(i) + '.txt'), 'a+') as f:
            f.write(str(classIDs[i]) + ' ' + str(x) + ' ' + str(y) + ' ' + str(w) + ' ' + str(h))
What does the text file look like?
Create a .txt file for each .jpg image file, in the same directory and with the same name but with the .txt extension, and put into that file the object number and object coordinates on the image, one line per object:
<object-class> <x> <y> <width> <height>
Where:
<object-class> - integer number of the object, from 0 to (classes-1)
<x> <y> <width> <height> - float values relative to the width and height of the image; they can range from (0.0 to 1.0]
For example: <x> = <absolute_x> / <image_width> or <height> = <absolute_height> / <image_height>
Attention: <x> <y> are the center of the rectangle (not the top-left corner).
For example, for img1.jpg an img1.txt will be created containing:
1 0.716797 0.395833 0.216406 0.147222
0 0.687109 0.379167 0.255469 0.158333
1 0.420312 0.395833 0.140625 0.166667
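In code, converting an absolute pixel-space box back into that normalized, centre-based layout could look like this (a small sketch; x, y are the top-left pixel coordinates, w, h the box size, and W, H the image dimensions):

def to_darknet_line(class_id, x, y, w, h, W, H):
    # '<object-class> <x_center> <y_center> <width> <height>', all relative to the image size
    cx = (x + w / 2) / W
    cy = (y + h / 2) / H
    return '{} {:.6f} {:.6f} {:.6f} {:.6f}'.format(class_id, cx, cy, w / W, h / H)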
Maybe something like this:
path = os.path.join('/path/imgr/', LABELS[classID], image_name)
cv2.imwrite(path + '.jpg', image)
with open(path + '.txt', 'a+') as f:
    f.write(str(classID) + ' ' + str(detection[0]) + ' ' + str(detection[1]) + ' ' + str(detection[2]) + ' ' + str(detection[3]) + '\n')
You may have multiple objects in an image, in which case it should write to each of the relevant folders and append to the text file if it exists.
image_name will be something you generate, you can use the name that you are reading in, or a counter.
This snippet should go somewhere under the if statement:
if confidence > args["confidence"]:
I would put it at the end. You may need to make minor adjustments, but that is the gist.
More explicitly:
import cv2
import numpy as np
import os
import glob
import argparse
import time

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
#ap.add_argument("-i", "--image", required=True,
#    help="path to input image")
ap.add_argument("-y", "--yolo", required=True,
    help="base path to YOLO directory")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
    help="minimum probability to filter weak detections")
ap.add_argument("-t", "--threshold", type=float, default=0.3,
    help="threshold when applying non-maxima suppression")
args = vars(ap.parse_args())

# load the COCO class labels our YOLO model was trained on
labelsPath = os.path.sep.join([args["yolo"], "obj.names"])
LABELS = open(labelsPath).read().strip().split("\n")

# derive the paths to the YOLO weights and model configuration
weightsPath = os.path.sep.join([args["yolo"], "yolo-obj_last.weights"])
configPath = os.path.sep.join([args["yolo"], "yolo-obj.cfg"])

# load our YOLO object detector trained on COCO dataset (80 classes)
print("[INFO] loading YOLO from disk...")
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

img_dir = "/path/imgt/"
data_path = os.path.join(img_dir, '*g')
files = glob.glob(data_path)

for f1 in files:
    # load our input image and grab its spatial dimensions
    image = cv2.imread(f1)

    # construct a blob from the input image and then perform a forward
    # pass of the YOLO object detector, giving us our bounding boxes and
    # associated probabilities
    blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),
        swapRB=True, crop=False)
    net.setInput(blob)
    layerOutputs = net.forward(ln)

    # loop over each of the layer outputs
    for output in layerOutputs:
        # loop over each of the detections
        for detection in output:
            # extract the class ID and confidence (i.e., probability) of
            # the current object detection
            scores = detection[5:]
            classID = np.argmax(scores)
            confidence = scores[classID]
            box = detection[0:4]
            # get upper left corner
            box[0] = box[0] - box[2]/2
            box[1] = box[1] - box[3]/2

            # filter out weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > args["confidence"]:
                # write output files
                class_dir = os.path.join('/path/imgr/', LABELS[classID])
                if not os.path.exists(class_dir):
                    os.makedirs(class_dir)
                path = os.path.join(class_dir, f1.split('/')[-1][:-4])
                cv2.imwrite(path + '.jpg', image)
                with open(path + '.txt', 'a+') as f:
                    f.write(str(classID) + ' ' + str(box[0]) + ' ' + str(box[1]) + ' ' + str(box[2]) + ' ' + str(box[3]) + '\n')
Read through it and make sure you understand what each part in the for loop is doing. Once you are comfortable with this minimal example you could add back in the non-maximal suppression and drawing the bounding boxes if you like.
