OpenCV + Arduino (face tracking) problem - Python

This is a face tracker made with OpenCV and Arduino, but something is wrong: the code stops by itself shortly after it starts running.
My full code is at this URL: https://pastebin.com/Zeb2FMqc
The relevant part:
def detectjob():
    while True:
        ret, img = cap.read()
        if ret:
            cv2.namedWindow("img", cv2.WINDOW_AUTOSIZE)
            cv2.resizeWindow('img', 500, 500)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 5)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = img[y:y + h, x:x + w]
                arr = {y: y + h, x: x + w}  # note: builds a dict that is never used
                xx = int(x + (x + h)) / 2   # note: mixes x with h
                print(xx)
                yy = int(y + (y + w)) / 2   # note: mixes y with w
                print(yy)
                data = "X{0:f}Y{0:f}".format(xx, yy) + 'c'  # note: "{0}" twice, so yy is never sent
                print(data)
                arduino.write(data.encode())
            cv2.imshow('img', img)
            k = cv2.waitKey(30) & 0xff
            if k == 27:
                break
    cap.release()
    cv2.destroyAllWindows()
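For comparison, here is a minimal sketch of how the centre computation and the serial message are usually written (assuming `arduino` is an open pyserial `serial.Serial` port and the Arduino sketch expects messages of the form X...Y...c, as the code above implies; the port name and baud rate below are placeholders):

import cv2
import serial

arduino = serial.Serial('/dev/ttyUSB0', 9600)  # placeholder port and baud rate
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)

while True:
    ret, img = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    for (x, y, w, h) in face_cascade.detectMultiScale(gray, 1.3, 5):
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 5)
        xx = x + w // 2  # horizontal centre of the face box
        yy = y + h // 2  # vertical centre of the face box
        data = "X{0:f}Y{1:f}c".format(xx, yy)  # distinct indices, so Y carries yy
        arduino.write(data.encode())
    cv2.imshow('img', img)
    if cv2.waitKey(30) & 0xff == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()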


Alternate to cv2.waitKey()

I'm using:
OS: Ubuntu
Python: 3.8*
OpenCV version: 4.6.0
What I'm trying to do is capture the emotion and save the feed. For that I've used OpenCV 4.0.
The code I have:
while True:
    ret, frame = cap.read()
    if not ret:  # check ret before using the frame
        break
    frame = cv2.resize(frame, (720, 480))
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    rects = detector(grayFrame, 0)
    for rect in rects:
        shape = predictor(grayFrame, rect)
        points = shapePoints(shape)
        (x, y, w, h) = rectPoints(rect)
        grayFace = grayFrame[y:y + h, x:x + w]
        try:
            grayFace = cv2.resize(grayFace, (emotionTargetSize))
        except:
            continue
        grayFace = grayFace.astype('float32')
        grayFace = grayFace / 255.0
        grayFace = (grayFace - 0.5) * 2.0
        grayFace = np.expand_dims(grayFace, 0)
        grayFace = np.expand_dims(grayFace, -1)
        emotion_prediction = emotionClassifier.predict(grayFace)
        emotion_probability = np.max(emotion_prediction)
        if emotion_probability > 0.36:
            emotion_label_arg = np.argmax(emotion_prediction)
            color = emotions[emotion_label_arg]['color']
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            cv2.line(frame, (x, y + h), (x + 20, y + h + 20),
                     color,
                     thickness=2)
            cv2.rectangle(frame, (x + 20, y + h + 20), (x + 110, y + h + 40),
                          color, -1)
            cv2.putText(frame, emotions[emotion_label_arg]['emotion'],
                        (x + 25, y + h + 36), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (255, 255, 255), 1, cv2.LINE_AA)
        else:
            color = (255, 255, 255)
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
    out.write(frame)
    # Wait for user input - Esc (27) stops the loop
    k = cv2.waitKey(1)
    if k == 27:
        break

cap.release()
if args["isVideoWriter"] == True:
    videoWrite.release()  # note: frames are written to `out` above, but `videoWrite` is released here
cv2.destroyAllWindows()
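As an aside, the loop writes to `out` but releases `videoWrite`; the two names probably refer to the same writer. A minimal VideoWriter setup consistent with the loop above might look like this (the filename, codec, and FPS are assumptions; the frame size must match the (720, 480) resize):

import cv2

cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'MJPG')           # assumed codec
out = cv2.VideoWriter('output.avi', fourcc, 20.0,  # assumed filename and FPS
                      (720, 480))                  # must match the resized frames

ret, frame = cap.read()
if ret:
    frame = cv2.resize(frame, (720, 480))
    out.write(frame)

cap.release()
out.release()  # release the same object that was written to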
Here the issue is with the wait key: when I hit Esc, the program does not exit. I've tried all the methods I could find; the only one that works is a keyboard interrupt, i.e. Ctrl+C.
Method 1: not working :(
c = cv2.waitKey(0) % 256
if c == ord('a'):
    break
Method 2: not working :(
if cv2.waitKey(0) & 0xFF == ord('q'):
    break
Am I missing anything? Thanks in advance.
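A detail worth checking here (an observation, not from the original thread): cv2.waitKey() only sees key presses while an OpenCV window has keyboard focus, and waitKey(0) blocks until a key arrives, so Esc pressed in the terminal does nothing. A minimal sketch of an exit check that is usually robust on Linux builds:

import cv2

cap = cv2.VideoCapture(0)
cv2.namedWindow("frame")  # click this window so it has keyboard focus

while True:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow("frame", frame)
    k = cv2.waitKey(1) & 0xFF  # mask to one byte; -1 (no key) becomes 255
    if k in (27, ord('q')):    # exit on Esc or q
        break

cap.release()
cv2.destroyAllWindows()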

How can I make a delay in Python without affecting the video camera?

I want to set a delay just before my for loop gets executed. However, doing so freezes my webcam.
while name != "welcome":
    status, img = webcam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = load.detectMultiScale(gray, 1.3, 5)
    cv2.imshow("DEMO", img)
    cv2.waitKey(10)
    time.sleep(10)  # blocks here for 10 s, so no new frames are read and the window freezes
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        id, conf = demo(gray[y:y + h, x:x + w])
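A common way around this (a sketch under the same assumptions as the post, i.e. `load` is a Haar cascade and `demo` does the recognition): keep reading and showing frames on every iteration, and gate the processing on elapsed wall-clock time instead of blocking with time.sleep(), which stalls webcam.read():

import time
import cv2

webcam = cv2.VideoCapture(0)
load = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
start = time.time()
DELAY = 10  # seconds to wait before detection begins

while True:
    status, img = webcam.read()
    if not status:
        break
    if time.time() - start >= DELAY:  # run detection only once the delay has elapsed
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        for (x, y, w, h) in load.detectMultiScale(gray, 1.3, 5):
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
    cv2.imshow("DEMO", img)  # the preview keeps refreshing during the delay
    if cv2.waitKey(10) & 0xFF == 27:  # Esc quits
        break

webcam.release()
cv2.destroyAllWindows()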

OpenCV giving a C++ exception error with LBPHFaceRecognizer

I am trying face recognition with OpenCV's LBPH, with a GUI made in tkinter. The first time, my program runs fine, but when I exit the camera with the 'q' button and start it again with the push button in the GUI, it gives an error. I have tried many workarounds but still have no answer. Can someone help me out?
Here is my code:
recognizer.read('C:/data.yml')
id = 0
# set text style
fontface = cv2.FONT_HERSHEY_SIMPLEX
fontscale = 1
fontcolor = (203, 23, 252)
cam = cv2.VideoCapture(0)
name_to_track = value_.get()
print(name_to_track)
# get data from sqlite by ID
while True:
    # camera read
    ret, img = cam.read()
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    result = face.process(rgb)
    if result.detections:
        for id_, detect in enumerate(result.detections):
            bbox_ = detect.location_data.relative_bounding_box
            ih, iw, ic = img.shape
            bbox = int(bbox_.xmin * iw), int(bbox_.ymin * ih), \
                   int(bbox_.width * iw), int(bbox_.height * ih)
            x = bbox[0]
            y = bbox[1]
            w = bbox[2]
            h = bbox[3]
            cv2.rectangle(img, bbox, (255, 0, 255), 2)
            img_size = gray[y:y + h, x:x + w]
            img_size = np.array(img_size)
            print("x:" + str(x) + " y:" + str(y))
            if x < 0 or y < 0 or w < 0 or h < 0:
                continue
            id, conf = recognizer.predict(gray[y:y + h, x:x + w])
            # print(conf)
            if conf < 50:
                profile = getProfile(id)
                # set text to window
                if profile is not None:
                    # cv2.PutText(cv2.fromarray(img), str(id), (x+y+h), font, (0,0,255), 2)
                    cv2.putText(img, "ID: " + str(profile[0]) + ' Acc:' + str(round(float(1 - conf / 100), 2)),
                                (x, y + h + 30), fontface, fontscale, fontcolor, 2)
                    if profile[1] == name_to_track:
                        cv2.putText(img, "Tracking", (x, y + h + 60), fontface, fontscale, fontcolor, 2)
                    else:
                        cv2.putText(img, "Name " + str(profile[1]), (x, y + h + 60), fontface, fontscale, fontcolor, 2)
            else:
                cv2.putText(img, "Unknown", (x, y + h + 30), fontface, fontscale, [255, 0, 0], 2)
    cv2.imshow("face", img)
    if cv2.waitKey(1) == ord('q'):
        break
cam.release()
cv2.destroyAllWindows()
THANKS!!
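One pattern that often fixes this kind of second-run crash (a hedged sketch, not a confirmed diagnosis): when the loop is restarted from the GUI button, the new cv2.VideoCapture may deliver empty frames for a moment, or fail to open if the previous capture was not released, and passing an empty image to cvtColor() or predict() raises the C++ exception. Guarding the reads costs little:

import cv2

cam = cv2.VideoCapture(0)
if not cam.isOpened():  # the device may still be held by the previous run
    raise RuntimeError("camera could not be opened")

while True:
    ret, img = cam.read()
    if not ret or img is None:  # skip empty frames instead of crashing in cvtColor
        continue
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # ... detection and recognizer.predict(...) as in the code above ...
    cv2.imshow("face", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cam.release()
cv2.destroyAllWindows()  # close windows so the next run starts clean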

Make an eye tracker that only scans the upper half of my face

My original idea can be seen in the code below, but it's still detecting eyes on the lower half of my face. The goal is to make it scan only the upper half of my face, thereby weeding out incorrect matches.
import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
cap = cv2.VideoCapture(0)  # sets up webcam

while 1:  # capture frame, convert to greyscale, look for faces
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:  # draws box around face
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        half_point = y
        print("half point: " + str(half_point))
        eyes = eye_cascade.detectMultiScale(roi_gray)  # looks for eyes
        for (ex, ey, ew, eh) in eyes:  # draws boxes around eyes
            check_point = ey
            print("check_point: " + str(check_point))
            if check_point > half_point:
                pass
            else:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    cv2.imshow('img', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
I only modified lines 15 and 16 (marked #Modified in the full code below):
cv2.rectangle(img, (x, y), (x + w, y + int(h / 2)), (255, 0, 0), 2)
roi_gray = gray[y:y + int(h / 2), x:x + w]
The full code:
import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
cap = cv2.VideoCapture(0)  # sets up webcam

while 1:  # capture frame, convert to greyscale, look for faces
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:  # draws box around face
        cv2.rectangle(img, (x, y), (x + w, y + int(h / 2)), (255, 0, 0), 2)  #Modified
        roi_gray = gray[y:y + int(h / 2), x:x + w]  #Modified
        roi_color = img[y:y + h, x:x + w]
        half_point = y
        print("half point: " + str(half_point))
        eyes = eye_cascade.detectMultiScale(roi_gray)  # looks for eyes
        for (ex, ey, ew, eh) in eyes:  # draws boxes around eyes
            check_point = ey
            print("check_point: " + str(check_point))
            if check_point > half_point:
                pass
            else:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
    cv2.imshow('img', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
But I recommend using dlib instead; it's more reliable.
Here is my example:
import numpy as np
import cv2
import dlib

cap = cv2.VideoCapture(0)
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
detector = dlib.get_frontal_face_detector()

def draw_on_frame(eye):
    coordinates = np.array([])
    for i in eye:
        x = landmarks.part(i).x
        y = landmarks.part(i).y
        cv2.circle(frame, (x, y), 3, (0, 0, 255), -1)
        coordinates = np.append(coordinates, [x, y])
    x1, y1, w1, h1 = cv2.boundingRect(coordinates.reshape(-1, 2).astype(int))
    cv2.rectangle(frame, (x1, y1), (x1 + w1, y1 + h1), (0, 255, 0), 1)
    return x1, y1, w1, h1

while cap.isOpened():
    ret, frame = cap.read()
    if ret == True:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = detector(gray)
        for face in faces:
            landmarks = predictor(gray, face)
            left_eye = range(36, 42)   # landmark indices of the left eye
            right_eye = range(42, 48)  # landmark indices of the right eye
            left = draw_on_frame(left_eye)
            right = draw_on_frame(right_eye)
            roi_left = frame[left[1]:left[1] + left[3], left[0]:left[0] + left[2]]
            roi_right = frame[right[1]:right[1] + right[3], right[0]:right[0] + right[2]]
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
cv2.destroyAllWindows()

OpenCV assertion failed with negative values

I am making a Face Liveliness detection program with OpenCV and Keras.
I am stuck with this error:
OpenCV assertion failed with negative values
I have already tried all the solutions suggested on Stack Overflow and in GitHub issues, but none of them worked in my case.
# imports implied by the snippet
import os
from collections import defaultdict
import cv2
import face_recognition
from tqdm import tqdm

# (tail of init(); its def line is not shown in the post)
video_capture = cv2.VideoCapture(0)
model = load_model()
print("[LOG] COLLECTING images.....")
images = []
for direc, _, files in tqdm(os.walk(dataset)):
    for file in files:
        if file.endswith("jpg"):
            images.append(os.path.join(direc, file))
return model, face_detector, open_eyes_detector, left_eye__detector, right_eye_detector, video_capture, images

def process_and_encode(images):
    known_encodings = []
    known_names = []
    print("[LOG] Encoding faces....")
    for image_path in tqdm(images):
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        boxes = face_recognition.face_locations(image, model='hog')
        encoding = face_recognition.face_encodings(image, boxes)
        name = image_path.split(os.path.sep)[-2]
        if len(encoding) > 0:
            known_encodings.append(encoding[0])
            known_names.append(name)
    return {"encodings": known_encodings, "names": known_names}

def detect_and_display(model, video_capture, face_detector, open_eyes_detector, left_eye_detector, right_eye_detector,
                       data, eyes_detected):
    frame = video_capture.read()  # note: read() returns a (ret, frame) tuple, not the frame alone
    try:
        frame = cv2.resize(frame, (0, 0), fx=0.6, fy=0.6)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        faces = face_detector.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(50, 50),
            flags=cv2.CASCADE_SCALE_IMAGE
        )
        for (x, y, w, h) in faces:
            encoding = face_recognition.face_encodings(rgb, [(y, x + w, y + h, x)])
            matches = face_recognition.compare_faces(data["encodings"], encoding)
            name = "Unknown"
            if True in matches:
                matchedIdxs = [i for (i, b) in enumerate(matches) if b]
                counts = {}
                for i in matchedIdxs:
                    name = data["names"][i]
                    counts[name] = counts.get(name, 0) + 1
                name = max(counts, key=counts.get)
            face = frame[y:y + h, x:x + w]
            gray_face = gray[y:y + h, x:x + w]
            eyes = []
            open_eyes_glasses = open_eyes_detector.detectMultiScale(
                gray_face,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            if len(open_eyes_glasses) == 2:
                eyes_detected[name] += '1'
                for (ex, ey, ew, eh) in open_eyes_glasses:
                    cv2.rectangle(face, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)
            # otherwise try detecting eyes using left_eye_detector and right_eye_detector,
            # which can detect open and closed eyes
            else:
                # separate the face into left and right sides
                left_face = frame[y:y + h, x + int(w / 2):x + w]
                left_face_gray = gray[y:y + h, x + int(w / 2):x + w]
                right_face = frame[y:y + h, x:x + int(w / 2)]
                right_face_gray = gray[y:y + h, x:x + int(w / 2)]
                # Detect the left eye
                left_eye = left_eye_detector.detectMultiScale(
                    left_face_gray,
                    scaleFactor=1.1,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags=cv2.CASCADE_SCALE_IMAGE
                )
                # Detect the right eye
                right_eye = right_eye_detector.detectMultiScale(
                    right_face_gray,
                    scaleFactor=1.1,
                    minNeighbors=5,
                    minSize=(30, 30),
                    flags=cv2.CASCADE_SCALE_IMAGE
                )
                eye_status = '1'  # we suppose the eyes are open
                # For each eye, check whether the eye is closed.
                # If one is closed we conclude the eyes are closed
                for (ex, ey, ew, eh) in right_eye:
                    color = (0, 255, 0)
                    pred = predict(right_face[ey:ey + eh, ex:ex + ew], model)
                    if pred == 'closed':
                        eye_status = '0'
                        color = (0, 0, 255)
                    cv2.rectangle(right_face, (ex, ey), (ex + ew, ey + eh), color, 2)
                for (ex, ey, ew, eh) in left_eye:
                    color = (0, 255, 0)
                    pred = predict(left_face[ey:ey + eh, ex:ex + ew], model)
                    if pred == 'closed':
                        eye_status = '0'
                        color = (0, 0, 255)
                    cv2.rectangle(left_face, (ex, ey), (ex + ew, ey + eh), color, 2)
                eyes_detected[name] += eye_status
            # Each time, we check if the person has blinked
            # If yes, we display their name
            if isBlinking(eyes_detected[name], 3):
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                # Display name
                y = y - 15 if y - 15 > 15 else y + 15
                cv2.putText(frame, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
        return frame
    except Exception as e:
        print(str(e))

if __name__ == "__main__":
    (model, face_detector, open_eyes_detector, left_eye_detector, right_eye_detector, video_capture, images) = init()
    data = process_and_encode(images)
    eyes_detected = defaultdict(str)
    while True:
        frame = detect_and_display(model, video_capture, face_detector, open_eyes_detector, left_eye_detector,
                                   right_eye_detector, data, eyes_detected)
        cv2.imshow("Face Liveness Detector", frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cv2.destroyAllWindows()
    video_capture.stop()  # note: cv2.VideoCapture has no stop(); release() is the usual call
Error message:
Expected cv::UMat for argument 'src'
Traceback (most recent call last):
  File "C:/Users/Saksham Dubey/PycharmProjects/FacePay/FaceLive.py", line 190, in <module>
    cv2.imshow("Face Liveness Detector", frame)
cv2.error: OpenCV(4.1.0) C:\projects\opencv-python\opencv\modules\highgui\src\window.cpp:352: error: (-215:Assertion failed) size.width>0 && size.height>0 in function 'cv::imshow'
[ WARN:0] terminating async callback
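A reading of the two messages (an editorial note, not from the original answer): video_capture.read() returns a (ret, frame) tuple, so frame inside detect_and_display() is a tuple and cv2.resize() raises "Expected cv::UMat for argument 'src'"; the except block then swallows that, the function returns None, and cv2.imshow() rejects None with the size.width>0 assertion. A minimal sketch of the guard:

import cv2

video_capture = cv2.VideoCapture(0)
ret, frame = video_capture.read()  # unpack the (ret, frame) tuple
if ret and frame is not None:      # only display a real image
    cv2.imshow("Face Liveness Detector", frame)
    cv2.waitKey(0)
video_capture.release()
cv2.destroyAllWindows()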
That could probably be because you try to use imshow() when there has been no imwrite() before. Not exactly a solution, but a working example. Take a look:
import cv2  # pip install opencv-python
import datetime
from cv2.data import haarcascades as hc
import requests

cap = cv2.VideoCapture(0)
faceCascade = cv2.CascadeClassifier("%s/haarcascade_frontalface_default.xml" % hc)
eye_cascade = cv2.CascadeClassifier('%s/haarcascade_eye.xml' % hc)
profile_cascade = cv2.CascadeClassifier('%s/haarcascade_profileface.xml' % hc)
fullbody_cascade = cv2.CascadeClassifier('%s/haarcascade_fullbody.xml' % hc)
smile_cascade = cv2.CascadeClassifier('%s/haarcascade_smile.xml' % hc)
eyesglasses_cascade = cv2.CascadeClassifier('%s/haarcascade_eye_tree_eyeglasses.xml' % hc)
mouth_cascade = cv2.CascadeClassifier('%s/haarcascade_mcs_mouth.xml' % hc)
filename = 'output/' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')

def recognizer():
    while True:
        ret, frame = cap.read()
        profile_count = recognize_profile(frame)
        face_count, eye_count = recognize_face(frame, True)
        cv2.imwrite('%s.png' % filename, frame)
        image = cv2.imread('%s.png' % filename)
        cv2.imshow('image', image)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()

def recognize_profile(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    profiles = profile_cascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
        # flags = cv2.CV_HAAR_SCALE_IMAGE
    )
    for (x, y, w, h) in profiles:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
    return len(profiles)

def recognize_face(frame, recognize_eyes=None):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30)
        # flags = cv2.CV_HAAR_SCALE_IMAGE
    )
    eyes = []
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        if recognize_eyes:
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            # draw bounding boxes around detected features
            for (ex, ey, ew, eh) in eyes:
                eye_center = (ex + ew // 2, ey + eh // 2)
                radius = int(round((ew + eh) * 0.25))
                cv2.circle(roi_color, eye_center, radius, (0, 0, 255), 4)
    return len(faces), len(eyes)

def snapshot():
    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()
        recognize_profile(frame)
        recognize_face(frame, True)
        cv2.imwrite('%s.png' % filename, frame)
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    cap.release()
    cv2.destroyAllWindows()

def live_video_recognizer():
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    out = cv2.VideoWriter('%s.avi' % filename, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10,
                          (frame_width, frame_height))
    while True:
        ret, frame = cap.read()
        recognize_profile(frame)
        recognize_face(frame, True)
        if ret is True:
            out.write(frame)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()

recognizer()
# snapshot()
# live_video_recognizer()
