Alpha blending a PNG overlay with OpenCV in Python: transparent areas show black - python

I'm trying to create an application with OpenCV that overlays glasses on my face. However, when the video appears, the glasses show black where the alpha layer should be transparent. Here is my code:
import cv2
from time import sleep

# faceCascade is assumed to be created earlier, e.g. with cv2.CascadeClassifier(...)
video_capture = cv2.VideoCapture(0)
anterior = 0
glasses = cv2.imread('Glasses_1.png')

def put_glasses(glasses, fc, x, y, w, h):
    face_width = w
    face_height = h
    glasses_width = int(face_width)
    glasses_height = int(face_height * 0.32857)
    glasses = cv2.resize(glasses, (glasses_width, glasses_height))
    for i in range(glasses_height):
        for j in range(glasses_width):
            for k in range(3):
                if glasses[i][j][k] < 235:
                    fc[y + i - int(-0.25 * face_height) - 1][x + j][k] = glasses[i][j][k]
    return fc

while True:
    if not video_capture.isOpened():
        print('Unable to load camera.')
        sleep(5)
        pass
    ret, frame = video_capture.read()
    if ret is True:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    else:
        continue
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(40, 40)
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, "Person Detected", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
        frame = put_glasses(glasses, frame, x, y, w, h)
I would be very grateful if anyone could help.

You read the PNG in BGR format, not BGRA or unchanged format, so the glasses image in your program does not have shape (h, w, 4) and the alpha channel is lost. You should read it with the flag cv2.IMREAD_UNCHANGED:
glasses = cv2.imread("xxx.png", cv2.IMREAD_UNCHANGED)
Maybe this link will help: How do I clear a white background in OpenCV with C++?
(Images in the original answer: the BGRA image, and the blending result.)
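As a minimal sketch of the blending step (not from the original answer; the file name and placement are assumptions, and the overlay must fit entirely inside the frame), once the image is loaded with its alpha channel you can blend it like this:
import cv2
import numpy as np

# Read with the alpha channel preserved; shape becomes (h, w, 4) for a transparent PNG.
glasses = cv2.imread('Glasses_1.png', cv2.IMREAD_UNCHANGED)

def overlay_bgra(background, overlay, x, y):
    # Alpha-blend a BGRA overlay onto a BGR background at position (x, y).
    h, w = overlay.shape[:2]
    roi = background[y:y + h, x:x + w].astype(float)
    colour = overlay[:, :, :3].astype(float)
    alpha = overlay[:, :, 3:].astype(float) / 255.0   # shape (h, w, 1), broadcasts over BGR
    blended = alpha * colour + (1.0 - alpha) * roi
    background[y:y + h, x:x + w] = blended.astype(np.uint8)
    return background
You would call something like this from put_glasses after resizing the glasses, instead of looping over individual pixels.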

Related

Problem with opencv (cv2) detecting motion in my code

I have written code to detect motion through a webcam, but whenever I start it, it detects the entire screen instead of the objects moving on the screen. I noticed that if I close the camera it seems to remove the detection. Here is the code:
import cv2

first_frame = None
video = cv2.VideoCapture(0)

while True:
    check, frame = video.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if first_frame is None:
        first_frame = gray
        continue
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
    (cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 4000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 225, 0), 3)
    cv2.imshow("Delta Frame", delta_frame)
    cv2.imshow("Capturing", gray)
    cv2.imshow("Threshold Frame", thresh_frame)
    cv2.imshow("Color Frame", frame)
    key = cv2.waitKey(5)
    if key == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
The code should detect only the moving objects on the screen.
I had a similar issue a while back when I started with OpenCV; the problem with the program is the first frame: it captures a dark screen because the camera has not started up yet. The difference cv2.absdiff computes between the first frame (first_frame) and the following frames (gray) is then large enough that cv2.findContours flags the whole screen as motion. This is caused by camera start-up delay.
It can be solved by adding a slight delay, with time.sleep(), between when the camera loads and when the program records the first frame (first_frame). Try this:
import cv2, time

first_frame = None
video = cv2.VideoCapture(0)

# The camera has some lag time, hence the video.read() outside the loop and the sleep.
video.read()
time.sleep(2)

while True:
    check, frame = video.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    if first_frame is None:
        first_frame = gray
        continue
    delta_frame = cv2.absdiff(first_frame, gray)
    thresh_frame = cv2.threshold(delta_frame, 30, 255, cv2.THRESH_BINARY)[1]
    thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
    (cnts, _) = cv2.findContours(thresh_frame.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in cnts:
        if cv2.contourArea(contour) < 4000:
            continue
        (x, y, w, h) = cv2.boundingRect(contour)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 225, 0), 3)
    cv2.imshow("Delta Frame", delta_frame)
    cv2.imshow("Capturing", gray)
    cv2.imshow("Threshold Frame", thresh_frame)
    cv2.imshow("Color Frame", frame)
    key = cv2.waitKey(5)
    if key == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
time.sleep() takes care of the camera's start-up lag. Try the code; it worked here.

Webcam window always crashes while trying to detect a face

I'm trying to make a simple Haar cascade program to detect a face.
faceCascade = cv2.CascadeClassifier('D:\\Python\\Python37\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalcatface.xml')
body_cascade = cv2.CascadeClassifier('haarcascade_upperbody.xml')

video_capture = cv2.VideoCapture(0)
img_counter = 0

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.5,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('FaceDetection', frame)
    k = input()
    # ESC Pressed
    if k % 256 == 27:
        break

video_capture.release()
cv2.destroyAllWindows()
But every time I launch it, my webcam window just freezes and crashes :(
My PC is definitely powerful enough, so why could this happen?
I have made a few changes to your code. For a human face use haarcascade_frontalface_default.xml, and for a cat face use haarcascade_frontalcatface.xml. The window also freezes because input() blocks the loop waiting for console input; use cv2.waitKey() instead. Try the code below and it will work like a charm :)
import cv2

# faceCascade = cv2.CascadeClassifier('D:\\Python\\Python37\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalcatface.xml')
faceCascade = cv2.CascadeClassifier('D:\\Python\\Python37\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalface_default.xml')
body_cascade = cv2.CascadeClassifier('haarcascade_upperbody.xml')

video_capture = cv2.VideoCapture(0)
img_counter = 0

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.5,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imshow('FaceDetection', frame)
    # k = input()
    k = cv2.waitKey(1) & 0xFF
    # ESC Pressed
    if k == 27:
        break
    # if k % 256 == 27:
    #     break

video_capture.release()
cv2.destroyAllWindows()

Cropping live video input from a webcam for facial identification

I am working with OpenCV in Python for facial identification and I want to crop the live video from my webcam to just output the face it recognizes.
I have tried using ROI but I do not know how to correctly implement it.
import cv2
import sys

cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

video_capture = cv2.VideoCapture(0)

while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=5,
        minSize=(30, 30),
        flags=cv2.CASCADE_SCALE_IMAGE
    )
    # Draw a rectangle around the faces
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        roi = frame[y:y+h, x:x+w]
        cropped = frame[roi]
    # Display the resulting frame
    cv2.imshow('Face', cropped)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
This is the error I get:
Traceback (most recent call last):
File "C:/Users/Ben/Desktop/facerecog/facerecog2.py", line 31, in <module>
cv2.imshow('Face', cropped)
cv2.error: OpenCV(4.1.1) C:\projects\opencv-python\opencv\modules\core\src\array.cpp:2492: error: (-206:Bad flag (parameter or structure field)) Unrecognized or unsupported array type in function 'cvGetMat'
You get the cropped image with
cropped = frame[y:y+h, x:x+w]
and then you can display it.
But sometimes there is no face in the frame, so cropped is never created and you get an error. It is better to create this variable before the for loop and check it after the loop:
cropped = None

for (x, y, w, h) in faces:
    cropped = frame[y:y+h, x:x+w]

if cropped is not None:
    cv2.imshow('Face', cropped)
#else:
#    cv2.imshow('Face', frame)
or
if len(faces) > 0:
    (x, y, w, h) = faces[0]
    cropped = frame[y:y+h, x:x+w]
    cv2.imshow('Face', cropped)
I don't know what you want to do if there are many faces in the frame.
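If several faces are detected, one possible option (just a sketch, not part of the original answer) is to crop the largest detection:
if len(faces) > 0:
    # Pick the detection with the largest bounding-box area.
    (x, y, w, h) = max(faces, key=lambda f: f[2] * f[3])
    cropped = frame[y:y+h, x:x+w]
    cv2.imshow('Face', cropped)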

OpenCV - Function to paint text to the Canvas

I am detecting a face from the camera and drawing a rectangle around it.
Code below:
face_cascade = cv2.CascadeClassifier('face_classifier.xml')

def detect(gray, frame):
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:  # For each detected face:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
    return frame

video_capture = cv2.VideoCapture(0)

while True:
    _, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canvas = detect(gray, frame)
    cv2.imshow('Video', canvas)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Are there any functions in OpenCV that can print text?
When it detects a face on the screen, can it say "Hi"?
Thank you for your time.
I should have done this before asking the question, but after some Googling and trying out a few things, here is the solution:
face_cascade = cv2.CascadeClassifier('face_classifier.xml')

def detect(gray, frame):
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:  # For each detected face:
        cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        font = cv2.FONT_HERSHEY_COMPLEX_SMALL
        cv2.putText(frame, 'Hello There!', (x+w, h), font, 0.8, (255, 0, 0), 2, cv2.LINE_AA)
    return frame

video_capture = cv2.VideoCapture(0)

while True:
    _, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canvas = detect(gray, frame)
    cv2.imshow('Video', canvas)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
This will add "Hello There!" to the top right corner of the detection frame rectangle.

Haar cascade face detection OpenCV

I used the following code to detect a face using the Haar cascade classifiers provided by OpenCV Python, but faces are not detected and the square around the face is not drawn. How can I solve this?
import cv2

index = raw_input("Enter the index No. : ")
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
cap = cv2.VideoCapture(0)
cont = 0

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.1,
        minNeighbors=10,
        minSize=(30, 30),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    for (x, y, w, h) in faces:
        #cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
    # Display the resulting frame
    cv2.imshow('frame', frame)
    inpt = cv2.waitKey(1)
    if inpt & 0xFF == ord('q'):
        break
    elif inpt & 0xFF == ord('s'):
        #name='G:\XCODRA\Integrated_v_01\EigenFaceRecognizer\img2'+index+"."+(str(cont))+".png"
        name = 'IC_image\\' + index + "." + (str(cont)) + ".png"
        resized = cv2.resize(gray, None, fx=200, fy=200, interpolation=cv2.INTER_AREA)
        img = cv2.equalizeHist(resized)
        cv2.imwrite(name, img)
        print cont
        cont += 1
Use the full path for the classifier.
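For example, if you are using the opencv-python package, something along these lines should work (a sketch; on older OpenCV builds without cv2.data you would spell out the absolute path to the XML file yourself):
import cv2

# Build an absolute path to the bundled cascade instead of relying on the working directory.
cascPath = cv2.data.haarcascades + "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)

# If the path is wrong, the classifier silently loads empty, so check it explicitly.
if faceCascade.empty():
    raise IOError("Could not load cascade file: " + cascPath)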
