Overlay Image quality decreasing with every frame in Live Video Feed - python

I am trying to make a filter using OpenCV that overlays glasses on the eyes in a live video capture feed. The problem I am facing is that the feed starts off with the glasses overlay at good quality, but with each frame the glasses image seems to degrade and its height seems to grow slowly, frame by frame.
Here is my code:
import cv2

face_Cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
eye_Cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "frontalEyes35x16.xml")
nose_Cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "Nose18x15.xml")

glasses = cv2.imread('glasses.png', -1)
mustache = cv2.imread('mustache.png', -1)

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    if ret == False:
        continue
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)  # so that we can use the glasses' and mustache's alpha value,
                                                     # otherwise we get a white box around them

    faces = face_Cascade.detectMultiScale(gray_frame, 1.3, 5)
    for (x, y, w, h) in faces:
        #cv2.rectangle(frame, (x,y), (x+w, y+h), (255,255,255), 3)
        roi_gray = gray_frame[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]

        eyes = eye_Cascade.detectMultiScale(roi_gray, 1.3, 5)
        for (ex, ey, ew, eh) in eyes:
            #cv2.rectangle(roi_color, (ex,ey), (ex+ew, ey+eh), (0,255,0), 3)
            roi_eye_gray = roi_gray[ey:ey+eh, ex:ex+ew]
            roi_eye_color = roi_color[ey:ey+eh, ex:ex+ew]

            glasses = cv2.resize(glasses, (ew, eh), interpolation=cv2.INTER_AREA)
            gw, gh, gc = glasses.shape

            # Iterate through every pixel of the glasses image and copy the
            # non-transparent pixels into roi_color
            for i in range(0, gw):
                for j in range(0, gh):
                    if glasses[i, j][3] != 0:  # [3] is the alpha value; 0 means fully transparent,
                                               # so we leave that pixel of roi_color unchanged
                        roi_color[ey + i, ex + j] = glasses[i, j]

            #nose = nose_Cascade.detectMultiScale(roi_gray, 1.3, 5)
            #for (nx,ny,nw,nh) in nose:
            #    cv2.rectangle(roi_color, (nx,ny), (nx+nw, ny+nh), (255,0,0), 3)
            #    roi_nose_gray = roi_gray[ny:ny+nh, nx:nx+nw]
            #    roi_nose_color = roi_color[ny:ny+nh, nx:nx+nw]

    cv2.imshow("Video Frame", frame)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)

    # Wait for user input 's' to stop the loop
    key_pressed = cv2.waitKey(1) & 0xFF  # for converting waitKey's 32-bit value into 8 bits
    if key_pressed == ord('s'):
        break

cap.release()
cv2.destroyAllWindows()

It is happening at this line:
glasses = cv2.resize(glasses, (ew,eh), interpolation = cv2.INTER_AREA)
because you keep resizing the glasses up and down in size at every iteration, overwriting the original, so the same pair of glasses gets made bigger, then smaller, then bigger again, losing quality each time.
Instead, you should start from the original, high-quality glasses rather than from the resized glasses from the previous frame. So, outside the loop, change this line:
glasses = cv2.imread('glasses.png', -1)
to
origGlasses = cv2.imread('glasses.png', -1)
And inside the loop, change this line:
glasses = cv2.resize(glasses, (ew,eh), interpolation = cv2.INTER_AREA)
to:
glasses = cv2.resize(origGlasses, (ew,eh), interpolation = cv2.INTER_AREA)
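Put together, the fix can also be wrapped in a small helper so the full-resolution image is never overwritten. This is just a sketch: the helper name overlay_rgba is mine, and it assumes the eye box stays inside roi_color.

import cv2

def overlay_rgba(roi_color, overlay_img, ex, ey, ew, eh):
    # Resize a fresh copy of the 4-channel overlay to (ew, eh) and paste its
    # opaque pixels into roi_color (BGRA) at (ex, ey). The original overlay_img
    # is never modified, so quality cannot degrade frame by frame.
    resized = cv2.resize(overlay_img, (ew, eh), interpolation=cv2.INTER_AREA)
    mask = resized[:, :, 3] != 0                       # True where the overlay is opaque
    roi_color[ey:ey+eh, ex:ex+ew][mask] = resized[mask]

# usage inside the eye loop:
#   origGlasses = cv2.imread('glasses.png', -1)
#   ...
#   overlay_rgba(roi_color, origGlasses, ex, ey, ew, eh)

The boolean-mask assignment also replaces the per-pixel double loop, which is considerably faster, but the essential point is simply that cv2.resize always starts from the untouched original.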

Related

Face Recognition using python in fisheye camera

I wanted to create an application that detects faces from a fisheye camera, but I have no idea how to adapt it to the fisheye camera; it detects faces with a normal webcam. I tried different approaches, like editing the points on the face, but I couldn't even print the points on my face. Below is the source code:
import face_recognition
import cv2
import numpy as np
import dlib

# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the
# other example, but it includes some basic performance tweaks to make things run a lot faster:
#   1. Process each video frame at 1/4 resolution (though still display it at full resolution)
#   2. Only detect faces in every other frame of video.
# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
# specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.

# Get a reference to webcam #0 (the default one)
video_capture = cv2.VideoCapture(0)

# Load a sample picture and learn how to recognize it.
obama_image = face_recognition.load_image_file("obama.jpg")
obama_face_encoding = face_recognition.face_encodings(obama_image)[0]

# Load a second sample picture and learn how to recognize it.
biden_image = face_recognition.load_image_file("biden.jpg")
biden_face_encoding = face_recognition.face_encodings(biden_image)[0]

# Load a third sample picture and learn how to recognize it.
Logesh_image = face_recognition.load_image_file("Upside Logesh.jpg")
Logesh_face_encoding = face_recognition.face_encodings(Logesh_image)[0]

# Create arrays of known face encodings and their names
known_face_encodings = [
    obama_face_encoding,
    biden_face_encoding,
    Logesh_face_encoding
]
known_face_names = [
    "Barack Obama",
    "Joe Biden",
    "Logesh"
]

# Initialize some variables
face_locations = []
face_encodings = []
face_names = []
face_position = []
process_this_frame = True
face_landmarks = []

while True:
    # Grab a single frame of video
    ret, frame = video_capture.read()

    # Position frame
    Direction_frame = cv2.resize(frame, (50, 50), fx=1.50, fy=1.50)

    # Resize frame of video to 1/4 size for faster face recognition processing
    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    rgb_small_frame = small_frame[:, :, ::1]

    # Only process every other frame of video to save time
    if process_this_frame:
        # Find all the faces and face encodings in the current frame of video
        face_position = face_recognition.face_landmarks(rgb_small_frame, face_locations)
        face_position = face_recognition.face_landmarks(rgb_small_frame, face_locations)
        face_locations = face_recognition.face_locations(rgb_small_frame)
        face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
        face_landmarks = face_recognition.face_landmarks(rgb_small_frame, face_locations)

        face_names = []
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
            name = "Unknown"

            # # If a match was found in known_face_encodings, just use the first one.
            # if True in matches:
            #     first_match_index = matches.index(True)
            #     name = known_face_names[first_match_index]

            # Or instead, use the known face with the smallest distance to the new face
            face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
            best_match_index = np.argmin(face_distances)
            if matches[best_match_index]:
                name = known_face_names[best_match_index]
            face_names.append(name)

    process_this_frame = not process_this_frame

    def rect_to_bb(rect):
        # take a bounding box predicted by dlib and convert it
        # to the format (x, y, w, h) as we would normally do
        # with OpenCV
        x = rect.left()
        y = rect.top()
        w = rect.right() - x
        h = rect.bottom() - y
        # return a tuple of (x, y, w, h)
        return x, y, w, h

    # Display the results
    for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
        top *= 4
        right *= 4
        bottom *= 4
        left *= 4

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

        # Draw a label with a name below the face
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 10, bottom - 6), font, 1.0, (255, 255, 255), 1)

        # Movement of a person
        if right < 448:
            Right_Command = "You are in the right side"
            cv2.putText(frame, Right_Command, (left - 100, bottom - 300), font, 1.0, (255, 255, 255), 1)
        if left > 928:
            Left_Command = "You are in the left side"
            cv2.putText(frame, Left_Command, (left - 100, bottom - 300), font, 1.0, (255, 255, 255), 1)

    # Display the resulting image
    cv2.imshow('Video', frame)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

while True:
    _, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    for face in faces:
        x1 = face.left()
        y1 = face.top()
        x2 = face.right()
        y2 = face.bottom()
        #cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 3)
        landmarks = predictor(gray, face)
        for n in range(0, 68):
            x = landmarks.part(n).x
            y = landmarks.part(n).y
            cv2.circle(frame, (x, y), 4, (255, 0, 0), -1)
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1)
    if key == 27:
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
You could remove the fisheye distortion and try face recognition afterwards; I don't know how fast that would be, though. You can do it in OpenCV: first find the camera's optical parameters with cv2.fisheye.calibrate() and then remove the distortion. This answer gives a brief tutorial.
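For illustration, here is a minimal undistortion sketch. It assumes you have already obtained the camera matrix K and distortion coefficients D from a cv2.fisheye.calibrate() run on calibration images; the values below are placeholders and the variable names are mine.

import cv2
import numpy as np

# Placeholder intrinsics -- in practice these come from cv2.fisheye.calibrate().
K = np.array([[400.0,   0.0, 320.0],
              [  0.0, 400.0, 240.0],
              [  0.0,   0.0,   1.0]])
D = np.array([[-0.05], [0.01], [0.0], [0.0]])

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
h, w = frame.shape[:2]

# Precompute the undistortion maps once, then remap every frame before detection.
map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, (w, h), cv2.CV_16SC2)

while ret:
    undistorted = cv2.remap(frame, map1, map2, cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    # ...run face detection / recognition on `undistorted` instead of `frame`...
    cv2.imshow('undistorted', undistorted)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    ret, frame = cap.read()

cap.release()
cv2.destroyAllWindows()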

Understanding how to deploy python code to pop up balloons

I'm a newbie in programming and I need to write code that detects a balloon against a fixed background in live video, using NumPy and OpenCV, and returns the centre of the object [the balloon].
Sorry about the ignorance of the question.
Since I'm new, I had trouble working out the logic, and I don't have the resources to "teach the machine" by creating a cascade XML to detect balloons, so I thought of one possible solution:
Use cv2.createBackgroundSubtractorMOG2() to detect motion against the fixed background, and once some object [a balloon] appears, count all the white pixels in the live video and return its centre, given the right threshold on the number of white pixels.
The problem is, I don't know how to read a pixel's value (0-255) to tell whether it's white or black while showing the video at the same time. I think there is a much easier way, but I couldn't find a guide for it.
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while(1):
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    img_arr = np.array(fgmask)
    cv2.imshow('frame', fgmask)
    for i in fgmask:
        for j in i:
            print(fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
I'm getting a gray video as the output and lots of values that I don't know how to interpret.
I would use
changes = (fgmask>200).sum()
to compare all pixels with an almost-white value (>200) and count those pixels.
Then I can compare the result with some threshold to treat it as movement.
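As a tiny illustration of what that expression computes (using a made-up 3x3 mask):

import numpy as np

fgmask = np.array([[  0, 255,  30],
                   [210, 255,   0],
                   [  0,   0, 250]], dtype=np.uint8)

# (fgmask > 200) is a boolean array; .sum() counts the True entries,
# i.e. the number of almost-white pixels.
changes = (fgmask > 200).sum()
print(changes)  # 4

The full loop then looks like this: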
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)

    #changes = sum(sum(fgmask>200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)

    cv2.imshow('frame', fgmask)

    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
print() needs some time to display text, so printing all the pixels (many times in the loop) can slow down the program. So I skip that; I don't need to know the values of all the pixels.
EDIT: Using the answer in "how to detect region of large # of white pixels using opencv?", I added code which finds white regions and draws rectangles around them. The program opens two windows - one with the grayscale fgmask and one with the RGB frame - and they may hide one behind the other, so you might have to move one window to see the other.
EDIT: I added code which uses cv2.contourArea(cnt) and (x,y,w,h) = cv2.boundingRect(cnt) to build a list of items (area, x, y, w, h) for all contours, and then takes max(items) to get the contour with the biggest area. It then uses (x + w//2, y + h//2) as the centre of the red circle (there is a short note on the max(items) trick after the code).
import numpy as np
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)

    #changes = sum(sum(fgmask>200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)

    items = []

    contours, hier = cv2.findContours(fgmask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if 200 < area:
            (x, y, w, h) = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x+w, y+h), 255, 2)
            cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)
            items.append((area, x, y, w, h))

    if items:
        main_item = max(items)
        area, x, y, w, h = main_item
        if w > h:
            r = w // 2
        else:
            r = h // 2
        cv2.circle(frame, (x + w//2, y + h//2), r, (0, 0, 255), 2)

    cv2.imshow('fgmask', fgmask)
    cv2.imshow('frame', frame)

    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
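As the note promised above: Python compares tuples element by element, so putting the area first in each item makes max(items) pick the contour with the largest area (hypothetical values):

items = [(150.0, 10, 20, 15, 10), (900.5, 40, 60, 35, 30), (420.0, 5, 5, 20, 21)]
area, x, y, w, h = max(items)   # compared by the first element, the area
print(area)                     # 900.5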

Plot values not updated in OpenCV's VideoCapture

I am trying to plot facial keypoints on the video frame using OpenCV's VideoCapture. I am using a trained PyTorch CNN model. Here is the code:
cap = cv.VideoCapture(0)
time.sleep(2.0)

while cap.isOpened():
    ret, frame = cap.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        face_gray = gray[y:y+h, x:x+w]

    sample = cv.resize(face_gray, (96, 96))
    sample = sample.astype('float32') / 255
    sample = np.asarray(sample).reshape(1, 96, 96)
    sample = torch.from_numpy(sample).unsqueeze(0).to(device)

    output = saved_model(sample)
    output = output.view(-1, 2).detach()
    output = (output * 48) + 48
    output = output.cpu().numpy()
    print(output)

    for i in range(15):
        cv.circle(frame, (output[i][0], output[i][1]), 1, (0, 0, 255), -1)

    cv.imshow("Frame", frame)
    key = cv.waitKey(1) & 0xFF
    if key == ord('q'):
        break

cap.release()
cv.destroyAllWindows()
Input dimension: torch.Tensor([1,1,96,96]), 1 grayscale image
Output dimension: torch.Tensor([15, 2]), (x,y) of 15 facial key points
When a face is detected (using the Haar cascade) in the video capture, the output values stay the same, so the keypoint plot does not change.
I don't see anything wrong with your code block. The only likely source of error is a static face in the video frame that gets detected as the last face by the Haar cascade detector. From your code block, it is apparent that you are detecting keypoints for only one face per video frame. Try moving the sample = ..., output = ..., and for ...: block that renders the keypoints into the for ... iterator over faces.
After the suggested edits, the code block will look like this:
cap = cv.VideoCapture(0)
time.sleep(2.0)

while cap.isOpened():
    ret, frame = cap.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        face_gray = gray[y:y+h, x:x+w]

        # Push this block into the for iterator of faces
        sample = cv.resize(face_gray, (96, 96))
        sample = sample.astype('float32') / 255
        sample = np.asarray(sample).reshape(1, 96, 96)
        sample = torch.from_numpy(sample).unsqueeze(0).to(device)

        output = saved_model(sample)
        output = output.view(-1, 2).detach()
        output = (output * 48) + 48
        output = output.cpu().numpy()
        print(output)

        for i in range(15):
            cv.circle(frame, (output[i][0], output[i][1]), 1, (0, 0, 255), -1)
        # End block

    cv.imshow("Frame", frame)
    key = cv.waitKey(1) & 0xFF
    if key == ord('q'):
        break

cap.release()
cv.destroyAllWindows()
That code isn't handling the case of len(faces) > 0 initially for some number of iterations, then len(faces) == 0 subsequently. Should that happen, face_gray will retain its prior value, and you'll be drawing onto a new frame based on a stale face_gray.
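One way to guard against that, as a rough sketch using the same names as above, is to reset the crop every frame and only run the model when the current frame actually produced one:

while cap.isOpened():
    ret, frame = cap.read()
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    face_gray = None  # reset every frame so a stale crop is never reused
    for (x, y, w, h) in faces:
        cv.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        face_gray = gray[y:y+h, x:x+w]

    if face_gray is not None:
        # ...resize, normalize, run saved_model and draw the keypoints as before...
        pass

    cv.imshow("Frame", frame)
    if cv.waitKey(1) & 0xFF == ord('q'):
        break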

Drone Feedback: Object Detection / Color Detection? Is there a way to isolate detection to a specific object?

I have been working on code where an A.R. Drone 2.0 detects color and puts a red dot in the middle of the image. I am using the drone's video stream. The goal is for the drone to detect a white gutter and fly straight over it from one point to the other, essentially following a line. I noticed that when I changed the BGR to 0, 0, 255, the entire gutter gets distinguished, but it detects other white spots as well. Is there a way to isolate my detection to just the gutter? Maybe using shapes: once the gutter is detected, put a bounding box around it. And my final question is how do I tell my drone to follow the red dot, or maybe a drawn line? I looked at python AR drone libraries but don't know how to apply them. This is my code:
import numpy as np
import cv2

# open the camera
cap = cv2.VideoCapture('tcp://192.168.1.1:5555')

def nothing(x):
    pass

cv2.namedWindow('result')

# Starting with 100's to prevent error while masking
h, s, v = 100, 100, 100

# Creating track bars
cv2.createTrackbar('h', 'result', 0, 179, nothing)
cv2.createTrackbar('s', 'result', 0, 255, nothing)
cv2.createTrackbar('v', 'result', 0, 255, nothing)

while True:
    # read the image from the camera
    ret, frame = cap.read()

    # You will need this later
    frame = cv2.cvtColor(frame, 35)

    # converting to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # get info from the track bars and apply it to result
    h = cv2.getTrackbarPos('h', 'result')
    s = cv2.getTrackbarPos('s', 'result')
    v = cv2.getTrackbarPos('v', 'result')

    # Normal masking algorithm
    lower_blue = np.array([h, s, v])
    upper_blue = np.array([180, 255, 255])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('result', result)

    # find center
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    center = None
    if len(cnts) > 0:
        c = max(cnts, key=cv2.contourArea)
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
        if radius > 10:
            #cv2.circle(frame, (int(x),int(y)), int(radius), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)

    # color detection limits
    lB = 5
    lG = 50
    lR = 50
    hB = 15
    hG = 255
    hR = 255
    lowerLimits = np.array([lB, lG, lR])
    upperLimits = np.array([hB, hG, hR])

    # Our operations on the frame come here
    thresholded = cv2.inRange(frame, lowerLimits, upperLimits)
    outimage = cv2.bitwise_and(frame, frame, mask=thresholded)
    cv2.imshow('original', frame)

    # Display the resulting frame
    cv2.imshow('processed', outimage)

    # Quit the program when Q is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything is done, release the capture
print('closing program')
cap.release()
cv2.destroyAllWindows()

Find if drawn rectangle is visible on screen in opencv python

I have some code which draws a rectangle using the cv2.rectangle function, based on other parameters. This is part of a face detection program. The rectangle is shown on screen within an image using imshow. However, because the rectangle's dimensions vary, there are situations when the rectangle is not visible. After the line of code which draws the rectangle in the image, how do I detect whether the rectangle is visible or not? I want this so that I can tell when a face is not detected. This is my code:
import numpy as np
import cv2

# multiple cascades: https://github.com/Itseez/opencv/tree/master/data/haarcascades

# https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# https://github.com/Itseez/opencv/blob/master/data/haarcascades/haarcascade_eye.xml
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')

cap = cv2.VideoCapture(0)

while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x + w // 4, y + h // 4), (x + 3 * w // 4, y + 3 * h // 4), (255, 0, 0), 2)
    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break

cap.release()
cv2.destroyAllWindows()
Thanks!
Add print(len(faces)) in the while loop. If it's zero, then no face was detected and no rectangle was drawn.
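As a rough sketch of that check inside the existing loop (the message text is just an example):

while 1:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)

    if len(faces) == 0:
        # No detection this frame, so no rectangle will be drawn or visible.
        cv2.putText(img, "No face detected", (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x + w // 4, y + h // 4), (x + 3 * w // 4, y + 3 * h // 4), (255, 0, 0), 2)

    cv2.imshow('img', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break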
