Background Subtractor not working in OpenCV Python, what to do?

I am learning OpenCV with Python, following this article. I tried the same code they give, but even the first phase, background removal, is not working.
import cv2
import numpy as np

cap = cv2.VideoCapture(1)

while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)  # Horizontal Flip
    cv2.imshow('original', frame)

    # Background Removal
    bgSubtractor = cv2.createBackgroundSubtractorMOG2(
        history=10, varThreshold=30, detectShadows=False)
    fgmask = bgSubtractor.apply(frame)

    kernel = np.ones((5, 5), np.uint8)
    # The effect is to remove the noise in the background
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel, iterations=2)
    # To close the holes in the objects
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel, iterations=2)

    img = cv2.bitwise_and(frame, frame, mask=fgmask)
    cv2.imshow('image after bitwise_fgmask', img)
    cv2.imshow('fgmask', fgmask)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Output for fgmask: (screenshot omitted). The output for img is the same as the original frame.
What is wrong here, and what should I do?

You have to move bgSubtractor out of the while loop. Otherwise you recreate it on every frame, so it never accumulates a background model:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)

# Background Removal
bgSubtractor = cv2.createBackgroundSubtractorMOG2(
    history=10, varThreshold=30, detectShadows=False)

while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)  # Horizontal Flip
    cv2.imshow('original', frame)

    fgmask = bgSubtractor.apply(frame)

    kernel = np.ones((5, 5), np.uint8)
    # The effect is to remove the noise in the background
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel, iterations=2)
    # To close the holes in the objects
    fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel, iterations=2)

    img = cv2.bitwise_and(frame, frame, mask=fgmask)
    cv2.imshow('image after bitwise_fgmask', img)
    cv2.imshow('fgmask', fgmask)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
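As a side note, not required for the fix above: with history=10 the model adapts very quickly, so anything that stops moving is absorbed into the background within a few frames. If that becomes a problem, a longer history and an explicit learning rate are common knobs to try; the values below are assumptions to tune for your scene, not part of the original answer:

    # optional tuning sketch (hypothetical values)
    bgSubtractor = cv2.createBackgroundSubtractorMOG2(
        history=200, varThreshold=30, detectShadows=False)

    # inside the while loop:
    fgmask = bgSubtractor.apply(frame, learningRate=0.01)  # smaller = slower adaptation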

Related

Opencv Watermark Transparency and Brightness

I have a logo and I am trying to put it into my video. When I add the logo with the code below, it does not keep its original colors; it shows up semi-transparent. I don't want transparency, I need the logo in its original colors.
I get this output: (screenshot omitted)
This is my code:
img_path = 'ap_logo.png'
logo = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
#
#
watermark = image_resize(logo, height=300)
watermark = cv2.cvtColor(watermark, cv2.COLOR_BGR2BGRA)
watermark_h, watermark_w, watermark_c = watermark.shape

ret, frame = cap.read()
frame = cv2.flip(frame, 1)
frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
frame_h, frame_w, frame_c = frame.shape

# overlay with 4 channel BGR and Alpha
overlay = np.zeros((frame_h, frame_w, 4), dtype='uint8')
for i in range(0, watermark_h):
    print(i)
    for j in range(0, watermark_w):
        if watermark[i, j][3] != 0:
            h_offset = frame_h - watermark_h
            w_offset = frame_w - watermark_w
            overlay[h_offset + i, w_offset + j] = watermark[i, j]

while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)
    frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    cv2.addWeighted(overlay, 0.25, frame, 1.0, 0, frame)
    # Display the resulting frame
    frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)
    if ret:
        cv2.imshow('Frame', frame)
        out.write(frame)  # write the frame to the output file
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

# When everything is done, release the capture
cap.release()
out.release()  # saved
cv2.destroyAllWindows()
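One likely cause: cv2.addWeighted(overlay, 0.25, frame, 1.0, 0, frame) blends the whole overlay at 25% opacity, so the logo can only ever look washed out. Below is a minimal sketch of per-pixel alpha compositing that keeps the logo at full strength wherever its alpha channel is opaque. It assumes overlay is the BGRA image built above and frame is still 3-channel BGR (i.e. skip the BGR2BGRA conversion); blend_overlay is a hypothetical helper name.

    import cv2
    import numpy as np

    def blend_overlay(frame, overlay):
        """Composite a BGRA overlay onto a BGR frame using the overlay's alpha channel."""
        alpha = overlay[:, :, 3:4].astype(np.float32) / 255.0   # H x W x 1, in 0..1
        logo_bgr = overlay[:, :, :3].astype(np.float32)
        base = frame.astype(np.float32)
        out = alpha * logo_bgr + (1.0 - alpha) * base           # full logo color where alpha = 1
        return out.astype(np.uint8)

    # inside the capture loop, instead of cv2.addWeighted(...):
    # frame = blend_overlay(frame, overlay)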

How to count the number of blue items in video in python?

This is an example of what I will be seeing. I am trying to count the number of items that are blue in the video; in this example it would be 2, my shirt and the phone. How would I go about doing this?
Here is my code:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    width = int(cap.get(3))
    height = int(cap.get(4))

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_blue = np.array([90, 50, 50])
    upper_blue = np.array([130, 255, 255])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    result = cv2.bitwise_and(frame, frame, mask=mask)

    cv2.imshow('frame', result)
    cv2.imshow('mask', mask)

    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
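To get an actual count of blue items, one common approach, sketched below, is to clean the mask a little and count the external contours above a minimum area. The area threshold (500 px) is an assumption you would tune for your camera and distance.

    # inside the while loop, after `mask` is computed:
    kernel = np.ones((5, 5), np.uint8)
    clean = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)          # drop speckle noise
    contours = cv2.findContours(clean, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)[-2]        # works on OpenCV 3 and 4
    blue_items = [c for c in contours if cv2.contourArea(c) > 500]  # hypothetical minimum area
    print('blue items:', len(blue_items))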

Saving Video in OpenCV with filters

I am trying to save a video using OpenCV; the idea is to alternate the video frames between color and grayscale for a few seconds at a time. When I save it, the video contains only the colored frames.
import cv2

def saving(cap):
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('Project11.mp4', fourcc, fps, (int(width), int(height)))
    return out

def first4seconds(video):
    c = saving(video)
    while video.isOpened():
        print(video.get(cv2.CAP_PROP_POS_MSEC))
        ret, frame = video.read()
        if ret == True:
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
        else:
            break
        if 500 <= int(video.get(cv2.CAP_PROP_POS_MSEC)) < 1000 or 2000 <= int(video.get(cv2.CAP_PROP_POS_MSEC)) < 3000:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            c.write(gray)
            cv2.imshow('frame', gray)
        else:
            c.write(frame)
            cv2.imshow('frame', frame)
    video.release()
    cv2.destroyAllWindows()

cap = cv2.VideoCapture('test.mp4')
first4seconds(cap)
if(500<=int(video.get(cv2.CAP_PROP_POS_MSEC))<1000 or 2000<=int(video.get(cv2.CAP_PROP_POS_MSEC))<3000):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
**gray_2=cv2.cvtColor(gray,cv2.COLOR_GRAY2BGR)**
**cv2.imshow('frame',gray_2)**
**c.write(gray_2)**
else:
c.write(frame)
cv2.imshow('frame',frame)

Separation of bounding boxes

In this problem we are trying to detect persons in a webcam video in real time. The code works fine for one person, but when more than one person enters the frame it fails miserably. Here is the code:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
kernel = np.ones((5, 5), np.uint8)
background = None

while True:
    ret, frame = cap.read()
    gray = frame.copy()
    gray = cv2.cvtColor(gray, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (11, 11), 0)

    if background is None:
        background = gray
        continue

    delta = cv2.absdiff(background, gray)
    thresh = cv2.threshold(delta, 5, 255,
                           cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    thresh = cv2.dilate(thresh, kernel, iterations=2)

    _, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        continue

    # areas = [cv2.contourArea(c) for c in contours]
    # max_index = np.argmax(areas)
    # cnt = contours[max_index]
    # (x, y, w, h) = cv2.boundingRect(cnt)
    # if 1.0 * (w * h) / (640 * 480) < 0.75:
    #     cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 3)
    #     print("Area: ", w * h)

    for i in range(len(contours)):
        (x, y, w, h) = cv2.boundingRect(contours[i])
        if w * h <= 90000:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 5)

    # cv2.imshow('thresh', thresh)
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == 27:
        break

cap.release()
cv2.destroyAllWindows()
I think the problem is that the code is not able to separate the contours of the different persons detected, but that may not be the only reason. Can someone help me?
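Part of the issue is likely the static first frame used as the background plus the heavy dilation, which tends to merge nearby blobs into one large contour; the w * h <= 90000 filter then drops the merged box entirely. A hedged sketch of one common alternative: use an adaptive background subtractor (as in the first question above) and a minimum-area filter instead of a maximum-area one, so each person tends to produce one clean blob. The history, varThreshold and 5000-pixel minimum area are assumptions to tune; separating people who actually overlap needs a real person detector (HOG or a DNN), which this does not attempt.

    import cv2
    import numpy as np

    cap = cv2.VideoCapture(0)
    kernel = np.ones((5, 5), np.uint8)
    subtractor = cv2.createBackgroundSubtractorMOG2(history=100, varThreshold=40,
                                                    detectShadows=False)

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        mask = subtractor.apply(frame)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=2)  # remove noise
        mask = cv2.dilate(mask, kernel, iterations=2)                        # fill gaps
        contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)[-2]             # OpenCV 3 and 4
        for c in contours:
            if cv2.contourArea(c) < 5000:        # hypothetical minimum area for a person
                continue
            x, y, w, h = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)

        cv2.imshow('frame', frame)
        if cv2.waitKey(1) == 27:
            break

    cap.release()
    cv2.destroyAllWindows()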

Storing images in list

I've been trying to store cropped images from a single frame into a list, but I keep getting errors. Here's a snippet of my code:
import cv2
import numpy as np
import time

cap = cv2.VideoCapture('/home/wael-karkoub/Desktop/Research/Videos/test.mp4')
ret = True
cropped_images = []

while ret == True:
    ret, frame = cap.read()
    height, width, channels = frame.shape
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur[0:height//2], 10, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    im2, contours, hierarchy = cv2.findContours(thresh, 1, 2)

    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        image = frame[y:y + h, x:x + w]
        cv2.imshow('test', image)
        time.sleep(2)

    # cv2.imshow('Contours', frame)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break

cap.release()
cv2.destroyAllWindows()
Is this the correct way of doing it?
If you want to store your cropped images in a list, you need an assignment somewhere. Just add cropped_images.append(image) to your code, most likely right after image = frame[y:y+h, x:x+w].
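For example, a minimal sketch of where the append would go, using the names from the question's code:

    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        image = frame[y:y + h, x:x + w]
        cropped_images.append(image)  # keep the crop so it is available after the loop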
