Remove background with opencv python

Here is my solution for getting a binary image:
import cv2
import numpy as np
img = cv2.imread('crop.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
ok,thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
cv2.imshow('image',img)
cv2.imshow('threshold',thresh)
k = cv2.waitKey(0) & 0xff
if k == ord('q'):
    cv2.destroyAllWindows()
Below is the result I get. How can I remove the background so that only the hand remains?
original image
threshold image

You can use color detection to get a mask for the hand region. If you want to do background subtraction on video, you can store the background first and then subtract each upcoming frame from it.
import cv2

cap = cv2.VideoCapture(1)
j = 0
while True:
    ret, frame = cap.read()
    if not ret:
        break
    if j == 0:
        # the first frame initializes the background model
        bg = frame.copy().astype("float")
    if j < 30:
        # accumulate the first 30 frames into the background model
        cv2.accumulateWeighted(frame, bg, 0.5)
        j = j + 1
    # subtract the stored background from the current frame
    diff = cv2.absdiff(frame, bg.astype("uint8"))
    diff = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    thre, diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)
    cv2.imshow("j", diff)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
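If you only have a single image rather than a video, a color-based mask is the simpler route. Below is a minimal sketch that thresholds the image in HSV around typical skin tones; the bounds are assumptions and will usually need tuning for your lighting.
import cv2
import numpy as np

img = cv2.imread('crop.png')  # same input image as above
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# rough skin-tone range in HSV; these bounds are assumptions, tune them for your setup
lower_skin = np.array([0, 30, 60], dtype=np.uint8)
upper_skin = np.array([20, 150, 255], dtype=np.uint8)
mask = cv2.inRange(hsv, lower_skin, upper_skin)

# close small holes and remove noise in the mask
kernel = np.ones((5, 5), np.uint8)
mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

# keep only the hand pixels, everything else becomes black
hand_only = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('hand only', hand_only)
cv2.waitKey(0)
cv2.destroyAllWindows()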

Related

Segment black AND moving Pixels

I'm trying to segment the moving propeller in this video. My approach is to detect all black and moving pixels in order to separate the propeller from the rest.
Here is what I tried so far:
import numpy as np
import cv2
x, y, h, w = 350, 100, 420, 500  # cropping values
cap = cv2.VideoCapture('Video Path')
while True:
    _, frame = cap.read()
    frame = frame[y:y+h, x:x+w]  # crop video
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_black = np.array([0, 0, 0])
    upper_black = np.array([360, 255, 90])
    mask = cv2.inRange(hsv, lower_black, upper_black)
    res = cv2.bitwise_and(frame, frame, mask=mask)
    nz = np.argwhere(mask)
    cv2.imshow('Original', frame)
    cv2.imshow('Propeller Segmentation', mask)
    k = cv2.waitKey(30) & 0xff  # press esc to exit
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
Screenshot from the video
Result of the Segmentation
I think you should have a look at background subtraction, for example with the function cv2.createBackgroundSubtractorMOG2(). It should be the right approach for your problem.
OpenCV provides a good tutorial on this: Link
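A minimal sketch of how that could look for the cropped propeller video; the path and crop values are taken from the question, while the MOG2 parameters are assumptions that will need tuning:
import cv2

x, y, h, w = 350, 100, 420, 500  # cropping values from the question
cap = cv2.VideoCapture('Video Path')  # placeholder path
# history/varThreshold are assumptions; tune them for your footage
subtractor = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=25, detectShadows=False)

while True:
    ret, frame = cap.read()
    if not ret:
        break
    frame = frame[y:y+h, x:x+w]
    fgmask = subtractor.apply(frame)  # white where pixels changed (moving propeller)
    cv2.imshow('Original', frame)
    cv2.imshow('Moving pixels', fgmask)
    if cv2.waitKey(30) & 0xFF == 27:  # ESC to exit
        break

cap.release()
cv2.destroyAllWindows()
The resulting moving-pixel mask can then be combined with the black-color mask from the question via cv2.bitwise_and, so that only pixels which are both dark and moving remain.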

How to detect one color in the image?

How can I detect (True/False) whether the frame contains at least one pixel of a given color, for example rgb(213, 39, 27)?
I want to process the frame only when the detection is true.
import cv2

VIDEO_URL = ''
cap = cv2.VideoCapture(VIDEO_URL)
fps = cap.get(cv2.CAP_PROP_FPS)
wait_ms = int(1000 / fps)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    img = frame[815:970, 360:1920]  # crop the region of interest; frames from cap.read() are already BGR
    # image processing here
    if cv2.waitKey(wait_ms) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
You can use HSV images to detect any color. For example, suppose you want to identify the color blue in your image.
Include this code inside your while loop:
hsvimg = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lb = np.array([94, 80, 2])      # lower HSV bound for blue (requires: import numpy as np)
ub = np.array([126, 255, 255])  # upper HSV bound for blue
mask = cv2.inRange(hsvimg, lb, ub)
if 255 in mask:
    print("Blue color present")
This code detects the color blue; you can detect any other color by changing the HSV range. For the specific rgb(213, 39, 27) value from the question, you can derive the range from the target color itself, as sketched below.
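A minimal sketch of that idea: convert the RGB value to HSV on a 1x1 image and build a small range around its hue. The +/-10 hue tolerance and the saturation/value bounds are assumptions that may need tuning.
import cv2
import numpy as np

# target color from the question, given as RGB
target_rgb = np.uint8([[[213, 39, 27]]])
target_hsv = cv2.cvtColor(target_rgb, cv2.COLOR_RGB2HSV)[0][0]
h = int(target_hsv[0])

# +/-10 hue and wide saturation/value bounds are assumptions; tighten as needed
lb = np.array([max(h - 10, 0), 100, 100])
ub = np.array([min(h + 10, 179), 255, 255])

def contains_color(frame_bgr):
    # return True if at least one pixel falls inside the target HSV range
    hsvimg = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsvimg, lb, ub)
    return cv2.countNonZero(mask) > 0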

CV2: Is there a function that outputs the number of white dots in a polygon?

I do a "Background Subtraction" with a VideoStream. Then I want to check inside the interior of a specified polygon, if there are white dots.
I thought about using https://docs.opencv.org/2.4/doc/tutorials/imgproc/shapedescriptors/point_polygon_test/point_polygon_test.html but I don't know how to do it, because the white points are existing after applying the filter. The original stream contains also white points which I also dont't want to count.
import numpy as np
import cv2
import time

cap = cv2.VideoCapture()
cap.open("rtsp://LOGINNAME:PASSWORD#192.168.178.42:554")
#cap.open("C:\\Users\\001\\Desktop\\cam-20191025-220508-220530.mp4")
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while True:
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame)
    polygonInnenAutoErkennen_cnt = np.array([(24, 719), (714, 414), (1005, 429), (1084, 719)])
    cv2.drawContours(fgmask, [polygonInnenAutoErkennen_cnt], -1, (255, 128, 60))
    #How can I check here?
    cv2.imshow('frame', fgmask)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # exit on ESC
        break
cap.release()
cv2.destroyAllWindows()
The simplest way is to use a mask image. Draw your polygon filled on a binary image and use it as a mask for your white dots. You can then do a per-pixel multiplication or a logical AND and count what is left, as sketched below.
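A minimal sketch of that approach, reusing the polygon and the fgmask from the question; the helper name count_white_in_polygon is just an illustration:
import cv2
import numpy as np

# polygon from the question (fillPoly expects int32 points)
polygonInnenAutoErkennen_cnt = np.array([(24, 719), (714, 414), (1005, 429), (1084, 719)], dtype=np.int32)

def count_white_in_polygon(fgmask, polygon):
    # binary mask with the polygon interior filled white
    poly_mask = np.zeros(fgmask.shape[:2], dtype=np.uint8)
    cv2.fillPoly(poly_mask, [polygon], 255)
    # logical AND keeps only foreground pixels that lie inside the polygon
    inside = cv2.bitwise_and(fgmask, poly_mask)
    # every remaining non-zero pixel is a white dot inside the polygon
    return cv2.countNonZero(inside)

# inside the while loop, after fgmask = fgbg.apply(frame):
# n_white = count_white_in_polygon(fgmask, polygonInnenAutoErkennen_cnt)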

How to detect only the left eye in a video with opencv?

I want to detect only the left eye in a video stream, but I can't. When I run this code, it detects both eyes (right and left) and sometimes it also detects the mouth or the nose as if it were an eye.
Follow the code below:
import cv2
import numpy as np

faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
#eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_lefteye_2splits.xml')
video_capture = cv2.VideoCapture(0)
while True:
    ret, img = video_capture.read()
    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray_img,
        scaleFactor=1.98,
        minNeighbors=8,
        minSize=(80, 80),
        flags=cv2.cv.CV_HAAR_SCALE_IMAGE
    )
    #print 'faces: ', faces
    for (p, q, r, s) in faces:
        #cv2.rectangle(img,(p,q),(p+r,q+s),(255,0,0),3)
        face_gray = gray_img[q:q+s, p:p+r]
        face_color = img[q:q+s, p:p+r]
        eyes = eye_cascade.detectMultiScale(face_gray)
        for (ep, eq, er, es) in eyes:
            cv2.rectangle(face_color, (ep, eq), (ep+er, eq+es), (0, 255, 0), 3)
    rimg = cv2.flip(img, 1)  # mirror the image
    cv2.imshow("Video", rimg)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
So what should I do to detect only the left eye?
PS: I'm using Python 2.7.13, NumPy 1.10.0 and OpenCV 2.4.13.3 in this program.
You can use a simple for loop to iterate through the eyes returned by the eye cascade and find the index of the eye with the minimum x-coordinate. That will give you the index of the left-most eye.
def getleftmosteye(eyes):
    leftmost = 9999999
    leftmostindex = -1
    for i in range(len(eyes)):  # iterate over every detected eye, not just two
        if eyes[i][0] < leftmost:
            leftmost = eyes[i][0]
            leftmostindex = i
    return eyes[leftmostindex]
Now you can get the coordinates of the left eye once you have its index, for example:
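A minimal usage sketch inside the existing detection loop; the guard against frames without any detection is an added assumption:
eyes = eye_cascade.detectMultiScale(face_gray)
if len(eyes) > 0:  # skip frames where no eye was detected
    ep, eq, er, es = getleftmosteye(eyes)
    # draw only the left-most detection instead of all of them
    cv2.rectangle(face_color, (ep, eq), (ep+er, eq+es), (0, 255, 0), 3)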

Python - OpenCv - Set window resolution when opening a video file

I want to display 3 videos at the same time like this:
The way I am opening one video right now in fullscreen is as follows:
import numpy as np
import cv2

cap = cv2.VideoCapture(r'C:\Users\NachoM\Videos\VTS_01_1.mp4')  # raw string so backslashes are not treated as escapes
while cap.isOpened():
    ret, frame = cap.read()
    cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
    cv2.setWindowProperty("window", cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('window', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
Is there any other parameter I have to pass to cv2 to position the window like in the picture above?
Thank you
One way of doing this is to use:
cv2.moveWindow("WindowName", x, y)
More details can be found at (assuming OpenCV 2.4 is used): http://docs.opencv.org/2.4/modules/highgui/doc/user_interface.html#movewindow
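A minimal sketch of laying out three normal (non-fullscreen) windows side by side with cv2.moveWindow; the file names and the per-window size are placeholders/assumptions:
import cv2

paths = ['video1.mp4', 'video2.mp4', 'video3.mp4']  # placeholder file names
caps = [cv2.VideoCapture(p) for p in paths]
width, height = 640, 360                            # assumed per-window size

for i in range(3):
    name = 'window%d' % i
    cv2.namedWindow(name, cv2.WINDOW_NORMAL)        # resizable window
    cv2.resizeWindow(name, width, height)
    cv2.moveWindow(name, i * width, 0)              # place windows next to each other

while True:
    frames = [cap.read() for cap in caps]
    if not all(ret for ret, _ in frames):
        break
    for i, (_, frame) in enumerate(frames):
        cv2.imshow('window%d' % i, frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

for cap in caps:
    cap.release()
cv2.destroyAllWindows()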
