I'm getting an error with a morphological mask - Python

I wrote a program that finds 2 ROIs, selects them in 2 separate frames, and then counts the number of green pixels in each frame.
It works fine, but when I apply the morphology masks to the camera feed it gives me this error: /home/pi/opencv/opencv-3.4.0/modules/core/src/arithm.cpp:1769: error: (-209) The lower boundary is neither an array of the same size and same type as src, nor a scalar in function inRange
How can I fix this?
The problem occurs here:
maskClose=cv2.morphologyEx(maskOpen,cv2.MORPH_CLOSE,kernelClose)
maskFinal=maskClose
...
for i in range(len(conts)):
    x,y,w,h=cv2.boundingRect(conts[i])
    area=maskFinal[y:y+h, x:x+w]
    pixcount = cv2.inRange(area,lowerBound,upperBound)
    pixNum = cv2.countNonZero(pixcount)
Full code
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import sys
prevNrOfContours = 0
lowerBound=np.array([43,53,30])
upperBound=np.array([132,255,255])
camera = PiCamera()
camera.rotation = 180
camera.resolution = (640, 480)
camera.framerate = 30
font=cv2.FONT_HERSHEY_SIMPLEX
rawCapture = PiRGBArray(camera, size=(640, 480))
GREEN_MIN = np.array([0, 0, 0])
GREEN_MAX = np.array([0, 0, 0])
# allow the camera to warmup
time.sleep(0.1)
kernelOpen=np.ones((5,5))
kernelClose=np.ones((20,20))
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    img = frame.array
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(imgHSV, lowerBound, upperBound)
    # morphology
    maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
    maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
    maskFinal = maskClose
    _, conts, _ = cv2.findContours(maskFinal.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    nrOfContours = len(conts)
    for i in range(len(conts)):
        x, y, w, h = cv2.boundingRect(conts[i])
        area = maskFinal[y:y+h, x:x+w] ######## problem is here
        pixcount = cv2.inRange(area, lowerBound, upperBound)
        pixNum = cv2.countNonZero(pixcount)
        print("Area No." + str(i), "Green pixels = " + str(pixNum))
        cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 2)
        cv2.imshow("area" + str(i), area)
    # close unnecessary windows
    if prevNrOfContours > nrOfContours:
        for i in range(nrOfContours, prevNrOfContours):
            cv2.destroyWindow("area" + str(i))
    prevNrOfContours = nrOfContours
    cv2.imshow("cam", img)
    # clear the stream in preparation for the next frame
    key = cv2.waitKey(1) & 0xFF
    rawCapture.truncate(0)
    if key == ord("q"):
        break

You're creating a subimage of maskFinal. maskFinal is a single-channel binary image, which is incompatible with the 3-channel lowerBound.
To solve it you can actually drop the line pixcount = cv2.inRange(area,lowerBound,upperBound). The masked image is white in the green areas, so counting the nonzero pixels is enough.
Note: the current subimage includes the morphological transformations. If you do not want to count pixels introduced by those, you'll have to create a subimage of mask instead.
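Concretely, a minimal sketch of that inner loop with the fix applied (reusing mask, conts, and the bounding boxes from the question's code; slicing mask rather than maskFinal is per the note above):
for i in range(len(conts)):
    x, y, w, h = cv2.boundingRect(conts[i])
    # slice the pre-morphology mask so pixels added by open/close are not counted
    area = mask[y:y+h, x:x+w]
    # the mask is already binary (0 or 255), so no second inRange is needed
    pixNum = cv2.countNonZero(area)
    print("Area No." + str(i), "Green pixels = " + str(pixNum))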

Related

Change VideoCapture size based on loop value

So I have this code below, and I was expecting it to change the width of the frame based on the loop value i from 1 to 1000 in real time. To visualize that: when I executed the code, I expected the window's width to change while it was running.
import cv2
import numpy as np
vid = cv2.VideoCapture("C:\\users\\USER\\downloads\\man.mp4")
while True:
    ret, frame = vid.read()
    for i in range(1, 1000):
        frame = cv2.resize(frame, (i, 450))
        size = 16
        # Create motion blur kernel
        kernel_motion_blur = np.zeros((size, size))
        kernel_motion_blur[int((size-1)/2), :] = np.ones(size)
        kernel_motion_blur = kernel_motion_blur / size
        # Apply motion blur kernel
        result = cv2.filter2D(frame, -1, kernel_motion_blur)
        cv2.imshow('Motion Blur Applied', result)
        cv2.imshow('Original', frame)
        if cv2.waitKey(1) == ord('q'):
            break
cv2.destroyAllWindows()
vid.release()
It works, but the problem is that when I executed the code the video frame got glitchy and did not display the correct image of what the actual video contains. So how do I fix that?
The video before and after
I'm not sure I totally understand the question, or why you have a for loop inside of a while loop, but I think I was able to achieve the effect you are going for. Take a look:
import cv2
import numpy as np
# control how fast the image slides left to right
step_size = 10
vid = cv2.VideoCapture(r"videos\demo.mp4")
while True:
    ret, frame = vid.read()
    if ret == False:
        break
    width = frame.shape[1]
    for n in range(0, width, step_size):
        frame_to_show = frame[:, :n+step_size]
        size = 16
        # Create motion blur kernel
        kernel_motion_blur = np.zeros((size, size))
        kernel_motion_blur[int((size-1)/2), :] = np.ones(size)
        kernel_motion_blur = kernel_motion_blur / size
        # Apply motion blur kernel
        result = cv2.filter2D(frame_to_show, -1, kernel_motion_blur)
        cv2.imshow('Motion Blur Applied', result)
        cv2.imshow('Original', frame_to_show)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
cv2.destroyAllWindows()
vid.release()
Or perhaps this is more what you are looking for:
import cv2
import numpy as np
# control how fast the image slides left to right
step_size = 10
vid = cv2.VideoCapture(r"videos\demo.mp4")
end = 0
while True:
    ret, frame = vid.read()
    if ret == False:
        break
    width = frame.shape[1]
    if end > width:
        end = 0
    end += step_size
    frame_to_show = frame[:, :end]
    size = 16
    # Create motion blur kernel
    kernel_motion_blur = np.zeros((size, size))
    kernel_motion_blur[int((size-1)/2), :] = np.ones(size)
    kernel_motion_blur = kernel_motion_blur / size
    # Apply motion blur kernel
    result = cv2.filter2D(frame_to_show, -1, kernel_motion_blur)
    cv2.imshow('Motion Blur Applied', result)
    cv2.imshow('Original', frame_to_show)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
cv2.destroyAllWindows()
vid.release()

How to save the previous frame in an array and compare it with the current frame in Python

I want to remove duplicate captures of objects: when the camera opens it captures the first frame and saves it to disk, and then it does not save another frame until the next object appears in the scene (it never saves the same frame consecutively).
I have written code to compare two consecutive webcam frames. I want to store one frame in an array (max limit 3) to compare it with the current frame, so the first frame is saved to disk and compared against new frames until the next object appears (I used a threshold value for this purpose).
How can I save a frame to an array and compare it with the current frame?
from skimage.metrics import structural_similarity
import imutils
import sys
import datetime
import cv2
import time
import numpy as np
cap = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame1 = cap.read()  # first image
    time.sleep(1/50)  # slight delay
    ret, frame2 = cap.read()  # second image
    gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
    # compute the Structural Similarity Index (SSIM) between the two
    # images, ensuring that the difference image is returned
    (score, diff) = structural_similarity(gray1, gray2, full=True)
    diff = (diff * 255).astype("uint8")
    print("SSIM: {}".format(score))
    # threshold the difference image, followed by finding contours to
    # obtain the regions of the two input images that differ
    thresh = cv2.threshold(diff, 0, 255,
                           cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    if np.mean(thresh) < 0.4:
        print("New object Detected")
        date_string = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
        # x, y, w, h come from contour-detection code not shown here
        cv2.imwrite('img/img-' + date_string + '.png', frame2[y:y+h+30, x:x+w+30])
    # Display the resulting frame
    cv2.imshow('frame1', frame1)
    cv2.imshow('frame2', frame2)
    cv2.imshow("Diff", diff)
    cv2.imshow("Thresh", thresh)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything is done, release the capture
cap.release()
cv2.destroyAllWindows()
Not hard.
Actually, you can get the image as a NumPy array.
The shape is (720, 1280, 3).
To save it, try this (Image here is PIL.Image from the Pillow package):
...
ret, frame1 = cap.read()  # first image
print(frame1.shape)
rgb_frame1 = frame1[..., ::-1]  # reorder the channels from BGR to RGB
im = Image.fromarray(rgb_frame1)
im.save("your_file.jpeg")
time.sleep(1/50)  # slight delay
...
Note: you need to change the channel order or you will get a blue-looking image, because OpenCV's original channel order is BGR.
Then you can store the frames in a small buffer and compare the current frame against them.
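A minimal sketch of that buffering idea (my own illustration, not code from the question): keep up to 3 frames in a collections.deque and compare the current grayscale frame against the most recently stored one with SSIM; the 0.8 threshold is just an assumed starting point:
from collections import deque
import cv2
from skimage.metrics import structural_similarity

frame_buffer = deque(maxlen=3)  # holds at most the 3 most recently saved frames

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if not frame_buffer:
        frame_buffer.append(gray)  # first frame: remember it (and save to disk)
        continue
    # low similarity to the last stored frame suggests a new object
    score, _ = structural_similarity(frame_buffer[-1], gray, full=True)
    if score < 0.8:  # assumed threshold, tune for your scene
        frame_buffer.append(gray)  # remember this frame; save it to disk here
cap.release()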

Background subtractor Python OpenCV (remove granulation)

Hello, I'm using MOG2 to do background subtraction between a base frame and the following frames,
but it is showing too much noise.
I'd like to know if there is another background subtractor that can eliminate these points.
I also have another problem.
When a car passes with its headlights on, the headlights show up as white in my image. I need to ignore the reflection of the headlights on the ground.
Does anyone know how to do that?
My code for the BGS:
backSub = cv2.createBackgroundSubtractorMOG2(history=1, varThreshold=150, detectShadows=True)
fgMask = backSub.apply(frame1)
fgMask2 = backSub.apply(actualframe)
maskedFrame = fgMask2 - fgMask
cv2.imshow("maskedFrame1 "+str(id), maskedFrame)
You can try to perform a Gaussian blur before sending the frame to backSub.apply(), or experiment with the parameters of cv2.createBackgroundSubtractorMOG2(); if you need a better explanation of what they do, try this page.
This is the result from a 7x7 Gaussian blur using this video.
Code:
import cv2
import numpy as np
import sys
# read input video
cap = cv2.VideoCapture('traffic.mp4')
if not cap.isOpened():
    print("!!! Failed to open video")
    sys.exit(-1)

# retrieve input video frame size
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS)
print('* Input Video settings:', frame_width, 'x', frame_height, '#', fps)

# adjust output video size
frame_height = int(frame_height / 2)
print('* Output Video settings:', frame_width, 'x', frame_height, '#', fps)

# create output video
video_out = cv2.VideoWriter('traffic_out.mp4', cv2.VideoWriter_fourcc(*'MP4V'), fps, (frame_width, frame_height))
#video_out = cv2.VideoWriter('traffic_out.avi', cv2.VideoWriter_fourcc('M','J','P','G'), fps, (frame_width, frame_height), True)

# create MOG
backSub = cv2.createBackgroundSubtractorMOG2(history=5, varThreshold=60, detectShadows=True)

while True:
    # retrieve frame from the video
    ret, frame = cap.read()  # 3-channels
    if frame is None:
        break

    # resize to 50% of its original size
    frame = cv2.resize(frame, None, fx=0.5, fy=0.5)

    # gaussian blur helps to remove noise
    blur = cv2.GaussianBlur(frame, (7,7), 0)
    #cv2.imshow('frame_blur', blur)

    # subtract background
    fgmask = backSub.apply(blur)  # single channel
    #cv2.imshow('fgmask', fgmask)

    # concatenate both frames horizontally and write it as output
    fgmask_bgr = cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR)  # convert single channel image to 3-channels
    out_frame = cv2.hconcat([blur, fgmask_bgr])
    #print('output=', out_frame.shape)  # shape=(360, 1280, 3)

    cv2.imshow('output', out_frame)
    video_out.write(out_frame)

    # quick pause to display the windows
    if cv2.waitKey(1) == 27:
        break

# release resources
cap.release()
video_out.release()
cv2.destroyAllWindows()
You can use SuBSENSE: A Universal Change Detection Method With Local Adaptive Sensitivity https://ieeexplore.ieee.org/document/6975239.
BackgroundSubtractionSuBSENSE bgs(/*...*/);
bgs.initialize(/*...*/);
for(/*all frames in the video*/) {
    //...
    bgs(input, output);
    //...
}
You can find the complete implementation at
https://bitbucket.org/pierre_luc_st_charles/subsense/src/master/
Also, I don't know the scale of your work or your requirements, but Murari Mandal composed a very informative GitHub repository comprising a list of resources related to background subtraction, which can address the problems mentioned above:
https://github.com/murari023/awesome-background-subtraction
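If building the C++ SuBSENSE sources is too heavy for your project, note that core OpenCV also ships a KNN-based subtractor that is sometimes less grainy than MOG2 on noisy scenes. A minimal sketch, with parameter values that are only starting points to experiment with:
import cv2

cap = cv2.VideoCapture('traffic.mp4')
# KNN background subtractor from core OpenCV; tune dist2Threshold per scene
backSub = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400.0, detectShadows=True)
while True:
    ret, frame = cap.read()
    if frame is None:
        break
    blur = cv2.GaussianBlur(frame, (7, 7), 0)  # same denoising idea as above
    fgmask = backSub.apply(blur)
    cv2.imshow('fgmask KNN', fgmask)
    if cv2.waitKey(1) == 27:
        break
cap.release()
cv2.destroyAllWindows()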

Parse output of opencv updateMotionHistory in Python

I am trying to generate grayscale Motion History Images (MHIs) from a live image feed (webcam) for processing by a CNN model I have built. From the updateMotionHistory function I get as output a [width x height] array of type np.float32.
I want to convert this array into a grayscale image in which newer motion appears brighter, like this
EDIT: Added code example below
import cv2
import numpy as np
import time
import genMHI_util
cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2(history=1000, detectShadows=False)
colourThreshold = 0.975
frame_reduction_counter = 0
mhi = np.zeros((genMHI_util.MHI_WIDTH, genMHI_util.MHI_HEIGHT), np.float32)
while True:
    ret, frame = cap.read()
    # kernel = np.ones((5, 5), np.float32) / 25
    # frame = cv2.medianBlur(frame, 5)
    # frame = cv2.filter2D(frame, -1, kernel)  # Blur image
    if frame_reduction_counter >= 0:
        # Disregarding frames that do not contain enough movement, below the set threshold
        frame_reduction_counter = 0
        timestamp = cv2.getTickCount() / cv2.getTickFrequency()
        # Do background subtraction on frame to get silhouette
        silhouette = fgbg.apply(frame)
        silhouette = cv2.resize(silhouette, (genMHI_util.MHI_WIDTH, genMHI_util.MHI_HEIGHT))
        # Update MHI
        cv2.motempl.updateMotionHistory(silhouette, mhi, timestamp, 0.5)
        # Do something with 'mhi' object (300x300 float array) ?
        # Convert float array to int
        mask = cv2.convertScaleAbs(mhi,
                                   alpha=(255 / genMHI_util.MHI_DURATION),
                                   beta=((genMHI_util.MHI_DURATION - timestamp) * 255 / genMHI_util.MHI_DURATION))
        # Preview images
        cv2.imshow('original', frame)
        cv2.imshow('silhouette', silhouette)
        cv2.imshow('mhi', mask)
        cv2.waitKey(1)
    frame_reduction_counter += 1
cap.release()
cv2.destroyAllWindows()
CONSTANTS:
MHI_DURATION = 5
MHI_WIDTH = 300
MHI_HEIGHT = 300
Output without movement - bit of camera noise
Output with movement - kind of works but very badly
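One common way to get the "brighter = newer" mapping (it is what OpenCV's own motempl.py sample does) is to rescale the float MHI against the current timestamp. A minimal sketch, assuming the mhi, timestamp, and MHI_DURATION values from the code above, and that MHI_DURATION matches the duration passed to updateMotionHistory:
import numpy as np

# pixels written closer to 'timestamp' map nearer to 255 (brighter = newer);
# anything older than MHI_DURATION seconds falls to 0
vis = np.clip((mhi - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1)
gray_mhi = np.uint8(vis * 255)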

Find circles live with openCV and python

I am trying to find 2 circles in a field and mark them from the live data I am getting from the camera. The problem is that the cv2.HoughCircles() function cannot always detect those circles, even if their position is unchanged. Here is the code:
import cv2
import numpy as np
import time
wait = 5
st = time.clock()
on = 0
arka = 0

def circleS():
    circles = cv2.HoughCircles(img_grs, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=150,
                               param2=35,
                               minRadius=5,
                               maxRadius=30)
    on = 0
    arka = 0
    if circles[0,0,2] > circles[0,1,2]:
        on = (circles[0,0,0], circles[0,0,1])
        arka = (circles[0,1,0], circles[0,1,1])
    if circles[0,0,2] < circles[0,1,2]:
        on = (circles[0,1,0], circles[0,1,1])
        arka = (circles[0,0,0], circles[0,0,1])
    cv2.circle(img_bgr, on, 2, (0,255,0), 3)
    cv2.circle(img_bgr, arka, 2, (0,0,255), 3)
    return on, arka

def takePic():
    ret, frame = camera.read()
    img_bgr = np.copy(frame)
    frame = None
    return img_bgr

camera = cv2.VideoCapture(1)
while (time.clock()-st) <= wait:
    ret, frame = camera.read()  # Capture a frame
    #cv2.imshow('Camera Stream', frame)  # Display the captured frame in a window named Camera Stream
    cv2.waitKey(1)
cv2.destroyAllWindows()

while(1):
    img_bgr = takePic()
    img_grs = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    ret, img_bin = cv2.threshold(img_grs, 220, 255, cv2.THRESH_BINARY)
    corners = cv2.goodFeaturesToTrack(img_bin, 9, 0.03, 3)
    corners = np.int0(corners)
    on, arka = np.int0(circleS())
    for i in corners:
        x, y = i.ravel()
        if x >= arka[0]+15 or x <= arka[0]-15 or y >= arka[1]+15 or y <= arka[1]-15:
            cv2.circle(img_grs, (x,y), 3, 255, -1)
    cv2.imshow("Camera Stream", img_bgr)
    cv2.waitKey(10)
    img_bgr = None
    img_grs = None
    time.sleep(5)

camera.release()
cv2.imshow("bin", img_bgr)
cv2.waitKey(0)
cv2.destroyAllWindows()
The error I am getting is index out of bounds because of the circles array. What's going wrong?
The index out-of-bounds error on circles happens because cv2.HoughCircles() returns None when it does not find any circles (as you mention), so indexing into the result fails.
Put in an extra check after the cv2.HoughCircles() call; since circleS() reads the first two detections, also make sure at least two circles were found:
if circles is not None and len(circles[0]) >= 2:
    # your circle stuff
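Applied to the circleS() function from the question, the guard might look like this; returning None when fewer than two circles are found is my own convention, so adapt it to whatever fits your loop:
def circleS():
    circles = cv2.HoughCircles(img_grs, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=150, param2=35,
                               minRadius=5, maxRadius=30)
    # cv2.HoughCircles() returns None when no circles are detected
    if circles is None or len(circles[0]) < 2:
        return None
    # ... original comparison and drawing code ...
In the main loop, skip the frame when nothing usable was detected:
result = circleS()
if result is None:
    continue  # no pair of circles in this frame, grab the next one
on, arka = np.int0(result)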
