I am trying to find 2 circles in a field and mark them in the live data I am getting from the camera. The problem is that cv2.HoughCircles() cannot always detect those circles, even if their position is unchanged. Here is the code:
import cv2
import numpy as np
import time

wait = 5
st = time.clock()
on = 0
arka = 0

def circleS():
    circles = cv2.HoughCircles(img_grs, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=150,
                               param2=35,
                               minRadius=5,
                               maxRadius=30)
    on = 0
    arka = 0
    if(circles[0,0,2]>circles[0,1,2]):
        on = (circles[0,0,0], circles[0,0,1])
        arka = (circles[0,1,0], circles[0,1,1])
    if(circles[0,0,2]<circles[0,1,2]):
        on = (circles[0,1,0], circles[0,1,1])
        arka = (circles[0,0,0], circles[0,0,1])
    cv2.circle(img_bgr, on, 2, (0,255,0), 3)
    cv2.circle(img_bgr, arka, 2, (0,0,255), 3)
    return on, arka

def takePic():
    ret, frame = camera.read()
    img_bgr = np.copy(frame)
    frame = None
    return img_bgr

camera = cv2.VideoCapture(1)
while((time.clock()-st) <= wait):
    ret, frame = camera.read() # Capture a frame
    #cv2.imshow('Camera Stream',frame) # Display the captured frame in a window named Camera Stream
    cv2.waitKey(1)
cv2.destroyAllWindows()

while(1):
    img_bgr = takePic()
    img_grs = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    ret, img_bin = cv2.threshold(img_grs, 220, 255, cv2.THRESH_BINARY)
    corners = cv2.goodFeaturesToTrack(img_bin, 9, 0.03, 3)
    corners = np.int0(corners)
    on, arka = np.int0(circleS())
    for i in corners:
        x, y = i.ravel()
        if(x >= arka[0]+15 or x <= arka[0]-15 or y >= arka[1]+15 or y <= arka[1]-15):
            cv2.circle(img_grs, (x,y), 3, 255, -1)
    cv2.imshow("Camera Stream", img_bgr)
    cv2.waitKey(10)
    img_bgr = None
    img_grs = None
    time.sleep(5)

camera.release()
cv2.imshow("bin", img_bgr)
cv2.waitKey(0)
cv2.destroyAllWindows()
The error I am getting is an index-out-of-bounds error on the circles array. What's going wrong?
The index out-of-bounds error on circles happens because cv2.HoughCircles() returns None when it does not find any circle (as you mention), and even when it succeeds, your indexing assumes at least two circles were found.

Put in an extra check after the cv2.HoughCircles() call:

if circles is not None:
    # your circle stuff
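For example, a guarded version of the question's circleS() could look like the sketch below (the drawing calls are omitted; the caller must then handle the (None, None) case before converting with np.int0):

def circleS():
    circles = cv2.HoughCircles(img_grs, cv2.HOUGH_GRADIENT, 1, 20,
                               param1=150, param2=35,
                               minRadius=5, maxRadius=30)
    # HoughCircles returns None when it detects nothing, and the
    # comparisons below need at least two detected circles
    if circles is None or circles.shape[1] < 2:
        return None, None
    if circles[0,0,2] > circles[0,1,2]:
        on = (circles[0,0,0], circles[0,0,1])
        arka = (circles[0,1,0], circles[0,1,1])
    else:
        on = (circles[0,1,0], circles[0,1,1])
        arka = (circles[0,0,0], circles[0,0,1])
    return on, arka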
import cv2
import mediapipe as mp
import pyautogui as py

cam = cv2.VideoCapture(0)
face_mesh = mp.solutions.face_mesh.FaceMesh(refine_landmarks=True)
while True:
    _, frame = cam.read()
    frame = cv2.flip(frame, 1)
    '''frameRGB = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)'''
    output = face_mesh.process(frameRGB)
    face_landmark = output.multi_face_landmarks
    frame_w, frame_h, _ = frame.shape
    '''if face_landmark:
        landmarks = face_landmark[0].landmark
        for landmark in enumerate(landmarks[474:478]):
            x = int(landmark.x * frame_w)
            y = int(landmark.y * frame_h)
            cv2.circle(frame, (x, y), 3, (0, 255, 0))
            if id == 1:
                py.moveTo(x, y)'''
    print(x, y)
    cv2.imshow('Lazy mouse', frame)
    cv2.waitKey(1)
It's printing where my head is for x, y, but it isn't showing me a camera window with my face. I believe it has to do with the frame or with the RGB conversion. I tried debugging but had no luck; I've highlighted (in triple quotes) where I believe the problems are.
enumerate() is a Python built-in function that returns a sequence of (index, value) tuples, not the values themselves.
Instead of
for landmark in enumerate(landmarks[474:478]): # wrong
you should use
for landmark in landmarks[474:478]:
or
for (index, landmark) in enumerate(landmarks[474:478]):
Here index is the index into the sublist/slice, so you will get indices 0, 1, 2, 3.
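A quick illustration with a made-up list (the values are just placeholders):

values = ['a', 'b', 'c', 'd']
for (index, value) in enumerate(values):
    print(index, value)
# prints:
# 0 a
# 1 b
# 2 c
# 3 d

In the question's loop, each (index, value) tuple was bound to the single name landmark, so landmark.x failed because a tuple has no attribute x.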
So I have this code below, and I was expecting it to change the width of the frame based on the loop value i from 1 to 1000 in real time. To put that in visual terms: when I executed the code, I expected the window's width to change while the video was playing.
import cv2
import numpy as np

vid = cv2.VideoCapture("C:\\users\\USER\\downloads\\man.mp4")
while True:
    ret, frame = vid.read()
    for i in range(1, 1000):
        frame = cv2.resize(frame, (i, 450))
        size = 16
        # Create motion blur kernel
        kernel_motion_blur = np.zeros((size, size))
        kernel_motion_blur[int((size-1)/2), :] = np.ones(size)
        kernel_motion_blur = kernel_motion_blur / size
        # Apply motion blur kernel
        result = cv2.filter2D(frame, -1, kernel_motion_blur)
        cv2.imshow('Motion Blur Applied', result)
        cv2.imshow('Original', frame)
        if cv2.waitKey(1) == ord('q'):
            break
cv2.destroyAllWindows()
vid.release()
It works, but the problem is that when I execute the code, the video frame gets glitchy and doesn't display the correct image of what's actually in the video. So how do I fix that?
[Screenshots: the video before and after]
I'm not sure I totally understand the question, or why you have a for loop inside of a while loop, but I think I was able to achieve the effect you are going for. Take a look:
import cv2
import numpy as np

# control how fast the image slides left to right
step_size = 10

vid = cv2.VideoCapture("videos\demo.mp4")
while True:
    ret, frame = vid.read()
    if ret == False:
        break
    width = frame.shape[1]
    for n in range(0, width, step_size):
        frame_to_show = frame[:, :n+step_size]
        size = 16
        # Create motion blur kernel
        kernel_motion_blur = np.zeros((size, size))
        kernel_motion_blur[int((size-1)/2), :] = np.ones(size)
        kernel_motion_blur = kernel_motion_blur / size
        # Apply motion blur kernel
        result = cv2.filter2D(frame_to_show, -1, kernel_motion_blur)
        cv2.imshow('Motion Blur Applied', result)
        cv2.imshow('Original', frame_to_show)
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
cv2.destroyAllWindows()
vid.release()
Or perhaps this is more what you are looking for:
import cv2
import numpy as np

# control how fast the image slides left to right
step_size = 10

vid = cv2.VideoCapture("videos\demo.mp4")
end = 0
while True:
    ret, frame = vid.read()
    if ret == False:
        break
    width = frame.shape[1]
    if end > width:
        end = 0
    end += step_size
    frame_to_show = frame[:, :end]
    size = 16
    # Create motion blur kernel
    kernel_motion_blur = np.zeros((size, size))
    kernel_motion_blur[int((size-1)/2), :] = np.ones(size)
    kernel_motion_blur = kernel_motion_blur / size
    # Apply motion blur kernel
    result = cv2.filter2D(frame_to_show, -1, kernel_motion_blur)
    cv2.imshow('Motion Blur Applied', result)
    cv2.imshow('Original', frame_to_show)
    key = cv2.waitKey(1)
    if key == ord('q'):
        break
cv2.destroyAllWindows()
vid.release()
I'm relatively new to scripting. (I know quite a bit but I also don't know quite a bit.)
I'm trying to have a simple script use OpenCV-Python to subtract two frames from a webcam and draw a bounding box around the changed pixels. The issue is that when I try to compute the bounding box (x,y,w,h = cv2.boundingRect(contours)), it gives this error:
Message=OpenCV(4.5.3) :-1: error: (-5:Bad argument) in function 'boundingRect'
> Overload resolution failed:
> - array is not a numpy array, neither a scalar
> - Expected Ptr<cv::UMat> for argument 'array'
I've been searching around for quite a while, but very few people seem to have had my issue, and pretty much none of them had solutions that worked.
Here's my code:
import cv2
from time import sleep as wait
import numpy as np

lastFrame = "foobaz"
i = 0

#My webcam is on index 1, this isn't (at least shouldn't be) the issue. Make sure to set it back to 0 if you are testing
vid = cv2.VideoCapture(1)

#A placeholder black image for the 'subtract' imshow window
black = np.zeros((512,512,3), np.uint8)

while(True):
    wait(0.5)
    ret, frame = vid.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blurframe = cv2.GaussianBlur(frame, (25,25), 0)

    #Makes sure lastFrame has been assigned; if not, use the placeholder black image.
    if lastFrame != "foobaz":
        #Subtracts the current frame and the last frame to find the difference.
        subFrame = cv2.subtract(blurframe, lastFrame)
    else:
        subFrame = black

    #Assigns the next lastFrame
    lastFrame = blurframe

    #Gets the threshold of the subtracted image
    ret, thresh1 = cv2.threshold(subFrame, 40, 255, cv2.THRESH_BINARY)

    #Converts the thresholded image to grayscale the first time the loop runs.
    if i == 0:
        thresh1 = cv2.cvtColor(thresh1, cv2.COLOR_BGR2GRAY)
        i += 1

    #This is where issues arise. I'm trying to apply a bounding box using a contour, but it always errors at the boundingRect call below.
    contours = cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[0]
    print(len(contours))
    x, y, w, h = cv2.boundingRect(contours)
    cv2.rectangle(frame, (x,y), (x+w, y+h), (0,255,0), 2)

    cv2.imshow('frame', frame)
    cv2.imshow('subtract', thresh1)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
I saw that some other posts included the contour's type(), so here it is:
type(contours) = <class 'list'>
CLOSED: I found out the issue. You have to iterate contours for it to work.
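In other words, cv2.boundingRect() expects a single contour (an array of points), not the whole list that cv2.findContours() returns. A minimal sketch of the fix, drawing one box per contour:

for contour in contours:
    x, y, w, h = cv2.boundingRect(contour)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)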
I wrote a program that finds 2 ROIs, selects them in 2 separate frames, and then counts the number of green pixels in each frame.

It works fine, but when I apply morphology masks to the camera feed it gives me this error:

/home/pi/opencv/opencv-3.4.0/modules/core/src/arithm.cpp:1769: error: (-209) The lower boundary is neither an array of the same size and same type as src, nor a scalar in function inRange

How can I fix this? The problem occurs here:
maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
maskFinal = maskClose
...
for i in range(len(conts)):
    x, y, w, h = cv2.boundingRect(conts[i])
    area = maskFinal[y:y+h, x:x+w]
    pixcount = cv2.inRange(area, lowerBound, upperBound)
    pixNum = cv2.countNonZero(pixcount)
Full code
import cv2
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import sys

prevNrOfContours = 0
lowerBound = np.array([43,53,30])
upperBound = np.array([132,255,255])

camera = PiCamera()
camera.rotation = 180
camera.resolution = (640, 480)
camera.framerate = 30
font = cv2.FONT_HERSHEY_SIMPLEX
rawCapture = PiRGBArray(camera, size=(640, 480))
GREEN_MIN = np.array([0, 0, 0])
GREEN_MAX = np.array([0, 0, 0])

# allow the camera to warmup
time.sleep(0.1)

kernelOpen = np.ones((5,5))
kernelClose = np.ones((20,20))

# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    img = frame.array
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(imgHSV, lowerBound, upperBound)

    #morphology
    maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
    maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)

    maskFinal = maskClose
    _, conts, _ = cv2.findContours(maskFinal.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    nrOfContours = len(conts)

    for i in range(len(conts)):
        x, y, w, h = cv2.boundingRect(conts[i])
        area = maskFinal[y:y+h, x:x+w]  ######## problem is here
        pixcount = cv2.inRange(area, lowerBound, upperBound)
        pixNum = cv2.countNonZero(pixcount)
        print("Area No." + str(i), "Green pixels = " + str(pixNum))
        cv2.rectangle(img, (x,y), (x+w,y+h), (0,0,255), 2)
        cv2.imshow("area" + str(i), area)

    # close unnecessary windows
    if prevNrOfContours > nrOfContours:
        for i in range(nrOfContours, prevNrOfContours):
            cv2.destroyWindow("area" + str(i))
    prevNrOfContours = nrOfContours

    cv2.imshow("cam", img)

    # clear the stream in preparation for the next frame
    key = cv2.waitKey(1) & 0xFF
    rawCapture.truncate(0)
    if key == ord("q"):
        break
You're creating a subimage of maskFinal, and maskFinal is a single-channel binary image, which is incompatible with the 3-channel lowerBound.

To solve it, you can actually drop the line pixcount = cv2.inRange(area, lowerBound, upperBound). The mask is already white in the green areas, so counting the nonzero pixels is enough.

Note: the current subimage includes the morphological transformations. If you do not want to count the pixels those added, create the subimage from mask instead of maskFinal.
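A minimal sketch of the corrected loop body, reusing the names from the question's code:

for i in range(len(conts)):
    x, y, w, h = cv2.boundingRect(conts[i])
    area = maskFinal[y:y+h, x:x+w]   # binary sub-image: white where green was detected
    pixNum = cv2.countNonZero(area)  # no second inRange call needed
    print("Area No." + str(i), "Green pixels = " + str(pixNum))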
So, this piece of code seems to find a circle using my webcam pretty easily. However, I'd like it to also draw a circle whenever one is found instead of simply closing the program. I tried to add a cv2.circle(parameters...) call to the code, but it didn't work. Can anyone help me?
import cv2
import numpy as np
import sys

color = (0,0,255)

cap = cv2.VideoCapture(0)
while(True):
    gray = cv2.medianBlur(cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY), 5)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 10, minRadius=1, maxRadius=20)
    if circles is not None:
        print("Circle There !")
    cv2.imshow('video', gray)
    if cv2.waitKey(1) == 27:  # Esc key
        break
cap.release()
cv2.destroyAllWindows()
You are probably going to want to mess with the parameters in the cv2.HoughCircles function (you may even be able to delete some of the params entirely), but this should work.
cap = cv2.VideoCapture(0)
while(True):
    gray = cv2.medianBlur(cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY), 5)
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 10,
                               param1=150, param2=40, minRadius=0, maxRadius=1000)
    if circles is not None:
        for i in circles[0, :]:
            # draw the outer circle
            cv2.circle(gray, (int(i[0]), int(i[1])), int(i[2]), (0,255,0), 2)
            # draw the center of the circle
            cv2.circle(gray, (int(i[0]), int(i[1])), 2, (0,0,255), 3)
    cv2.imshow('video', gray)
    if cv2.waitKey(1) == 27:  # Esc key
        break
cap.release()
cv2.destroyAllWindows()