How can I make my OpenCV code work without getting stuck? - Python

I'm a new coder and I'm working on my graduation project with the OpenCV library (Python).
I need to write code that detects a balloon against a constant background and monitors it.
I decided to combine background subtraction with HoughCircles. Each part works fine on its own (except the circle detection, which is not great), but when I combine the two, the program runs very slowly and keeps getting stuck. Can someone suggest how to put them together so it doesn't get stuck, and how to improve the HoughCircles detection?
import numpy as np
#import RPi.GPIO as GPIO
import time
import cv2

cap = cv2.VideoCapture(0)
fgbg = cv2.createBackgroundSubtractorMOG2()

while True:
    ret, frame = cap.read()
    if frame is None:
        break

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)

    #changes = sum(sum(fgmask > 200))
    changes = (fgmask > 200).sum()
    is_moving = (changes > 10000)
    print(changes, is_moving)

    items = []
    contours, hier = cv2.findContours(fgmask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if 200 < area:
            (x, y, w, h) = cv2.boundingRect(cnt)
            cv2.rectangle(fgmask, (x, y), (x + w, y + h), 255, 2)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            items.append((area, x, y, w, h))

    if items:
        main_item = max(items)
        area, x, y, w, h = main_item
        if w > h:
            r = w // 2
        else:
            r = h // 2
        cv2.circle(frame, (x + w // 2, y + h // 2), r, (0, 0, 255), 2)
        print(x + w // 2, y + h // 2)

    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 50,
                               param1=50, param2=30, minRadius=0, maxRadius=0)
    #circles = np.uint16(np.around(circles))
    if circles is not None:
        # convert the (x, y) coordinates and radius of the circles to integers
        circles = np.round(circles[0, :]).astype("int")
        # loop over the (x, y) coordinates and radius of the circles
        for (x, y, r) in circles:
            # draw the circle in the output image, then draw a rectangle in the image
            # corresponding to the center of the circle
            cv2.circle(gray, (x, y), r, (0, 255, 0), 4)
            #cv2.rectangle(gray, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
            # time.sleep(0.5)
            print("Column Number: ")
            print(x)
            print("Row Number: ")
            print(y)
            print("Radius is: ")
            print(r)

    cv2.imshow('fgmask', fgmask)
    cv2.imshow('frame', frame)
    cv2.imshow('gray', gray)

    k = cv2.waitKey(10) & 0xff
    if k == 27:
        break

cv2.destroyAllWindows()
cap.release()
Thank you very much!
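One common way to avoid the slowdown (a sketch, not from the original thread): run HoughCircles only when the background subtractor reports motion, and only on the bounding box of the main moving contour rather than on the whole frame; constraining minRadius/maxRadius to the contour size also tends to make HoughCircles more reliable. The variable names follow the code above; the padding value is an assumption.

# Sketch: restrict the (expensive) Hough transform to the region that is actually moving.
# x, y, w, h come from main_item above; pad is an assumed margin in pixels.
if is_moving and items:
    pad = 20
    x0, y0 = max(x - pad, 0), max(y - pad, 0)
    x1, y1 = min(x + w + pad, gray.shape[1]), min(y + h + pad, gray.shape[0])
    roi = gray[y0:y1, x0:x1]
    circles = cv2.HoughCircles(roi, cv2.HOUGH_GRADIENT, 1, 50,
                               param1=50, param2=30,
                               minRadius=max(min(w, h) // 4, 5),
                               maxRadius=max(w, h))
    if circles is not None:
        for (cx, cy, r) in np.round(circles[0, :]).astype("int"):
            # Hough coordinates are relative to the ROI, so shift them back to the full frame
            cv2.circle(frame, (x0 + cx, y0 + cy), r, (0, 255, 0), 2)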

Related

Remove Contours OpenCV

My image: (see attached). I want to get: https://ibb.co/t8hNkM2. I could only get: (see attached).
I was able to find the maximum contour:
def img_counter_max(image_file: str):
    img = cv2.imread(image_file)
    # grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert from BGR to grayscale
    cv2.waitKey(0)
    # binarize
    ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
    cv2.waitKey(0)
    # find contours
    ctrs, hier = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    # sort contours
    sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
    # sorted_ctrs = sorted(ctrs, key=cv2.contourArea, reverse=True)[0]
    contour_sizes = [(cv2.contourArea(contour), contour) for contour in sorted_ctrs]
    biggest_contour = max(contour_sizes, key=lambda x: x[0])[1]
    x, y, w, h = cv2.boundingRect(biggest_contour)
    roi = img[y:y + h, x:x + w]
    cv2.imwrite("C:\\Users\\dennn\\PycharmProjects\\untitled2\\imag\\roi1.jpg",
                roi)
    cv2.rectangle(img, (x, y), (x + w, y + h), (90, 255, 0), 2)
    from tensorflow.python import Size
    resize_img = cv2.resize(img, (512, 512))
    # cv2.resize(img, Size(512, 512), interpolation=cv2.INTER_AREA)
    cv2.namedWindow("Display frame", cv2.WINDOW_AUTOSIZE)
    cv2.imshow('Display frame', resize_img)
    cv2.waitKey(0)
How do I get the image I need?
I found that sorting by contourArea() gives wrong results. It probably calculates the area enclosed by the contour itself, not the area of the bounding rectangle that is used later, and that rectangle can be bigger.
I use boundingRect() to get the rectangle that the contour occupies and calculate its size as w*h; sorting by that value orders the contours correctly.
I use a for-loop to display the image with the different rectangles and see which contour gives the expected region. This way I can see that the third contour gives the expected region, so I can use [2] to get it and save it.
Eventually I would use the size to select the region whose w*h falls in some range (a small sketch of this filter appears after the code below):
expected_region_size - range < w*h < expected_region_size + range
Eventually I would also use a for-loop that displays the image with the different rectangles to select manually which rectangle to save to a file.
import cv2

img = cv2.imread('image.jpg')

# grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert from BGR to grayscale

# binarize
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)

# find contours
ctrs, hier = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

# find rect and area - and create items [contour, rect, area] - but sorting by area gives wrong results
#items = [[ctr, cv2.boundingRect(ctr), cv2.contourArea(ctr)] for ctr in ctrs]

# find rect - and create items [contour, rect]
items = [[ctr, cv2.boundingRect(ctr)] for ctr in ctrs]

# find rect's size and create items [contour, rect, size]
items = [[ctr, rect, rect[2]*rect[3]] for ctr, rect in items]

# sort by size
items = sorted(items, key=lambda x: x[2], reverse=True)

for index, item in enumerate(items[:5]):
    contour = item[0]
    x, y, w, h = item[1]
    size = item[2]
    print(index, '->', size, '(', x, y, w, h, ')')

    img_copy = img.copy()
    cv2.rectangle(img_copy, (x, y), (x + w, y + h), (0, 0, 255), 15)
    resize_img = cv2.resize(img_copy, (512, 512))
    cv2.imshow('frame', resize_img)
    cv2.waitKey(0)

cv2.destroyAllWindows()

# --- save image ---

item = items[2]

contour = item[0]
x, y, w, h = item[1]
size = item[2]

img = img[y:y+h, x:x+w]
cv2.imwrite('output.jpg', img)
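A small sketch of the size-range filter mentioned above; expected_region_size and tolerance are placeholder values to tune, not taken from the original answer.

# Keep only contours whose bounding-rect size (w*h, stored as item[2]) is close to the expected size.
expected_region_size = 100000   # placeholder - measure it for your image
tolerance = 20000               # placeholder - allowed deviation

candidates = [item for item in items
              if expected_region_size - tolerance < item[2] < expected_region_size + tolerance]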
The code finds the characters well, but outputs them out of order.
I found a piece of code that should solve this problem, but I can't get it to work. After finding the contours with contours = cv2.findContours(), it uses:
boundary = []
for c, cnt in enumerate(contours):
    x, y, w, h = cv2.boundingRect(cnt)
    boundary.append((x, y, w, h))

count = np.asarray(boundary)
max_width = np.sum(count[::, (0, 2)], axis=1).max()
max_height = np.max(count[::, 3])
nearest = max_height * 1.4
ind_list = np.lexsort((count[:, 0], count[:, 1]))
c = count[ind_list]
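For reference, a minimal sketch of sorting bounding boxes into reading order (top-to-bottom, then left-to-right). It assumes characters on the same text line have roughly similar y values; row_tolerance is a placeholder to tune, for example something like the max_height computed above.

import numpy as np

def sort_reading_order(boxes, row_tolerance):
    """boxes: list of (x, y, w, h) tuples. Returns them sorted by row, then left to right."""
    boxes = np.asarray(boxes)
    rows = boxes[:, 1] // row_tolerance        # boxes on the same text line get the same row index
    order = np.lexsort((boxes[:, 0], rows))    # primary key: row, secondary key: x
    return boxes[order]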
Find symbols
img = "C:\\Users\\dennn\\PycharmProjects\\untitled2\\output.jpg" dir = os.curdir
path = os.path.join(dir,img)
raw_image = cv2.imread(path,0)
cv2.imshow("original",raw_image)
plt.subplot(2,3,1)
plt.title("Original")
plt.imshow(raw_image,'gray')
plt.xticks([]),plt.yticks([]);
sm_image = cv2.blur(raw_image,(8,8))
cv2.imshow("smoothed",sm_image)
plt.subplot(2,3,2)
plt.title("Smoothed")
plt.imshow(sm_image,'gray')
plt.xticks([]),plt.yticks([]);
#cv2.imshow("smoothed",sm_image)
ret,bw_image = cv2.threshold(sm_image,160,255,cv2.THRESH_BINARY_INV)
cv2.imshow("thresholded",bw_image)
plt.subplot(2,3,3)
plt.title("Thresholded")
plt.imshow(bw_image,'gray')
plt.xticks([]),plt.yticks([]);
kernel = np.ones((4,4),np.uint8)
er_image = cv2.erode(bw_image,kernel)
cv2.imshow("eroded",er_image)
plt.subplot(2,3,4)
plt.title("Eroded")
plt.imshow(er_image,'gray')
plt.xticks([]),plt.yticks([]);
kernel = np.ones((2,2),np.uint8)
di_image = cv2.dilate(er_image,kernel)
cv2.imshow("dilated",di_image)
plt.title("Dilated")
plt.subplot(2,3,5)
plt.imshow(di_image,'gray')
plt.xticks([]),plt.yticks([]);
mo_image = di_image.copy()
contour0 =
cv2.findContours(mo_image.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
contours = [cv2.approxPolyDP(cnt,3,True) for cnt in contour0[0]]
maxArea = 0
rect = []
for ctr in contours:
maxArea = max(maxArea, cv2.contourArea(ctr))
if img == "C:\\Users\\dennn\\PycharmProjects\\untitled2\\output.jpg":
areaRatio = 0.05
for ctr in contours:
if cv2.contourArea(ctr) > maxArea * areaRatio:
rect.append(cv2.boundingRect(cv2.approxPolyDP(ctr, 1, True)))
symbols = []
for i in rect:
x = i[0]
y = i[1]
w = i[2]
h = i[3]
p1 = (x, y)
p2 = (x + w, y + h)
cv2.rectangle(mo_image, p1, p2, 255, 2)
image = cv2.resize(mo_image[y:y + h, x:x + w], (32, 32))
symbols.append(image.reshape(1024, ).astype("uint8"))
testset_data = np.array(symbols)
cv2.imshow("segmented", mo_image)
plt.subplot(2, 3, 6)
plt.title("Segmented")
plt.imshow(mo_image, 'gray')
plt.xticks([]), plt.yticks([]);
# plt.show()
# garbage collection
cv2.destroyAllWindows()
plt.close()
# show glyphs
for i in range(len(symbols)):
image = np.zeros(shape=(64,64))
image[15:47,15:47] = symbols[i].reshape((32,32))
cv2.imshow("sym",image)
cv2.waitKey(0)
cv2.destroyAllWindows()
plt.close()

How can I calculate the area of a circle which I detected with cv2.HoughCircles()?

Hi guys,
I have written a program that recognizes circles with cv2.HoughCircles(), and the code works. Unfortunately, I need the area of the circles for my project, but I don't know how to calculate it, and my search on the internet was unsuccessful.
Thank you.
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.html
import cv2
import numpy as np

img = cv2.imread('opencv_logo.png', 0)
img = cv2.medianBlur(img, 5)
cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 20,
                           param1=50, param2=30, minRadius=0, maxRadius=0)
circles = np.uint16(np.around(circles))
for i in circles[0, :]:
    # draw the outer circle
    cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 2)
    # draw the center of the circle
    cv2.circle(cimg, (i[0], i[1]), 2, (0, 0, 255), 3)

cv2.imshow('detected circles', cimg)
cv2.waitKey(0)
cv2.destroyAllWindows()
i[0] is the x position
i[1] is the y position
i[2] is the radius
Area is calculated with the formula pi * r²
So the area of each detected circle would be:
for i in circles[0, :]:
    area = 3.14159 * i[2] * i[2]
From HoughCircles() you get a list of circles, each given as x, y, r: x, y are the coordinates of the center and r is the radius.
From the radius you can calculate the area of the circle with:
A = PI * r^2
Thank you guys,
@Tin Nguyen
If I use:
for i in circles[0, :]:
    area = 3.14159 * i[2] * i[2]
then I get the following error: "TypeError: 'NoneType' object is not subscriptable"
This is my code; I want to detect circles in real time and calculate the area.
import pypylon.pylon as py  # wrapper to control Basler camera with python
import cv2  # openCV
import numpy as np

first_device = py.TlFactory.GetInstance().CreateFirstDevice()
icam = py.InstantCamera(first_device)
icam.Open()

# set parameters
icam.PixelFormat = "RGB8"

# if only a part of image sensor is used an offset is required or centering
'''icam.Width = 640
icam.Height = 480
icam.CenterX = False
icam.CenterY = False'''

# Demonstration of setting parameters - properties can be found on Pylon Viewer
# Auto property values are 'Off', 'Once', 'Continuous'
icam.GainAuto = 'Off'
icam.ExposureAuto = 'Continuous'
icam.BalanceWhiteAuto = 'Off'
icam.Gain = 0  # minimum gain value
# icam.ExposureTime = 50000  # exposure time or use ExposureAuto

icam.StartGrabbing(py.GrabStrategy_LatestImages)

while True:
    res = icam.RetrieveResult(1000)  # 1000 = time constant for time-out
    frame = cv2.cvtColor(res.Array, cv2.COLOR_RGB2BGR)
    output = frame.copy()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    gray = cv2.medianBlur(gray, 5)
    gray = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 11, 3.5)
    kernel = np.ones((2, 3), np.uint8)
    gray = cv2.erode(gray, kernel)
    gray = cv2.dilate(gray, kernel)

    # detect circles in the image
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 200, param1=30, param2=45,
                               minRadius=0, maxRadius=0)
    # print circles

    # ensure at least some circles were found
    if circles is not None:
        # convert the (x, y) coordinates and radius of the circles to integers
        circles = np.round(circles[0, :]).astype("int")
        # loop over the (x, y) coordinates and radius of the circles
        for (x, y, r) in circles:
            # draw the circle in the output image, then draw a rectangle in the image
            # corresponding to the center of the circle
            cv2.circle(output, (x, y), r, (0, 255, 0), 4)
            cv2.rectangle(output, (x - 5, y - 5), (x + 5, y + 5), (0, 128, 255), -1)
            # time.sleep(0.5)
            print("Column Number: ")
            print(x)
            print("Row Number: ")
            print(y)
            print("Radius is: ")
            print(r)

    # Display the resulting frame
    cv2.imshow('gray', gray)
    cv2.imshow('frame', output)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

for i in circles[0, :]:
    area = 3.14159 * i[2] * i[2]
    print(area)

icam.StopGrabbing()
icam.Close()
cv2.destroyAllWindows()
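For what it's worth, the TypeError comes from the final loop: cv2.HoughCircles() returns None when it finds no circles, and indexing None with circles[0, :] fails. A minimal sketch (not from the original thread) of a guarded area calculation, assuming circles is the raw return value of cv2.HoughCircles():

import numpy as np

# circles is None when HoughCircles found nothing, so guard before indexing it
if circles is not None:
    for (x, y, r) in np.round(circles[0, :]).astype("int"):
        area = np.pi * r * r        # area of a circle: pi * r^2
        print("radius:", r, "area:", area)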

Object counting software using opencv

I am working on counting software. I have successfully detected the products and now want to count them. For counting, I have defined two lines, and if a centroid falls between the two lines the product is counted. I am facing two problems. First, since the products move very fast, some of them never fall between the lines, so I increased the line spacing. Second, after increasing the line spacing, if a product falls between the lines in two consecutive frames it is counted twice. I tried putting the centroids in a list and clearing the list once a product is counted, but the count is still doubled or wrong. Please help.
Here is the video link
https://drive.google.com/file/d/1Gqp937OlDpF4_Nx2kSOzHw3A85Xch-HX/view?usp=sharing
import cv2
import math
import numpy as np

width = 0
height = 0
EntranceCounter = 0
ExitCounter = 0
OffsetRefLines = 120
QttyOfContours = 0
area = 0
detect = []
area_min = 100
area_max = 4000
BinarizationThreshold = 70

#Check if an object in entering in monitored zone
##def CheckEntranceLineCrossing(CoordYCentroid, CoorYEntranceLine, CoorYExitLine, area):
##    AbsDistance = abs(CoordYCentroid - CoorYEntranceLine)
##    if ((y > CoorYEntranceLine) and (y < CoorYExitLine)):  #and area >= 5000):
##        return 1
##    #else:
##    #    return 0

cap = cv2.VideoCapture('testvideo.avi')

while(cap.isOpened()):
    ret, frame = cap.read()
    height = np.size(frame, 0)
    width = np.size(frame, 1)

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    _, threshold = cv2.threshold(frame, 100, 255, cv2.THRESH_BINARY)
    low = np.array([0, 0, 0])
    high = np.array([60, 60, 60])
    image_mask = cv2.inRange(threshold, low, high)
    contours, _ = cv2.findContours(image_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    cv2.drawContours(frame, contours, -1, (0, 255, 0), 3)

    CoorYEntranceLine = int(150 - OffsetRefLines)
    CoorYExitLine = int(150 + OffsetRefLines)
    cv2.line(threshold, (0, CoorYEntranceLine), (width, CoorYEntranceLine), (0, 255, 0), 2)
    cv2.line(threshold, (0, CoorYExitLine), (width, CoorYExitLine), (0, 0, 255), 2)

    for (i, c) in enumerate(contours):
        new = True
        QttyOfContours = QttyOfContours + 1
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(threshold, (x, y), (x + w, y + h), (0, 0, 255), 2)
        area = cv2.contourArea(c)
        if area < area_min:
            continue

        #find object's centroid
        CoordXCentroid = int((x + x + w) / 2)
        CoordYCentroid = int((y + y + h) / 2)
        ObjectCentroid = (CoordXCentroid, CoordYCentroid)
        #center = pega_centro(x, y, w, h)
        center = (CoordXCentroid, CoordYCentroid)
        detect.append(center)

        for (x, y) in detect:
            if ((y < CoorYExitLine) and (y > CoorYEntranceLine) and (area > area_min) and (area < area_max)):
                if new == True:
                    EntranceCounter += 1
                    detect.clear()
                    break

        cv2.putText(frame, str(EntranceCounter), ((CoordXCentroid), (CoordYCentroid - 50)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

    cv2.putText(threshold, "Entrances: {}".format(str(EntranceCounter)), (10, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (250, 0, 1), 2)
    cv2.imshow('fram1', threshold)
    cv2.imshow('frame', frame)

    if cv2.waitKey(1) == 27:
        break

cap.release()
cv2.destroyAllWindows()
Just check the intersection of the counting line with the segment between the current and previous object position. And, as an extra guarantee, use a "counted" flag for tracked objects.
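A minimal sketch of that idea (the names are illustrative, not from the original post): an object is counted the first time the segment between its previous and current centroid crosses the counting line, and a "counted" flag prevents it from being counted again.

def crossed_line(prev_cy, curr_cy, line_y):
    """True if the centroid moved from one side of the horizontal line y = line_y to the other."""
    return (prev_cy - line_y) * (curr_cy - line_y) <= 0 and prev_cy != curr_cy

# tracked maps an object id to its last centroid and a "counted" flag
def update_count(tracked, object_id, centroid, line_y, counter):
    obj = tracked.setdefault(object_id, {"prev": centroid, "counted": False})
    if not obj["counted"] and crossed_line(obj["prev"][1], centroid[1], line_y):
        counter += 1
        obj["counted"] = True       # extra guarantee: never count the same object twice
    obj["prev"] = centroid
    return counter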

Keep the same label for a moving object using opencv

I want to track a moving fish from a camera located on top of a fish tank. So far I have been able to track multiple moving objects using a moving average and background subtraction, and I put a text label on each contour. The problem is that I can't keep the same label for the same moving fish: the fish is detected in every frame, but the tracked object number keeps changing. I have attached my current Python code. What am I doing wrong here? Can someone suggest another possible way to do this?
import cv2
import numpy as np

device = cv2.VideoCapture(0)
flag, frame = device.read()

movingaverage = np.float32(frame)
background = cv2.createBackgroundSubtractorMOG2()
font = cv2.FONT_HERSHEY_SIMPLEX
kernelOpen = np.ones((5, 5))
kernelClose = np.ones((20, 20))

while True:
    flag, frame = device.read()

    alpha = float(1.0/2.0)
    cv2.accumulateWeighted(frame, movingaverage, alpha)

    gaussion = background.apply(frame)

    res = cv2.convertScaleAbs(movingaverage)
    difference_img = cv2.absdiff(res, frame)
    grey_difference_img = cv2.cvtColor(difference_img, cv2.COLOR_BGR2GRAY)
    ret, th1 = cv2.threshold(grey_difference_img, 10, 255, cv2.THRESH_BINARY)

    combine = cv2.bitwise_and(gaussion, gaussion, mask=grey_difference_img)

    _, conts, h1 = cv2.findContours(combine.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    if len(conts) == 0:
        cv2.putText(frame, "No moving objects found!", (50, 200), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    else:
        number = 0
        for i in range(len(conts)):
            x, y, w, h = cv2.boundingRect(conts[i])
            if (w > 50) and (h > 50):
                cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
                cv2.putText(frame, str(number + 1) + "object", (x, y+h), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
                number = number + 1

    cv2.imshow("Gaussian", gaussion)
    cv2.imshow("Track", frame)

    if cv2.waitKey(1) == 27:
        break

device.release()
cv2.destroyAllWindows()
I have an idea, but I'm not sure about it. I have now modified the code to find the center of each contour. Could I store those coordinates in an array and, in the next frame, check whether each contour's center point is close to one of the stored values? That way I could guess which contour from the previous frame it corresponds to. I don't know how feasible this is, since I'm new to Python and OpenCV.
if (w > 50) and (h > 50):
    cnt = conts[i]
    M = cv2.moments(cnt)
    if M['m00'] != 0:
        cx = int(M['m10']/M['m00'])
        cy = int(M['m01']/M['m00'])
        #draw a circle at center of contours.
        cv2.circle(frame, (cx, cy), 2, (0, 0, 255), -1)
        print("(", cx, ",", cy, ")")
    cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
    cv2.putText(frame, str(number + 1) + "object", (x, y+h), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
I have edited my code. Now it works fine as long as the moving objects stay in the same horizontal plane. What I did was create a list called matched_contours, where each entry holds a paths list. I append each calculated center point to the paths of its matched_contours entry, and use min_dist to check whether a contour was already present in the previous frame. If it was, I update its entry with the new center point; if not, I add it as a new matched_contour.
THIS IS THE UPDATED PART
im2, contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)

for (_, contour) in enumerate(contours):
    (x, y, w, h) = cv2.boundingRect(contour)
    if (w >= 50) or (h >= 50):  # Find targeting size moving objects
        x1 = int(w / 2)
        y1 = int(h / 2)
        cx = x + x1
        cy = y + y1

        if len(matched_contours) == 0:  #No previous moving objects
            paths = []
            paths.append((cx, cy))
            object_num = object_num + 1
            matched_contours.append(((x, y, w, h), (cx, cy), object_num, paths))

            cv2.circle(frame, (cx, cy), 2, (0, 0, 255), -1)
            #cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
            rect = cv2.minAreaRect(contour)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(frame, [box], 0, (0, 255, 0), 2)
            cv2.putText(frame, "object " + str(object_num), (x, y+h), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        else:
            found = False
            for i in range(len(matched_contours)):
                ponits, center, num, path = matched_contours[i]
                old_cx, old_cy = center

                #Calculate euclidean distances between new center and old centers to check whether this contour already exists
                euc_dist = math.sqrt(float((cx - old_cx)**2) + float((cy - old_cy)**2))
                if euc_dist <= min_dist:
                    found = True  # mark as matched so it is not added again below
                    if len(path) == max_path_length:
                        for t in range(len(path)):
                            if t == max_path_length - 1:
                                path[t] = (cx, cy)
                            else:
                                path[t] = path[t+1]
                    else:
                        path.append((cx, cy))

                    matched_contours[i] = ((x, y, w, h), (cx, cy), num, path)

                    cv2.circle(frame, (cx, cy), 2, (0, 0, 255), -1)
                    #cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
                    rect = cv2.minAreaRect(contour)
                    box = cv2.boxPoints(rect)
                    box = np.int0(box)
                    cv2.drawContours(frame, [box], 0, (0, 255, 0), 2)
                    cv2.putText(frame, "object " + str(num), (x, y+h), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

                    #Draw path of the moving object
                    for point in range(len(path)-1):
                        cv2.line(frame, path[point], path[point+1], (255, 0, 0), 1)
                        cv2.circle(frame, path[point+1], 3, (0, 0, 255), -1)

            if not found:  #New moving object has been found
                object_num = object_num + 1
                paths = []
                paths.append((cx, cy))
                matched_contours.append(((x, y, w, h), (cx, cy), object_num, paths))

                cv2.circle(frame, (cx, cy), 2, (0, 0, 255), -1)
                #cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
                rect = cv2.minAreaRect(contour)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                cv2.drawContours(frame, [box], 0, (0, 255, 0), 2)
                cv2.putText(frame, "object " + str(object_num), (x, y+h), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

How to get x,y position of contours in Python OpenCV

I'm trying to get x and y positions of contours from the following image, but I messed up.
the image
I just need to find x and y positions of contours or center of the contours.
The result should be something like the following (I looked up the positions manually in GIMP):
290, 210
982, 190
570, 478
I believe it can be done with cv2.findContours method, but I'm really out of ideas right now.
-Offtopic-
I will use these values to set the cursor position using win32api.SetCursorPos((xposition, yposition)).
Thanks
You can refer here
Find Co-ordinates of Contours using OpenCV | Python
# Python code to find the co-ordinates of
# the contours detected in an image.
import numpy as np
import cv2

# Reading image
font = cv2.FONT_HERSHEY_COMPLEX
img2 = cv2.imread('test.jpg', cv2.IMREAD_COLOR)

# Reading same image in another
# variable and converting to gray scale.
img = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)

# Converting image to a binary image
# (black and white only image).
_, threshold = cv2.threshold(img, 110, 255, cv2.THRESH_BINARY)

# Detecting contours in image.
contours, _ = cv2.findContours(threshold, cv2.RETR_TREE,
                               cv2.CHAIN_APPROX_SIMPLE)

# Going through every contour found in the image.
for cnt in contours:
    approx = cv2.approxPolyDP(cnt, 0.009 * cv2.arcLength(cnt, True), True)

    # draws boundary of contours.
    cv2.drawContours(img2, [approx], 0, (0, 0, 255), 5)

    # Used to flatten the array containing
    # the co-ordinates of the vertices.
    n = approx.ravel()
    i = 0

    for j in n:
        if (i % 2 == 0):
            x = n[i]
            y = n[i + 1]

            # String containing the co-ordinates.
            string = str(x) + " " + str(y)

            if (i == 0):
                # text on topmost co-ordinate.
                cv2.putText(img2, "Arrow tip", (x, y),
                            font, 0.5, (255, 0, 0))
            else:
                # text on remaining co-ordinates.
                cv2.putText(img2, string, (x, y),
                            font, 0.5, (0, 255, 0))
        i = i + 1

# Showing the final image.
cv2.imshow('image2', img2)

# Exiting the window if 'q' is pressed on the keyboard.
if cv2.waitKey(0) & 0xFF == ord('q'):
    cv2.destroyAllWindows()
Indeed, you can do that with findContours. Since you have your contours there are several options:
Calculate enclosing rectangle and take e.g. the center point.
Calculate moments and take the centroid
Fit minimum enclosing circle and take the center
and so on...
Here are some examples of what you can do with your contours, including the options above.
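As a concrete example of option 2 above (the centroid from image moments), here is a minimal sketch; it assumes contours comes from cv2.findContours() as in the other answers.

import cv2

def contour_centroid(cnt):
    """Return the (cx, cy) centroid of a contour, or None for degenerate contours."""
    M = cv2.moments(cnt)
    if M["m00"] == 0:
        return None
    return int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])

for cnt in contours:
    center = contour_centroid(cnt)
    if center is not None:
        print(center)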
First you need to find contours, draw a bounding box and then take the x and y from there. I hope this helps
import numpy as np
import cv2

im = cv2.imread('ctBI9.png')
imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(imgray, 127, 255, 0)
im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

for c in contours:
    if cv2.contourArea(c) <= 50:
        continue
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
    center = (x, y)
    print(center)

while True:
    cv2.imshow('test', im)
    if cv2.waitKey(20) & 0xFF == 27:
        break
cv2.destroyAllWindows()
result is something like this
(93, 746)
(1174, 738)
(147, 736)
(395, 729)
(506, 404)
(240, 168)
(918, 130)
