Label unique contours (or draw independent contours) - Python

I'm working in Python with the OpenCV library.
I can threshold a camera capture, find contours (more than one) and draw them.
But I have a problem: I am trying to identify each contour with a unique id or tag (for example: Id: 1, Id: 2) so I can track them.
I need these contours to use a persistent id.
The goal is to draw a trail line and count more than one contour, and sometimes several nearby contours merge into one big one.
Note: I'm working with a depth camera, so my image is an array of depth values.
I've added a piece of my code below.
Thanks in advance.
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[1]
# only proceed if at least one contour was found
if len(contours) > 0:
    # iterate over the contours, computing the minimum
    # enclosing circle and centroid of each one
    for (i, c) in enumerate(contours):
        ((x, y), radius) = cv2.minEnclosingCircle(c)
        M = cv2.moments(c)
        if M["m00"] > 0:
            center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
            centerString = str(center)
            x = int(M["m10"] / M["m00"])
            y = int(M["m01"] / M["m00"])
        else:
            center = int(x), int(y)
        if radius > 10:
            # draw the circle and centroid on the frame
            cv2.circle(frame, (int(x), int(y)), int(radius),
                       (0, 255, 255), 2)
            cv2.circle(frame, center, 5, (0, 0, 255), -1)
            # then update the pointer trail
            if self.previous_position:
                cv2.line(self.trail, self.previous_position, center,
                         (255, 255, 255), 2)
        cv2.add(self.trail, frame, frame)
        print(center)
        self.previous_position = center
if len(contours) < 1:
    center = 0
    self.trail = numpy.zeros((self.cam_height, self.cam_width, 3),
                             numpy.uint8)
    self.previous_position = None

Two options. First off, the contours are already in a Python list, so the indices of that list can be used to enumerate them; in fact you're already doing that in some sense with (i, c) in enumerate(contours). You can just use the index i to 'color' each contour with the value i on a blank image, and then you'll know which contour is which just by examining the image. The other, probably better option IMO, is to use cv2.connectedComponents() to label the binary image itself instead of the contours of the binary image. Also, before labeling you might try morphological operations to close up blobs.
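For instance, here is a minimal sketch of both options, assuming mask is the binary image from the question and an OpenCV 3.x findContours return signature as in the code above:

import cv2
import numpy as np

# Option 1: paint each contour onto a blank single-channel image using its
# list index + 1 as the pixel value (0 stays "background")
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[1]
label_img = np.zeros(mask.shape[:2], np.uint8)
for i, c in enumerate(contours):
    cv2.drawContours(label_img, [c], -1, i + 1, thickness=-1)  # filled

# Option 2: label the binary mask itself; labels[y, x] holds the blob id.
# Closing first can merge nearby blobs that should count as one.
closed = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))
num_labels, labels = cv2.connectedComponents(closed)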

EDIT: I followed your recommendation and I have a new problem :)
The labels are not persistent. When I move both contours, the label numbers change.
I've added some pictures and my code.
Hand: label 0, circle: label 1
Hand: label 1, circle: label 0
while True:
    cnt += 1
    if (cnt % 10) == 0:
        now = time.time()
        dt = now - last
        fps = 10 / dt
        fps_smooth = (fps_smooth * smoothing) + (fps * (1.0 - smoothing))
        last = now
    c = dev.color
    cad = dev.cad
    dev.wait_for_frames()
    c = cv2.cvtColor(c, cv2.COLOR_RGB2GRAY)
    depth = dev.depth * dev.depth_scale * 1000
    print(depth)
    #d = cv2.applyColorMap(depth.astype(np.uint8), cv2.COLORMAP_SUMMER)
    res = np.float32(dev.depth)
    depth = 255 * np.logical_and(depth >= 100, depth <= 500)
    res2 = depth.astype(np.uint8)
    kernel = np.ones((3, 3), np.uint8)
    #res2 = cv2.blur(res2, (15, 15))
    res2 = cv2.erode(res2, kernel, iterations=4)
    res2 = cv2.dilate(res2, kernel, iterations=8)
    # fill holes: flood-fill from the corner, invert, OR with the mask
    im_floodfill = res2.copy()
    h, w = res2.shape[:2]
    mask2 = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(im_floodfill, mask2, (0, 0), 255)
    im_floodfill_inv = cv2.bitwise_not(im_floodfill)
    im_out = res2 | im_floodfill_inv
    im_out = cv2.blur(im_out, (5, 5))
    label = cv2.connectedComponentsWithStats(im_out)
    n = label[0] - 1                 # number of components minus background
    cog = np.delete(label[3], 0, 0)  # centroids, background row removed
    for i in range(n):
        #im = cv2.circle(im, (int(cog[i][0]), int(cog[i][1])), 10, (0, 0, 255), -1)
        cv2.putText(im_out, str(i), (int(cog[i][0]), int(cog[i][1])),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
    cv2.imshow("Depth", res)
    cv2.imshow("OUT", im_out)
    cv2.imshow("C", res2)

Related

Multi object tracking using CV2

I have some code in which you input a video and then use background subtraction via cv2 to produce contours over a certain threshold, drawing a bounding box over them. On its own this simply identifies objects/motion in the video. I then go on to track the change in x and y over the contours' coordinate points to print on screen which direction the object is moving, drawing a motion track behind the object on the frame.
However, the code cannot distinguish the different contours as separate objects. When only one object is present, it works great. When it detects more than one object/contour, the motion is all over the place, as you would expect.
I have been doing some research, and it seems optical flow might be the best solution, but I'm not really sure whether it applies to this situation, nor how to integrate it into the code I already have.
Is optical flow the best solution, and how can I implement it in the code? I have read this page, but it doesn't follow the background subtraction or contour finding that my code currently uses. The result I want is objects/contours being tracked separately, so I can filter out any moving in a fashion/direction I don't want. Below are some small bits from my code to demonstrate my method. I follow this outline: https://pyimagesearch.com/2015/09/21/opencv-track-object-movement/
Any help appreciated! Thanks!
# example of bg subtraction
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
frameDelta = cv2.absdiff(firstFrame, gray)
(_, thresh) = cv2.threshold(frameDelta, desired_th, 255, cv2.THRESH_BINARY)
thresh = cv2.dilate(thresh, kernel, iterations=2)
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)  # cnts i.e. contours, getting the outline

# motion
c = max(cnts, key=cv2.contourArea)  # find the largest contour (based on its area)
((x, y), radius) = cv2.minEnclosingCircle(c)  # compute the minimum enclosing circle and the centroid of the object
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
if radius > 10:  # minimum pixel radius
    cv2.circle(frame, (int(x), int(y)), int(radius), (0, 255, 255), 2)  # draw the minimum enclosing circle surrounding the object
    cv2.circle(frame, center, 5, (0, 0, 255), -1)  # draw the centroid
pts.appendleft(center)  # update the list of pts containing the center (x, y)-coordinates of the object

for i in np.arange(1, len(pts)):  # loop over the set of tracked points
    if pts[i - 1] is None or pts[i] is None:
        continue
    if counter >= 5 and i == 10 and pts[i - 10] is not None:  # check if enough points have accumulated in the buffer
        dX = pts[i - 10][0] - pts[i][0]  # compute the difference in x and y
        dY = pts[i - 10][1] - pts[i][1]
        (dirX, dirY) = ("", "")  # reinitialise the direction text variables
        if np.abs(dX) > 10:  # ensure significant movement in the x direction
            dirX = "East" if np.sign(dX) == 1 else "West"
        if np.abs(dY) > 10:  # e.g. a 10 pixel difference; make smaller to detect smaller movements, and vice versa
            dirY = "South" if np.sign(dY) == 1 else "North"  # image y is inverted: the top of the frame is pixel 0
        if dirX != "" and dirY != "":  # both directions are non-empty
            direction = "{}-{}".format(dirY, dirX)  # i.e. North-West
        else:  # otherwise, only one direction is non-empty
            direction = dirX if dirX != "" else dirY
    # compute the thickness of the line and draw the connecting lines
    thickness = int(np.sqrt(buffer / float(i + 1)) * 2.5)
    cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
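One lightweight option, short of dense optical flow, is Lucas-Kanade (cv2.calcOpticalFlowPyrLK), which carries chosen points from one frame into the next and so preserves per-object identity. A minimal sketch of how it could slot into the pipeline above; prev_gray, gray and the parameter values are assumptions:

import cv2
import numpy as np

# Lucas-Kanade parameters (a starting point, not tuned)
lk_params = dict(winSize=(21, 21), maxLevel=3,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 30, 0.01))

# p0: Nx1x2 float32 array of points found in the previous grayscale frame,
# e.g. one centroid per detected contour instead of only max(cnts, ...)
p0 = np.array([[center]], dtype=np.float32)

# carry the points into the current frame; status[i] == 1 means point i
# was found again, so its identity is preserved across frames
p1, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, gray, p0, None, **lk_params)
good_new = p1[status.flatten() == 1]
good_old = p0[status.flatten() == 1]
# dX/dY can now be computed per object (good_new - good_old) rather than
# from a single shared pts buffer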

OpenCV Find a middle line of a contour [Python]

In my image processing project, I have already obtained a masked image (black-and-white image) and its contours using the cv.findContours function. My goal now is to create an algorithm that can draw a middle line for this contour. The masked image and its contour are shown in the following images.
Masked image:
Contour:
In my imagination, for that contour, I would like to create a middle line which is near horizontal. I have manually marked my ideal middle line in red. Please check the following image for the red middle line that I have mentioned.
Contour with the middle line:
It is noticeable that my ultimate goal is to find the tip point, which I have marked in yellow. If you have other ideas that can directly find the yellow tip point, please also let me know. For finding the yellow tip point, I have tried two approaches, cv.convexHull and cv.minAreaRect, but the issue is robustness. I made these two approaches work for some images, but for other images in my dataset they do not work very well. Therefore, I think finding the middle line might be a good approach to try.
I believe you're trying to determine the contour's center of gravity and orientation. We can easily do this using Central Moments. More info on that here.
The code below generates this plot. Is this the result you wanted?
import cv2
import numpy as np
import matplotlib.pyplot as plt

# Determine contour
img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE)
img_bin = (img > 128).astype(np.uint8)
contours, _ = cv2.findContours(img_bin, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)

# Determine center of gravity and orientation using central moments
M = cv2.moments(contours[0])
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
theta = 0.5 * np.arctan2(2 * M["mu11"], M["mu20"] - M["mu02"])
endx = 600 * np.cos(theta) + center[0]  # line length 600
endy = 600 * np.sin(theta) + center[1]

# Display results
plt.imshow(img_bin, cmap='gray')
plt.scatter(center[0], center[1], marker="X")
plt.plot([center[0], endx], [center[1], endy])
plt.show()
My goal right now is to create an algorithm that can draw a middle line for this contour.

If you detect the upper and lower bounds of your horizontal lines, then you can calculate the middle-line coordinates.
For instance:
The middle line will be:
If you extend it to the width of the image:
Code:
import cv2

img = cv2.imread("contour.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
(h, w) = img.shape[:2]

x1_upper = h
x1_lower = 0
x2_upper = h
x2_lower = 0
y1_upper = h
y1_lower = 0
y2_upper = h
y2_lower = 0

lines = cv2.ximgproc.createFastLineDetector().detect(gray)

for cur in lines:
    x1 = cur[0][0]
    y1 = cur[0][1]
    x2 = cur[0][2]
    y2 = cur[0][3]
    # upper-bound coords
    if y1 < y1_upper and y2 < y2_upper:
        y1_upper = y1
        y2_upper = y2
        x1_upper = x1
        x2_upper = x2
    elif y1 > y1_lower and y2 > y2_lower:
        y1_lower = y1
        y2_lower = y2
        x1_lower = x1
        x2_lower = x2

print("\n\n-lower-bound-\n")
print("({}, {}) - ({}, {})".format(x1_lower, y1_lower, x2_lower, y2_lower))
print("\n\n-upper-bound-\n")
print("({}, {}) - ({}, {})".format(x1_upper, y1_upper, x2_upper, y2_upper))

cv2.line(img, (x1_lower, y1_lower), (x2_lower, y2_lower), (0, 255, 0), 5)
cv2.line(img, (x1_upper, y1_upper), (x2_upper, y2_upper), (0, 0, 255), 5)

x1_avg = int((x1_lower + x1_upper) / 2)
y1_avg = int((y1_lower + y1_upper) / 2)
x2_avg = int((x2_lower + x2_upper) / 2)
y2_avg = int((y2_lower + y2_upper) / 2)

cv2.line(img, (0, y1_avg), (w, y2_avg), (255, 0, 0), 5)

cv2.imshow("result", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
I believe a skeleton is what you are looking for.
import cv2
import timeit
img = cv2.imread('Ggh8d - Copy.jpg',0)
s = timeit.default_timer()
thinned = cv2.ximgproc.thinning(img, thinningType = cv2.ximgproc.THINNING_ZHANGSUEN)
e = timeit.default_timer()
print(e-s)
cv2.imwrite("thinned1.png", thinned)
If you smooth the edges a little bit first, the result is cleaner; a sketch of that step follows.
Note that the line will not actually touch the yellow point: the thinning algorithm keeps an equal distance from the edges, and the yellow point is located on the edge itself.
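A minimal sketch of the smoothing step; the filename, kernel size and threshold are placeholders:

import cv2

img = cv2.imread('mask.jpg', 0)  # hypothetical filename
blurred = cv2.GaussianBlur(img, (15, 15), 0)
_, smoothed = cv2.threshold(blurred, 128, 255, cv2.THRESH_BINARY)
thinned = cv2.ximgproc.thinning(smoothed,
                                thinningType=cv2.ximgproc.THINNING_ZHANGSUEN)
cv2.imwrite("thinned_smoothed.png", thinned)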
Here is another way to do that by computing the centerline of the rotated bounding box about your object in Python/OpenCV.
Input:
import cv2
import numpy as np
# load image
img = cv2.imread("blob_mask.jpg")
# convert to gray
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold the grayscale image
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
# get coordinates of all non-zero pixels
# NOTE: must transpose since numpy coords are y,x and opencv uses x,y
coords = np.column_stack(np.where(thresh.transpose() > 0))
# get rotated rectangle from the non-zero coordinates
rotrect = cv2.minAreaRect(coords)
box = cv2.boxPoints(rotrect)
box = np.int0(box)
print (box)
# get center line from box
# note points are clockwise from bottom right
x1 = (box[0][0] + box[3][0]) // 2
y1 = (box[0][1] + box[3][1]) // 2
x2 = (box[1][0] + box[2][0]) // 2
y2 = (box[1][1] + box[2][1]) // 2
# draw rotated rectangle on copy of img as result
result = img.copy()
cv2.drawContours(result, [box], 0, (0,0,255), 2)
cv2.line(result, (x1,y1), (x2,y2), (255,0,0), 2)
# write result to disk
cv2.imwrite("blob_mask_rotrect.png", result)
# display results
cv2.imshow("THRESH", thresh)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:

Unable to use sort_contours for building seven-segment OCR

I am trying to build an OCR for recognising a seven-segment display, as shown below.
Using OpenCV's preprocessing tools I got it to here.
Now I am trying to follow this tutorial - https://www.pyimagesearch.com/2017/02/13/recognizing-digits-with-opencv-and-python/
But on the part

digitCnts = contours.sort_contours(digitCnts,
                                   method="left-to-right")[0]
digits = []

I am getting this error:

File "/Users/ms/anaconda3/lib/python3.6/site-packages/imutils/contours.py", line 25, in sort_contours
    key=lambda b: b[1][i], reverse=reverse))
ValueError: not enough values to unpack (expected 2, got 0)

Any idea how to solve this and make my OCR a working model?
EDIT: The error is solved using THRESH_BINARY_INV, but the OCR is still not working; any fix would be great.
My whole code is:
# import the necessary packages
from imutils.perspective import four_point_transform
from imutils import contours
import imutils
import numpy as np
import cv2
# define the dictionary of digit segments so we can identify
# each digit on the thermostat
DIGITS_LOOKUP = {
    (1, 1, 1, 0, 1, 1, 1): 0,
    (0, 0, 1, 0, 0, 1, 0): 1,
    (1, 0, 1, 1, 1, 1, 0): 2,
    (1, 0, 1, 1, 0, 1, 1): 3,
    (0, 1, 1, 1, 0, 1, 0): 4,
    (1, 1, 0, 1, 0, 1, 1): 5,
    (1, 1, 0, 1, 1, 1, 1): 6,
    (1, 0, 1, 0, 0, 1, 0): 7,
    (1, 1, 1, 1, 1, 1, 1): 8,
    (1, 1, 1, 1, 0, 1, 1): 9
}
# load image
image = cv2.imread('d4.jpg')
# create hsv
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# set lower and upper color limits
low_val = (60,180,160)
high_val = (179,255,255)
# Threshold the HSV image
mask = cv2.inRange(hsv, low_val,high_val)
# find contours in mask
ret, cont, hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# select the largest contour
largest_area = 0
for cnt in cont:
    if cv2.contourArea(cnt) > largest_area:
        cont = cnt
        largest_area = cv2.contourArea(cnt)
# get the parameters of the boundingbox
x,y,w,h = cv2.boundingRect(cont)
# create and show subimage
roi = image[y:y+h, x:x+w]
cv2.imshow("Result", roi)
# draw box on original image and show image
cv2.rectangle(image, (x,y),(x+w,y+h), (0,0,255),2)
cv2.imshow("Image", image)
grayscaled = cv2.cvtColor(roi,cv2.COLOR_BGR2GRAY)
retval, threshold = cv2.threshold(grayscaled, 10, 255, cv2.THRESH_BINARY)
retval2,threshold2 = cv2.threshold(grayscaled,125,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
cv2.imshow('threshold',threshold2)
cv2.waitKey(0)
cv2.destroyAllWindows()
# find contours in the thresholded image, then initialize the
# digit contours lists
cnts = cv2.findContours(threshold2.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []

# loop over the digit area candidates
for c in cnts:
    # compute the bounding box of the contour
    (x, y, w, h) = cv2.boundingRect(c)
    # if the contour is sufficiently large, it must be a digit
    if w >= 15 and (h >= 30 and h <= 40):
        digitCnts.append(c)

# sort the contours from left-to-right, then initialize the
# actual digits themselves
digitCnts = contours.sort_contours(digitCnts,
                                   method="left-to-right")[0]
digits = []

# loop over each of the digits
for c in digitCnts:
    # extract the digit ROI
    (x, y, w, h) = cv2.boundingRect(c)
    roi = thresh[y:y + h, x:x + w]
    # compute the width and height of each of the 7 segments
    # we are going to examine
    (roiH, roiW) = roi.shape
    (dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))
    dHC = int(roiH * 0.05)
    # define the set of 7 segments
    segments = [
        ((0, 0), (w, dH)),                           # top
        ((0, 0), (dW, h // 2)),                      # top-left
        ((w - dW, 0), (w, h // 2)),                  # top-right
        ((0, (h // 2) - dHC), (w, (h // 2) + dHC)),  # center
        ((0, h // 2), (dW, h)),                      # bottom-left
        ((w - dW, h // 2), (w, h)),                  # bottom-right
        ((0, h - dH), (w, h))                        # bottom
    ]
    on = [0] * len(segments)
    # loop over the segments
    for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
        # extract the segment ROI, count the total number of
        # thresholded pixels in the segment, and then compute
        # the area of the segment
        segROI = roi[yA:yB, xA:xB]
        total = cv2.countNonZero(segROI)
        area = (xB - xA) * (yB - yA)
        # if the total number of non-zero pixels is greater than
        # 50% of the area, mark the segment as "on"
        if total / float(area) > 0.5:
            on[i] = 1
    # lookup the digit and draw it on the image
    digit = DIGITS_LOOKUP[tuple(on)]
    digits.append(digit)
    cv2.rectangle(output, (x, y), (x + w, y + h), (0, 255, 0), 1)
    cv2.putText(output, str(digit), (x - 10, y - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)

# display the digits
print(u"{}{}.{}{}.{}{} \u00b0C".format(*digits))
cv2.imshow("Input", image)
cv2.imshow("Output", output)
cv2.waitKey(0)
Any help in fixing my OCR would be great.
I think the lookup table you created is for a seven-segment display, not for seven-segment OCR. Since the size of the display is fixed, I think you can try to segment it into separate regions and recognise them using template matching or k-means.
These are my preprocessing steps:
(1) Find the light green display in HSV:
mask = cv2.inRange(hsv, (50, 100, 180), (70, 255, 255))
(2) Try to separate the digits by projection and recognise standard seven-segment digits using a LUT (a sketch of the projection step follows below).
(3) Try it on the detected green display.
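A minimal sketch of the projection idea, splitting digits wherever the vertical projection of the mask drops to zero; it assumes the digits are white on black and that the first and last columns are background:

import numpy as np

# mask: binary image of the display, digits white on black
col_sum = mask.sum(axis=0)        # vertical projection profile
in_digit = col_sum > 0
starts = np.flatnonzero(in_digit & ~np.roll(in_digit, 1))  # digit begins
ends = np.flatnonzero(~in_digit & np.roll(in_digit, 1))    # digit ends
digit_rois = [mask[:, s:e] for s, e in zip(starts, ends)]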
So, as I said in the comments, there were two problems:
You were trying to find black contours on a white background, which is the opposite of what the OpenCV documentation expects. This was solved by using the THRESH_BINARY_INV flag instead of THRESH_BINARY.
Due to the numbers not being connected, a full contour for each number couldn't be found. So I tried some morphological operations. Following are the steps:
2a) Opening on the above image with following code:
threshold2 = cv2.morphologyEx(threshold, cv2.MORPH_OPEN, np.ones((3,3), np.uint8))
2b) Dilation on the previous image:
threshold2 = cv2.dilate(threshold2, np.ones((5,1), np.uint8), iterations=1)
2c) Crop the top part of the image to separate numbers due to dilating into the top border:
height, width = threshold2.shape[:2]
threshold2 = threshold2[5:height,5:width]
Note: somehow, the images are being displayed here without the white border that I am talking about. Try opening the image in a new window and you will see what I mean.
So, after solving these issues, the contours were pretty good and how they were supposed to be as seen here:
cnts = cv2.findContours(threshold2.copy(), cv2.RETR_EXTERNAL,
                        cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
digitCnts = []
# loop over the digit area candidates
for c in cnts:
    # compute the bounding box of the contour
    (x, y, w, h) = cv2.boundingRect(c)
    # if the contour is sufficiently large, it must be a digit
    if w <= width * 0.5 and (h >= height * 0.2):
        digitCnts.append(c)
# draw the digit contours on a copy of the image
cv2.drawContours(image2, digitCnts, -1, (0, 0, 255))
cv2.imwrite("cnts-sort.jpg", image2)
As you can see below, the contours are being drawn in red.
Now, for recognising which digit each ROI contains, this part somehow doesn't work, and I blame the look-up table for it. As you can see from the images below, the bounding rects for all the numbers are correctly cropped, but the lookup table fails to recognize them.
# loop over each of the digits
j = 0
for c in digitCnts:
    # extract the digit ROI
    (x, y, w, h) = cv2.boundingRect(c)
    roi = threshold2[y:y + h, x:x + w]
    cv2.imwrite("roi" + str(j) + ".jpg", roi)
    j += 1
    # compute the width and height of each of the 7 segments
    # we are going to examine
    (roiH, roiW) = roi.shape
    (dW, dH) = (int(roiW * 0.25), int(roiH * 0.15))
    dHC = int(roiH * 0.05)
    # define the set of 7 segments
    segments = [
        ((0, 0), (w, dH)),                           # top
        ((0, 0), (dW, h // 2)),                      # top-left
        ((w - dW, 0), (w, h // 2)),                  # top-right
        ((0, (h // 2) - dHC), (w, (h // 2) + dHC)),  # center
        ((0, h // 2), (dW, h)),                      # bottom-left
        ((w - dW, h // 2), (w, h)),                  # bottom-right
        ((0, h - dH), (w, h))                        # bottom
    ]
    on = [0] * len(segments)
    # loop over the segments
    for (i, ((xA, yA), (xB, yB))) in enumerate(segments):
        # extract the segment ROI, count the total number of
        # thresholded pixels in the segment, and then compute
        # the area of the segment
        segROI = roi[yA:yB, xA:xB]
        total = cv2.countNonZero(segROI)
        area = (xB - xA) * (yB - yA)
        # if the total number of non-zero pixels is greater than
        # 50% of the area, mark the segment as "on"
        if area != 0:
            if total / float(area) > 0.5:
                on[i] = 1
    # lookup the digit and draw it on the image
    try:
        digit = DIGITS_LOOKUP[tuple(on)]
        digits.append(digit)
        cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 255, 0), 1)
        cv2.putText(roi, str(digit), (x - 10, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.65, (0, 255, 0), 2)
    except KeyError:
        continue
I read through the website you mentioned in the question and from the comments it seems some of the entries in the LUT might be wrong. So I am going to leave it to you to figure that out. Following are the individual digits found (but not recognised):
Alternatively, you could use tesseract instead to recognise these detected digits.
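For example, a minimal pytesseract sketch for one cropped digit ROI; the exact config flags are an assumption (--psm 10 treats the image as a single character):

import pytesseract

# roi: one cropped, thresholded digit image from the loop above
text = pytesseract.image_to_string(
    roi, config="--psm 10 -c tessedit_char_whitelist=0123456789")
print(text.strip())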
Hope it helps!

OpenCV Background Subtraction to Improve Detection

I'm able to detect eggs on a conveyor belt, but the problem is that if there are no eggs on the belt, the algorithm still finds objects.
Detected eggs on conveyor:
If there are no eggs on belt:
To reduce false detections I'm checking the radius and area:
if (radius < 100 and radius > 20):
....
if area > 2200 and area < 8800:
....
But these checks are not enough to prevent false detections.
I have tried the createBackgroundSubtractorMOG method, but I have failed to detect the eggs after subtraction.
The type of conveyor belt can vary.
What is the best method for removing static background(belt) from the image?
Code:
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
th, bw = cv2.threshold(hsv[:, :, 2], 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
morph = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel)
dist = cv2.distanceTransform(morph, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
borderSize = 75
distborder = cv2.copyMakeBorder(dist, borderSize, borderSize, borderSize, borderSize,
cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED, 0)
gap = 10
kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2*(borderSize-gap)+1, 2*(borderSize-gap)+1))
kernel2 = cv2.copyMakeBorder(kernel2, gap, gap, gap, gap,
cv2.BORDER_CONSTANT | cv2.BORDER_ISOLATED, 0)
distTempl = cv2.distanceTransform(kernel2, cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
nxcor = cv2.matchTemplate(distborder, distTempl, cv2.TM_CCOEFF_NORMED)
mn, mx, _, _ = cv2.minMaxLoc(nxcor)
th, peaks = cv2.threshold(nxcor, mx*0.5, 255, cv2.THRESH_BINARY)
peaks8u = cv2.convertScaleAbs(peaks)
#fgmask = self.fgbg.apply(peaks8u)
_,contours, hierarchy = cv2.findContours(peaks8u, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
peaks8u = cv2.convertScaleAbs(peaks) # to use as mask
for cnt in contours:
    try:
        if len(cnt) >= 5:
            (x, y), radius = cv2.minEnclosingCircle(cnt)
            radius = int(radius)
            #print("radius: ", radius)
            if (radius < 100 and radius > 20):
                ellipse = cv2.fitEllipse(cnt)
                (center, axis, angle) = ellipse
                cx, cy = int(center[0]), int(center[1])
                ax1, ax2 = int(axis[0]) - 2, int(axis[1]) - 2
                orientation = int(angle)
                area = cv2.contourArea(cnt)
                if area > 2200 and area < 8800:
                    cv2.ellipse(frame, (cx, cy), (ax1, ax2), orientation, 0, 360, (255, 0, 0), 2)
                    #cv2.circle(frame, (cx, cy), 1, (0, 255, 0), 15)
                    cv2.putText(frame, str(int(area)), (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 0, 1, cv2.LINE_AA)
    except cv2.error:
        pass  # skip contours that fitEllipse cannot handle
There are two basic options to get the background image:
Obtain the background image for the specific conveyor belt in advance during some setup/calibration process.
Use the data itself to derive the background, for this you have to remove the objects (eggs).
The second option can be implemented by taking the mean or median of every pixel over time. Given that each pixel shows background most of the time, you will get a reasonably good estimate of the scene without the objects.
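A minimal sketch of the median approach; cap (an open cv2.VideoCapture), the frame count of 60 and the threshold of 30 are assumptions:

import cv2
import numpy as np

# collect some frames of the running belt
frames = []
for _ in range(60):
    ok, frame = cap.read()
    if not ok:
        break
    frames.append(frame)

# per-pixel median over time: eggs pass by, the belt stays
background = np.median(np.stack(frames), axis=0).astype(np.uint8)

# subtracting it from a new frame leaves mostly the eggs
diff = cv2.absdiff(frame, background)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
_, fg_mask = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY)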

Keep the same label for a moving object using opencv

I want to track a moving fish from a camera located on top of a fish tank. So far I have been able to track multiple moving objects using moving-average and background-subtraction methods, and I put text on each contour. But the problem is that I couldn't keep the same label for the same moving fish. The fish are detected in every frame, but the tracked object number keeps changing. I have attached my current Python code. What am I doing wrong here? Can someone suggest another possible way to do this?
import cv2
import numpy as np

device = cv2.VideoCapture(0)
flag, frame = device.read()
movingaverage = np.float32(frame)
background = cv2.createBackgroundSubtractorMOG2()
font = cv2.FONT_HERSHEY_SIMPLEX
kernelOpen = np.ones((5, 5))
kernelClose = np.ones((20, 20))

while True:
    flag, frame = device.read()
    alpha = float(1.0 / 2.0)
    cv2.accumulateWeighted(frame, movingaverage, alpha)
    gaussion = background.apply(frame)
    res = cv2.convertScaleAbs(movingaverage)
    difference_img = cv2.absdiff(res, frame)
    grey_difference_img = cv2.cvtColor(difference_img, cv2.COLOR_BGR2GRAY)
    ret, th1 = cv2.threshold(grey_difference_img, 10, 255, cv2.THRESH_BINARY)
    combine = cv2.bitwise_and(gaussion, gaussion, mask=grey_difference_img)
    _, conts, h1 = cv2.findContours(combine.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    if len(conts) == 0:
        cv2.putText(frame, "No moving objects found!", (50, 200), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    else:
        number = 0
        for i in range(len(conts)):
            x, y, w, h = cv2.boundingRect(conts[i])
            if (w > 50) and (h > 50):
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.putText(frame, str(number + 1) + " object", (x, y + h), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
                number = number + 1
    cv2.imshow("Gaussian", gaussion)
    cv2.imshow("Track", frame)
    if cv2.waitKey(1) == 27:
        break

device.release()
cv2.destroyAllWindows()
I have an idea, but I'm not sure about it. I have now modified the code to detect the center of each and every contour. So, could I store those coordinates in an array, then for each contour in the new frame check whether its center point is close to the stored values, and thereby guess which contour from the previous frame it corresponds to? I don't know how feasible this is, since I'm new to Python and OpenCV.
if (w > 50) and (h > 50):
    cnt = conts[i]
    M = cv2.moments(cnt)
    if M['m00'] != 0:
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        # draw a circle at the center of the contour
        cv2.circle(frame, (cx, cy), 2, (0, 0, 255), -1)
        print("(", cx, ",", cy, ")")
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.putText(frame, str(number + 1) + " object", (x, y + h), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
EDIT: I have updated my code. Now it works fine as long as the moving objects stay in the same horizontal plane. What I did was create a list called matched_contours, and lists of center points called paths. I append each calculated center point to the paths element of its matched_contour, and use min_dist to check whether a contour was already present in the previous frame. If it was, I update that matched_contour with the new center point; if not, I add it as a new matched_contour.
THIS IS THE UPDATED PART:
im2, contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1)
for (_, contour) in enumerate(contours):
    (x, y, w, h) = cv2.boundingRect(contour)
    if (w >= 50) or (h >= 50):  # find moving objects of the targeted size
        x1 = int(w / 2)
        y1 = int(h / 2)
        cx = x + x1
        cy = y + y1
        if len(matched_contours) == 0:  # no previous moving objects
            paths = []
            paths.append((cx, cy))
            object_num = object_num + 1
            matched_contours.append(((x, y, w, h), (cx, cy), object_num, paths))
            cv2.circle(frame, (cx, cy), 2, (0, 0, 255), -1)
            #cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            rect = cv2.minAreaRect(contour)
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(frame, [box], 0, (0, 255, 0), 2)
            cv2.putText(frame, "object " + str(object_num), (x, y + h), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
        else:
            found = False
            for i in range(len(matched_contours)):
                points, center, num, path = matched_contours[i]
                old_cx, old_cy = center
                # calculate the euclidean distance between the new center and
                # the old centers to check whether this contour already exists
                euc_dist = math.sqrt(float((cx - old_cx)**2) + float((cy - old_cy)**2))
                if euc_dist <= min_dist:
                    found = True
                    if len(path) == max_path_length:
                        for t in range(len(path)):
                            if t == max_path_length - 1:
                                path[t] = (cx, cy)
                            else:
                                path[t] = path[t + 1]
                    else:
                        path.append((cx, cy))
                    matched_contours[i] = ((x, y, w, h), (cx, cy), num, path)
                    cv2.circle(frame, (cx, cy), 2, (0, 0, 255), -1)
                    #cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                    rect = cv2.minAreaRect(contour)
                    box = cv2.boxPoints(rect)
                    box = np.int0(box)
                    cv2.drawContours(frame, [box], 0, (0, 255, 0), 2)
                    cv2.putText(frame, "object " + str(num), (x, y + h), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
                    # draw the path of the moving object
                    for point in range(len(path) - 1):
                        cv2.line(frame, path[point], path[point + 1], (255, 0, 0), 1)
                        cv2.circle(frame, path[point + 1], 3, (0, 0, 255), -1)
            if not found:  # a new moving object has been found
                object_num = object_num + 1
                paths = []
                paths.append((cx, cy))
                matched_contours.append(((x, y, w, h), (cx, cy), object_num, paths))
                cv2.circle(frame, (cx, cy), 2, (0, 0, 255), -1)
                #cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
                rect = cv2.minAreaRect(contour)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                cv2.drawContours(frame, [box], 0, (0, 255, 0), 2)
                cv2.putText(frame, "object " + str(object_num), (x, y + h), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
