I followed a tutorial on watershed segmentation and used it to segment each red blood cell in an image. I'm new to OpenCV and I would like to know whether it is possible to draw circles around the cells using watershed segmentation. If so, could you please show how it is done?
Original image
Output of Watershed segmentation
Code is given below
import numpy as np
import cv2
from matplotlib import pyplot as plt
def fillHoles(otsuImg):
    # find contours
    contours, _ = cv2.findContours(otsuImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # filter out contours by size
    small_cntrs = []
    for con in contours:
        area = cv2.contourArea(con)
        # print(area)
        if area < 1000: # size threshold
            small_cntrs.append(con)
    cv2.drawContours(otsuImg, small_cntrs, -1, 0, -1)
# load the image
img = cv2.imread('resources/rbc2.png')
img_pyr = cv2.pyrMeanShiftFiltering(img, 21, 51)
img_median = cv2.medianBlur(img_pyr, 9)
img_gray = cv2.cvtColor(img_median, cv2.COLOR_BGR2GRAY)
ret, img_thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# fill holes of RBC
fillHoles(img_thresh)
# invert the image
img_thresh = cv2.bitwise_not(img_thresh)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(img_thresh,cv2.MORPH_OPEN,kernel, iterations=2)
# sure background area
sure_bg = cv2.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform,0.1*dist_transform.max(),255,0)
# _, sure_fg = cv2.threshold(np.uint8(dist_transform), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv2.watershed(img,markers)
img[markers == -1] = [255,0,0]
cv2.imshow('markers2', np.uint8(markers))
cv2.imshow('Final output', img)
cv2.waitKey(0)
I had the same problem and ended up using skimage.segmentation.watershed for the last step, to get the labels I could use to calculate the contours of the watershed cells. Once you have the contours, you can calculate and plot the enclosing circle as usual:
# your code above
ret, markers = cv2.connectedComponents(sure_fg)

from skimage.segmentation import watershed
labels = watershed(-dist_transform,
                   markers,
                   mask=img_thresh,
                   watershed_line=False)

watershed_contours = list(map(lambda l: cv2.findContours((labels == l).astype(np.uint8),
                                                         cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0][0],
                              np.unique(labels)[1:]))

output = img.copy()
for contour in watershed_contours:
    (x, y), radius = cv2.minEnclosingCircle(contour)
    center = (int(x), int(y))
    radius = int(radius)
    cv2.circle(output, center, radius, (0, 255, 0), 2)
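Then display or save output as usual (a minimal follow-up; the window name here is arbitrary):
# show the circles drawn on a copy of the input
cv2.imshow('enclosing circles', output)
cv2.waitKey(0)
cv2.destroyAllWindows()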
Related
I followed the watershed tutorial on the OpenCV site. How could I use the same example and extend it to detect coin widths?
I know I could use the HoughCircles() and findContours() functions, but the watershed approach is more stable against noisy backgrounds.
I would like to extract the segmented coins and use circle/contour detection for width estimation, but I don't understand how to extend this code.
import numpy as np
import cv2
from matplotlib import pyplot as plt
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_watershed/py_watershed.html
img_file_name = r'C:\Users\coins.jpg'
img = cv2.imread(img_file_name)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,100,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
#cv2.imshow("thresolded",thresh)
# noise removal
kernel = np.ones((6,6),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 3)
# sure background area
sure_bg = cv2.dilate(opening,kernel,iterations=3)
#cv2.imshow("dilate",sure_bg)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
cv2.imshow("dist_transform",sure_fg)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg,sure_fg)
cv2.imshow("subtract",unknown)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
#markers = cv2.watershed(img,markers)
markers = cv2.watershed(img,markers)
img[markers == -1] = [0,255,0]
cv2.imshow("markers",img)
Here is your code, slightly changed:
import numpy as np
import cv2
import imutils
from matplotlib import pyplot as plt
img_file_name = 'coins.jpg'
img = cv2.imread(img_file_name)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,100,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# noise removal
kernel = np.ones((6,6),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 3)
# sure background area
sure_bg = cv2.dilate(opening,kernel,iterations=3)
# CHANGE
dist_transform = cv2.distanceTransform(thresh,cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
# CHANGE
unknown = cv2.subtract(thresh,sure_fg)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv2.watershed(img,markers)
Here is the code that you can use:
# loop over the unique markers returned by the watershed algorithm
num_coins = np.amax(markers) - 1
coins_width = np.zeros(num_coins)
for marker in np.unique(markers):
    # if the marker is -1, we are examining the borders;
    # if the marker is 1, we are examining the background,
    # so simply ignore them
    if marker <= 1:
        continue
    # otherwise, allocate memory for the marker region and draw
    # it on the mask
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[markers == marker] = 255
    # detect contours in the mask and grab the largest one
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    largest_cnt = max(cnts, key=cv2.contourArea)
    #
    # First way
    #
    # calculate the center of the contour
    M = cv2.moments(largest_cnt)
    x = int(M["m10"] / M["m00"])
    y = int(M["m01"] / M["m00"])
    # calculate the radius of the contour from its area (assuming it is a circle)
    area = cv2.contourArea(largest_cnt)
    radius = np.sqrt(area / np.pi)
    #
    # Second way
    #
    # draw a circle enclosing the object
    # ((x, y), radius) = cv2.minEnclosingCircle(largest_cnt)
    cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 0), 1)
    cv2.putText(img, "#{}".format(marker - 2), (int(x) - 10, int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)
    coins_width[marker - 2] = 2 * radius
print(coins_width)
cv2.imshow("markers", img)
The code is a modified version of the code in the article:
https://www.pyimagesearch.com/2015/11/02/watershed-opencv/
I have a video file with two laser dots and I want to calculate the distance in pixels between them. I tried this code with OpenCV, but it is not working:
Image
import cv2
import numpy as np
cap = cv2.VideoCapture(r'D:\Books\Pav Man\PICS\Test\VID_20200609_195155.mp4')
#cap = cv2.VideoCapture(0)
old = 0
while (1):
    # Take each frame
    ret, frame = cap.read()
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_red = np.array([0, 0, 255])
    upper_red = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower_red, upper_red)
    cv2.imshow('mask', mask)
    # cv2.imshow('Track Laser', frame)
    moments = cv2.moments(hsv[:, :, 2])
    output = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S)
    print (output[3])
    print ("----**----")
    if moments["m00"] > 0:
        x = (moments['m10'] / moments['m00'])
        y = (moments['m01'] / moments['m00'])
        #print(moments['m00'],moments['m01'],moments['m10'])
        #print(x, y)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
The line output = cv2.connectedComponentsWithStats(mask, 8, cv2.CV_32S) gives me the centroids of the points, but how do I get each point (laser dot) separately? If I get the centroids, I can measure the distance between these points.
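For reference, output[3] is an array of centroids indexed by label, with label 0 being the background. A minimal sketch, assuming the two dots come out as labels 1 and 2:
# output = (num_labels, labels, stats, centroids)
num_labels, labels, stats, centroids = output
p1 = centroids[1]   # first laser dot (assumed label)
p2 = centroids[2]   # second laser dot (assumed label)
dist = np.hypot(p1[0] - p2[0], p1[1] - p2[1])
print('distance in pixels:', dist)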
Here is another way to do your processing in Python/OpenCV using connectedComponentsWithStats.
Read the input
Set min and max red colors
Use cv2.inRange() to threshold on the color range
Apply morphology open and close to clean up small spots and holes
Process with connectedComponentsWithStats to get the labels and centroids
Extract all the areas corresponding to the labels and centroids
For all labels, test if the area is smaller than some estimate of the background, as we want to exclude that centroid. Draw the label as a filled yellow region on a copy of the input. Extract the centroids of small regions and store in pts array. Draw the centroids as small green squares on the copy of the input.
For all saved points, compute the distance between successive ones.
Save the output
Input:
import cv2
import numpy as np
import math
# read image
frame = cv2.imread('red_spots.jpg')
hh, ww = frame.shape[:2]
# convert to hsv
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# threshold image
lower_red = np.array([0, 0, 225])
upper_red = np.array([255, 255, 255])
thresh = cv2.inRange(hsv, lower_red, upper_red)
# apply close and open morphology to smooth
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
morph = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel)
# do connected components processing
nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(morph, None, None, None, 8, cv2.CV_16U)
# get all areas from stats[label_start_id:label_stop_id, area_flag]
areas = stats[0:, cv2.CC_STAT_AREA]
# draw labels and get centroids and draw centroids
result = frame.copy()
pts = []
for i in range(0, nlabels):
    if areas[i] <= ww*hh/5:
        # labels start at 1 not 0
        result[labels == i+1] = (0, 255, 255)
        pt = centroids[i]
        pts.append(pt)
        cx = pt[0]
        cy = pt[1]
        x = int(round(cx))
        y = int(round(cy))
        # draw small square at centroids
        result[y-2:y+3, x-2:x+3] = (0, 255, 0)
        print('centroid =', cx, ",", cy)
number = len(pts)
for i in range(number-1):
    pt1 = pts[i]
    x1 = pt1[0]
    y1 = pt1[1]
    pt2 = pts[i+1]
    x2 = pt2[0]
    y2 = pt2[1]
    dist = math.sqrt( (x2-x1)**2 + (y2-y1)**2 )
    print('distance =', dist)
    print('')
#save images
cv2.imwrite('red_spots_thresh2.jpg',thresh)
cv2.imwrite('red_spots_morph2.jpg',morph)
cv2.imwrite('red_spots_centroids2.jpg',result)
# show images
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology cleaned image:
Result image with colored region labels and centroids:
Centroids and Distances:
centroid = 1006.7307283673711 , 433.70499350726004
centroid = 1036.418693371483 , 750.4024797329519
distance = 318.08595229553544
Distance is the square root of the sum of the squares of the x difference and the y difference. So
import math
dist = math.sqrt( (x1-x2)**2 + (y1-y2)**2 )
for points x1,y1 and x2,y2
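Equivalently, with NumPy (an illustrative alternative, using the centroids printed above):
import numpy as np

p1 = np.array([1006.73, 433.70])   # centroid 1 from the output above
p2 = np.array([1036.42, 750.40])   # centroid 2 from the output above
dist = np.linalg.norm(p1 - p2)
print(dist)                        # ~318.09, matching the distance printed above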
Here is one way to do your processing in Python/OpenCV using contours.
Read the input
Set min and max red colors
Use cv2.inRange() to threshold on the color range
Apply morphology open and close to clean up small spots and holes
Find contours
For each contour, draw the contour, use moments to compute the centroid, save the centroids in a list and draw a small square at the centroid location
For each successive pair of centroid points, compute the distance between them in pixels
Save the results
Input:
import cv2
import numpy as np
import math
# read image
frame = cv2.imread('red_spots.jpg')
# convert to hsv
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# threshold image
lower_red = np.array([0, 0, 225])
upper_red = np.array([255, 255, 255])
thresh = cv2.inRange(hsv, lower_red, upper_red)
# apply close and open morphology to smooth
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
morph = cv2.morphologyEx(morph, cv2.MORPH_OPEN, kernel)
# draw contours and get centroids
spots = frame.copy()
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
pts = []
count = 0
for c in contours:
    cv2.drawContours(spots, [c], -1, (0,255,0), 2)
    M = cv2.moments(c)
    cx = M["m10"] / M["m00"]
    cy = M["m01"] / M["m00"]
    pt = (cx, cy)
    pts.append(pt)
    x = round(cx)
    y = round(cy)
    # draw small square at centroids
    spots[y-2:y+3, x-2:x+3] = (255,0,0)
    print('centroid =', cx, ",", cy)
    count = count + 1
for i in range(count-1):
    pt1 = pts[i]
    x1 = pt1[0]
    y1 = pt1[1]
    pt2 = pts[i+1]
    x2 = pt2[0]
    y2 = pt2[1]
    dist = math.sqrt( (x2-x1)**2 + (y2-y1)**2 )
    print('distance =', dist)
    print('')
#save images
cv2.imwrite('red_spots_thresh.png',thresh)
cv2.imwrite('red_spots_morph.png',morph)
cv2.imwrite('red_spots_centroids.png',spots)
# show images
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("spots", spots)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology cleaned image:
Contours and centroids image:
Centroids and Distances:
centroid = 1036.4038142620232 , 750.3941127694858
centroid = 1006.6605586230609 , 433.9662237323787
distance = 317.8227024875417
You can do this:
Use cv2.inRange() to find the two red dots.
Use cv2.connectedComponentsWithStats() to find the centroids of these red points.
Calculate the Euclidean distance between the centroids.
Also, you can select the points by brightness alone, without even using their color.
import cv2
img = cv2.imread('HAgbc.jpg')
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.normalize(gray, gray, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
points=cv2.threshold(gray, 230, 255, cv2.THRESH_BINARY )[1]
output = cv2.connectedComponentsWithStats(points, 8, cv2.CV_32S)
centroids = output[3]
x,y=(centroids[1]-centroids[2])
dist=cv2.magnitude(x, y)[0]
print('distance is: ', *dist)
Or this code (finding the two brightness maxima):
import cv2
img = cv2.imread('HAgbc.jpg')
gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
pos1=cv2.minMaxLoc(gray)[3]
cv2.circle(gray, pos1, 30, 0, -1) # masking first spot
pos2=cv2.minMaxLoc(gray)[3]
x=pos1[0]-pos2[0]
y=pos1[1]-pos2[1]
dist=cv2.magnitude(x, y)[0]
print('distance is: ', *dist)
I need to segment the seeds in the image below and crop them.
https://i.stack.imgur.com/ndOkX.jpg
They can be pretty close to each other and sometimes overlap, so I chose to use the watershed algorithm for this task.
My results are in the image below, after drawing the contours of the returned markers. As you can see, I'm having problems defining good markers: the individual seeds are outlined, but there are many inner lines that I do not want.
https://i.stack.imgur.com/BtOfj.jpg
How would I go about removing them or defining better markers?
The code I'm running:
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
import matplotlib.pyplot as plt
from scipy import ndimage
import cv2 as cv
import imutils
import numpy as np
img = cv.imread("image.jpg")
blur = cv.GaussianBlur(img, (7,7), 0)
#color space change
mSource_Hsv = cv.cvtColor(blur, cv.COLOR_BGR2HSV)
mMask = cv.inRange(mSource_Hsv, np.array([0,0,0]), np.array([80,255,255]))
output = cv.bitwise_and(img, img, mask=mMask)
#grayscale
img_grey = cv.cvtColor(output, cv.COLOR_BGR2GRAY)
#thresholding
ret,th1 = cv.threshold(img_grey,0,255,cv.THRESH_BINARY + cv.THRESH_OTSU)
#dist transform
D = ndimage.distance_transform_edt(th1)
#markers
localMax = peak_local_max(D, indices=False, min_distance=20, labels=th1)
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
#apply watershed
labels = watershed(-D, markers, mask=th1)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
# loop over the unique labels
for label in np.unique(labels):
    if label == 0:
        continue
    # draw label on the mask
    mask = np.zeros(img_grey.shape, dtype="uint8")
    mask[labels == label] = 255
    # detect contours in the mask and grab the largest one
    cnts = cv.findContours(mask.copy(), cv.RETR_EXTERNAL,
                           cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv.contourArea)
    cv.drawContours(img, cnts, -1, (0, 255, 0), 2)
cv.imshow("segmented",img)
cv.waitKey(0)
You can merge every two contours that satisfy the following condition:
The area of the convex hull of the merged contours is close to the sum of the areas of the two contours.
The following solution uses a kind of "brute force" approach that tries merging every contour with all the other contours (not very efficient).
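As a standalone illustration, that condition can be written as a small predicate (a sketch; the 1.1 factor matches the 10% tolerance used in the code below):
import cv2 as cv
import numpy as np

def should_merge(c1, c2, tol=1.1):
    # merge only if the convex hull of both contours adds at most ~10% area
    merged = cv.convexHull(np.vstack((c1, c2)))
    return cv.contourArea(merged) < (cv.contourArea(c1) + cv.contourArea(c2)) * tol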
Here is a working code sample (please read the comments):
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
import matplotlib.pyplot as plt
from scipy import ndimage
import cv2 as cv
import imutils
import numpy as np
img = cv.imread("image.jpg")
blur = cv.GaussianBlur(img, (7,7), 0)
#color space change
mSource_Hsv = cv.cvtColor(blur, cv.COLOR_BGR2HSV)
mMask = cv.inRange(mSource_Hsv, np.array([0,0,0]), np.array([80,255,255]))
output = cv.bitwise_and(img, img, mask=mMask)
#grayscale
img_grey = cv.cvtColor(output, cv.COLOR_BGR2GRAY)
#thresholding
ret,th1 = cv.threshold(img_grey,0,255,cv.THRESH_BINARY + cv.THRESH_OTSU)
#dist transform
D = ndimage.distance_transform_edt(th1)
#markers
localMax = peak_local_max(D, indices=False, min_distance=20, labels=th1)
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
#apply watershed
labels = watershed(-D, markers, mask=th1)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
contours = []
# loop over the unique labels, and append the convex hulls to the contours list
for label in np.unique(labels):
    if label == 0:
        continue
    # draw label on the mask
    mask = np.zeros(img_grey.shape, dtype="uint8")
    mask[labels == label] = 255
    # detect contours in the mask and grab the largest one
    cnts = cv.findContours(mask.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv.contourArea)
    ## Ignore small contours
    #if c.shape[0] < 20:
    #    continue
    # Get the convex hull of the contour - it's going to help when merging contours
    hull = cv.convexHull(c)
    #cv.drawContours(img, c, -1, (0, 255, 0), 2)
    cv.drawContours(img, [hull], -1, (0, 255, 0), 2, 1)
    # Append hull to contours list
    contours.append(hull)
# Merge the contours that do not increase the convex hull area by much.
# Note: this is a kind of "brute force" solution, and can be improved.
################################################################################
for i in range(len(contours)):
    c = contours[i]
    area = cv.contourArea(c)
    # Iterate over all contours from i+1 to the end of the list
    for j in range(i+1, len(contours)):
        c2 = contours[j]
        area2 = cv.contourArea(c2)
        area_sum = area + area2
        # Merge the two contours together
        tmp = np.vstack((c, c2))
        merged_c = cv.convexHull(tmp)
        merged_area = cv.contourArea(merged_c)
        # Replace contours c and c2 by their merged convex hull, if the total area is increased by no more than 10%
        if merged_area < area_sum*1.1:
            # Replace contour with merged one.
            contours[i] = merged_c
            contours[j] = merged_c
            c = merged_c
            area = merged_area
################################################################################
# Draw the new contours in red color
for c in contours:
    # Ignore small contours
    if cv.contourArea(c) > 100:
        cv.drawContours(img, [c], -1, (0, 0, 255), 2, 1)
cv.imshow("segmented",img)
cv.waitKey(0)
cv.destroyAllWindows()
Result:
I have an image of a graph. I perform some preprocessing functions on the image in order to extract the graph line (which works). I then try to find the contour of the extracted graph line, which is saved as a separate image. However, when I do this, I do not get the desired results.
Graph line extracted
Contour found of the above image
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread("/Users/2020shatgiskessell/Desktop/graph_extracting/Test_Graphs/Graph2.jpg")
h,w = img.shape[:2]
mask = np.zeros((h,w), np.uint8)
mask2 = mask = np.zeros((h,w), np.uint8)
def find_contours(image):
    # Transform to gray colorspace and threshold the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    # erode then dilate the image (for denoising)
    kernel = np.ones((2,2), np.uint8)
    opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
    # Find contours in order of hierarchy
    # CHAIN_APPROX_NONE gives all the points on the contour
    _, contours, hierarchy = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    return contours
#---------------------------------------------------------------
#CLEAN UP IMAGE AND JUST EXTRACT LINE
#get the biggest contour
cnt = max(find_contours(img), key=cv2.contourArea)
cv2.drawContours(mask, [cnt], 0, 255, -1)
# Perform a bitwise operation
res = cv2.bitwise_and(img, img, mask=mask)
# Threshold the image again
gray = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Find all non-zero pixels of the thresholded image
non_zero = cv2.findNonZero(thresh)
# Set those pixels to white in the result
for i in range(0, len(non_zero)):
    first_x = non_zero[i][0][0]
    first_y = non_zero[i][0][1]
    first = res[first_y, first_x]
    res[first_y, first_x] = 255
# Display the image
cv2.imwrite("extractedline.png", res)
#-------------------------------------------------------
#GET CONTOUR OF EXTRACTED LINE - NOT WORKING
i = 0
# Display contours
for contour in find_contours(res):
    # approximate the contour shape
    cv2.drawContours(mask2, [contour], 0, 255, -1)
    res2 = cv2.bitwise_and(res, res, mask=mask2)
    i = i + 1
    print (i)
cv2.imshow('after', mask2)
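Note that mask2 = mask = np.zeros((h,w), np.uint8) binds both names to the same array, so everything drawn into mask in the clean-up stage is already present in mask2 when the contour loop runs. If the two are meant to be independent buffers, allocate them separately (a minimal sketch):
mask = np.zeros((h, w), np.uint8)
mask2 = np.zeros((h, w), np.uint8)   # separate buffer, no longer aliased to mask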
I want to detect shapes with OpenCV and Python, so I chose contour features, but now I have a problem: how can I distinguish between a square and a lozenge using OpenCV and Python?
If there is another method, can you tell me please? My image looks like this:
I add my code:
#-*- coding: utf-8 -*-
import cv2
import numpy as np
from pyimagesearch.shapedetector import ShapeDetector
import argparse
import imutils
from scipy import ndimage
import math
import matplotlib.pyplot as plt
from skimage import io, morphology, img_as_bool, segmentation
global limit
# cv2.threshold(src, thresh, maxval, type[, dst])
import math
def angle(pt1, pt2):
    x1, y1 = pt1
    x2, y2 = pt2
    inner_product = x1*x2 + y1*y2
    len1 = math.hypot(x1, y1)
    len2 = math.hypot(x2, y2)
    return math.acos(inner_product/(len1*len2))
def calculate(pt, ls):
    i = 2
    for x in ls:
        pt2 = (x, i)
        i = i+1
        ang = angle(pt, pt2)*180/math.pi
        ang = ang * (-1)
        print (ang)
Image = cv2.imread("114.png")
# Extraction of Blue channel
b = Image[:,:,0]
# Callback function for the trackbar (does not do any work)
def nothing(*arg):
    pass
# Generate trackbar Window Name
TrackbarName = "Trackbar"
# Make Window and Trackbar
cv2.namedWindow("window", cv2.WINDOW_NORMAL)
cv2.createTrackbar(TrackbarName, "window", 0, 250, nothing)
img_threshed = np.zeros(b.shape, np.uint8)
ret,img_threshed = cv2.threshold(b,168,255,cv2.THRESH_BINARY)
cv2.imshow("window55", img_threshed)
# Expanding borders of the objects
kernel = np.ones((9, 9),np.uint8)
img_dilated = cv2.dilate(img_threshed, kernel)
cv2.namedWindow("Dilated Blue Channel", cv2.WINDOW_NORMAL)
cv2.imshow("Dilated Blue Channel", img_dilated)
# Retrieving contours by subtracting the base objects from the expanded objects
img_contours = img_dilated - img_threshed
cv2.namedWindow("Contours", cv2.WINDOW_NORMAL)
cv2.imshow("Contours", img_contours)
median = cv2.medianBlur(img_contours,3)
cv2.imshow("median img_threshed", median)
#_, contours0, hierarchy = cv2.findContours( median, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#cnts = [cv2.approxPolyDP(cnt, 2, True) for cnt in contours0]
gray = cv2.imread('114.png')
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
# apply Canny edge detection using a wide threshold, tight
# threshold, and automatically determined threshold
wide = cv2.Canny(blurred, 90, 150)
cnts = cv2.findContours(img_contours, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#----Find contour in the image----
_, contours, hierarchy = cv2.findContours(img_contours, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
# loop over the contours
for c in cnts:
    #----Draw a rectangle having minimum area around it using contour features as you mentioned----
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    im = cv2.drawContours(Image, [box], 0, (0,0,255), 2)
    #----Draw one diagonal----
    #cv2.line(Image,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2)
    #cv2.line(Image,(0,10),(Image.shape[1], 10), (255,255,0),2)
    #calculate(cv2.line(Image,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2),cv2.line(Image,(0,10),(Image.shape[1], 10), (255,255,0),2))
cv2.imwrite("Final_Image.jpg", Image)
# show the output image
cv2.imshow("Image", Image)
cv2.waitKey(0)
cv2.destroyAllWindows()
As mentioned in the comments section, if you want to distinguish an apparent square from a lozenge, the only property that is distinct is the diagonals.
Using Python with OpenCV, I coded the following to obtain one diagonal for the square and the lozenge:
#----Find contour in the image----
_, contours, hierarchy = cv2.findContours(th, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#----Draw a rectangle having minimum area around it using Contour features as you mentioned----
rect = cv2.minAreaRect(contours[0]) #---I used contours[0] since there is only one contour in the image----
box = cv2.boxPoints(rect)
box = np.int0(box)
im = cv2.drawContours(im1, [box], 0, (0,0,255), 2)
#----Draw one diagonal ----
cv2.line(im1,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2)
cv2.imwrite("Final_Image.jpg", im1)
This is what I get:
SQUARE:
LOZENGE:
Now since you have obtained the diagonal you have to compare it with a reference line to find the angle in order to determine whether it is a square or not.
For that, first draw a reference line (I considered a horizontal line):
cv2.line(im1,(0,10),(im1.shape[1], 10), (255,255,0),2)
You will get :
SQUARE:
LOZENGE:
Now you just have to calculate the angle between these two lines (the diagonal and the reference line):
If the angle is 90 degrees or 0 => Lozenge.
Otherwise => Square
How do you calculate angles between two lines?
See THIS POST
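For a quick self-contained check, the angle test can be sketched with math.atan2 (the classify helper and the tolerance value are assumptions for illustration; box holds the four corners from cv2.boxPoints as above):
import math

def classify(box, tol_deg=10):
    # angle of the 0-to-2 diagonal against the horizontal reference line
    dx = float(box[2][0] - box[0][0])
    dy = float(box[2][1] - box[0][1])
    ang = abs(math.degrees(math.atan2(dy, dx))) % 180
    # a lozenge stands on a corner, so its diagonal is near 0 or 90 degrees;
    # a square's diagonal sits near 45 degrees
    if min(ang, abs(ang - 90), abs(ang - 180)) < tol_deg:
        return "Lozenge"
    return "Square"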