I followed the watershed tutorial on the OpenCV site. How could I use the same example and extend it to detect coin widths?
I know how I could use the HoughCircles() and findContours() Python functions, but the watershed approach is more stable on noisy backgrounds.
I would like to extract the segmented coins and use circle/contour detection for width estimation, but I don't understand how to extend this code.
import numpy as np
import cv2
from matplotlib import pyplot as plt
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_watershed/py_watershed.html
img_file_name = r'C:\Users\coins.jpg'
img = cv2.imread(img_file_name)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,100,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
#cv2.imshow("thresolded",thresh)
# noise removal
kernel = np.ones((6,6),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 3)
# sure background area
sure_bg = cv2.dilate(opening,kernel,iterations=3)
#cv2.imshow("dilate",sure_bg)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
cv2.imshow("dist_transform",sure_fg)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg,sure_fg)
cv2.imshow("subtract",unknown)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
#markers = cv2.watershed(img,markers)
markers = cv2.watershed(img,markers)
img[markers == -1] = [0,255,0]
cv2.imshow("markers",img)
Here is your code, slightly changed:
import numpy as np
import cv2
import imutils
from matplotlib import pyplot as plt
img_file_name = 'coins.jpg'
img = cv2.imread(img_file_name)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray,100,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# noise removal
kernel = np.ones((6,6),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 3)
# sure background area
sure_bg = cv2.dilate(opening,kernel,iterations=3)
# CHANGE
dist_transform = cv2.distanceTransform(thresh,cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(dist_transform,0.7*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
# CHANGE
unknown = cv2.subtract(thresh,sure_fg)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv2.watershed(img,markers)
Here is the code you can use:
# loop over the unique markers returned by the watershed
# algorithm
num_coins = np.amax(markers) - 1
coins_width = np.zeros(num_coins)
for marker in np.unique(markers):
    # if the marker is -1 we are examining the borders,
    # if the marker is 1 we are examining the background,
    # so simply ignore them
    if marker <= 1:
        continue
    # otherwise, allocate memory for the marker region and draw
    # it on the mask
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[markers == marker] = 255
    # detect contours in the mask and grab the largest one
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    largest_cnt = max(cnts, key=cv2.contourArea)
    #
    # First way
    #
    # calculate the center of the contour
    M = cv2.moments(largest_cnt)
    x = int(M["m10"] / M["m00"])
    y = int(M["m01"] / M["m00"])
    # calculate the radius of the contour from its area (assuming it is a circle)
    area = cv2.contourArea(largest_cnt)
    radius = np.sqrt(area / np.pi)
    #
    # Second way
    #
    # draw a circle enclosing the object
    # ((x, y), radius) = cv2.minEnclosingCircle(largest_cnt)
    cv2.circle(img, (int(x), int(y)), int(radius), (0, 255, 0), 1)
    cv2.putText(img, "#{}".format(marker - 2), (int(x) - 10, int(y)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 1)
    coins_width[marker - 2] = 2 * radius
print(coins_width)
cv2.imshow("markers", img)
cv2.waitKey(0)
The code is a modified version of the code in the article:
https://www.pyimagesearch.com/2015/11/02/watershed-opencv/
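Note that coins_width holds widths in pixels. If you need a physical width, here is a minimal sketch, assuming you know the real diameter of one reference coin (taken here to be the largest detected coin) and that all coins lie flat in the same plane; REFERENCE_DIAMETER_MM is a hypothetical value you would supply yourself:
# Hedged sketch: scale pixel widths by one known reference coin (assumed to be the largest one).
REFERENCE_DIAMETER_MM = 23.25   # hypothetical real diameter of the reference coin
pixels_per_mm = coins_width.max() / REFERENCE_DIAMETER_MM
coins_width_mm = coins_width / pixels_per_mm
print(np.round(coins_width_mm, 2))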
Related
This is the output from the watershed, and I want to mark labels like 1, 2, 3, etc. on the identified regions. I have tried cv2.putText together with cv2.boundingRect, but the labels do not end up in the center of the identified regions.
for i in range(2, ret3+1):
    a = 0
    b = 0
    mask = np.where(markers == i, np.uint8(255), np.uint8(0))
    x, y, w, h = cv2.boundingRect(mask)
    area = cv2.countNonZero(mask[y:y+h, x:x+w])
    print("Label %d at (%d, %d) size (%d x %d) area %d pixels" % (i, x, y, w, h, area))
    # Visualize with a random region color
    color = np.uint8(np.random.randint(0, 256, 3)).tolist()
    output[mask != 0] = color
    cv2.putText(img2, '%d' % i, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 3.9, color, 15, cv2.LINE_AA)
plt.imshow(img2, cmap='jet')
plt.show()
The labels generated by the above code are as follows.
What I want is to mark the labels 3, 4, 5, etc. in the center of the objects identified by the watershed.
You can find the center of each region like this:
markers = cv2.watershed(img, markers)
labels = np.unique(markers)
for label in labels:
    y, x = np.nonzero(markers == label)
    cx = int(np.mean(x))
    cy = int(np.mean(y))
The result:
Complete example:
import cv2
import numpy as np
img = cv2.imread("water_coins.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# noise removal
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
# sure background area
sure_bg = cv2.dilate(opening, kernel, iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers + 1
# Now, mark the region of unknown with zero
markers[unknown == 255] = 0
markers = cv2.watershed(img, markers)
labels = np.unique(markers)
for label in labels:
    y, x = np.nonzero(markers == label)
    cx = int(np.mean(x))
    cy = int(np.mean(y))
    color = (255, 255, 255)
    img[markers == label] = np.random.randint(0, 255, size=3)
    cv2.circle(img, (cx, cy), 2, color=color, thickness=-1)
    cv2.putText(img, f"{label}", (cx, cy), cv2.FONT_HERSHEY_SIMPLEX, 0.35, color, 1, cv2.LINE_AA)
cv2.imwrite("out.jpg", img)
Erode every region independently with a structuring element the size of the label text, then place the label on any remaining pixel.
In some cases (tiny regions) no pixel at all will remain. You then have two options:
use a pixel from the "ultimate erosion";
use some location near the region and a leader line (but avoiding collisions is not easy).
You can also work with the inner distances of the regions and pick the pixel with maximum distance, as in the sketch below.
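A minimal sketch of the inner-distance idea, assuming markers is the label image returned by cv2.watershed (the helper name label_anchor is just illustrative):
import cv2
import numpy as np

def label_anchor(markers, label):
    # point deepest inside the region: the maximum of the distance transform of its mask
    mask = (markers == label).astype(np.uint8) * 255
    dist = cv2.distanceTransform(mask, cv2.DIST_L2, 5)
    _, _, _, max_loc = cv2.minMaxLoc(dist)   # max_loc is (x, y)
    return max_loc

# usage: draw each label at its anchor point
# for label in np.unique(markers):
#     if label <= 1:        # skip boundaries (-1) and background (1)
#         continue
#     cv2.putText(img, str(label), label_anchor(markers, label),
#                 cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1, cv2.LINE_AA)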
I followed a tutorial on watershed segmentation and used it to segment each red blood cell in an image. I'm new to OpenCV and would like to know whether it is possible to draw circles around the cells using watershed segmentation. If so, could you please show how it is done?
Original image
Output of Watershed segmentation
Code is given below
import numpy as np
import cv2
from matplotlib import pyplot as plt
def fillHoles(otsuImg):
    # find contours
    contours, _ = cv2.findContours(otsuImg, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # filter out contours by size
    small_cntrs = []
    for con in contours:
        area = cv2.contourArea(con)
        # print(area)
        if area < 1000:  # size threshold
            small_cntrs.append(con)
    cv2.drawContours(otsuImg, small_cntrs, -1, 0, -1)
# load the image
img = cv2.imread('resources/rbc2.png')
img_pyr = cv2.pyrMeanShiftFiltering(img, 21, 51)
img_median = cv2.medianBlur(img_pyr, 9)
img_gray = cv2.cvtColor(img_median, cv2.COLOR_BGR2GRAY)
ret, img_thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# fill holes of RBC
fillHoles(img_thresh)
# invert the image
img_thresh = cv2.bitwise_not(img_thresh)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(img_thresh,cv2.MORPH_OPEN,kernel, iterations=2)
# sure background area
sure_bg = cv2.dilate(opening,kernel,iterations=3)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform,0.1*dist_transform.max(),255,0)
# _, sure_fg = cv2.threshold(np.uint8(dist_transform), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv2.watershed(img,markers)
img[markers == -1] = [255,0,0]
cv2.imshow('markers2', np.uint8(markers))
cv2.imshow('Final output', img)
cv2.waitKey(0)
I had the same problem and ended up using skimage.segmentation.watershed for the last step, to get the labels I could use to calculate the contours of the watershed cells. Once you have the contours, you can calculate and plot the enclosing circle as usual:
# your code above
ret, markers = cv2.connectedComponents(sure_fg)

from skimage.segmentation import watershed
labels = watershed(-dist_transform,
                   markers,
                   mask=img_thresh,
                   watershed_line=False)

watershed_contours = list(map(lambda l: cv2.findContours((labels == l).astype(np.uint8),
                                                         cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0][0],
                              np.unique(labels)[1:]))

output = img.copy()
for contour in watershed_contours:
    (x, y), radius = cv2.minEnclosingCircle(contour)
    center = (int(x), int(y))
    radius = int(radius)
    cv2.circle(output, center, radius, (0, 255, 0), 2)
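As a small follow-up, a sketch only: the same contours also give you a cell count and per-cell diameters in pixels, and you can display the circles drawn on output:
# Hedged follow-up, reusing watershed_contours and output from the snippet above.
diameters = [2 * cv2.minEnclosingCircle(c)[1] for c in watershed_contours]
print("cells found:", len(watershed_contours))
print("diameters (px):", [round(d, 1) for d in diameters])

cv2.imshow("enclosing circles", output)
cv2.waitKey(0)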
I want to mask human fingernails (fingernails white and everything else, including the hand, black). I do simple image operations, then Canny edge detection; after I smooth the image, I find contours and fill the internal contours with white, which would be the fingernails.
My problem is that when the fingernails are painted they are quite easy to detect, but when there is no paint it becomes really complicated, and the program has to process 50 images and save the outputs to a certain folder.
I am confused about how to proceed; if anybody has done something similar I would appreciate some help.
import cv2
import numpy as np
import matplotlib.pyplot as plt
def display_img(img):
    fig = plt.figure(figsize=(12, 10))
    ax = fig.add_subplot(111)
    plt.imshow(img, cmap='gray')
img = cv2.imread('nail2.png')
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
blur = cv2.blur(gray,ksize=(1,1))
kernel = np.ones((5,5),np.uint8)
display_img(blur)
med = np.median(gray)
gradient = cv2.Laplacian(blur,cv2.CV_64F)
gradient = cv2.convertScaleAbs(gradient)
plt.imshow(gradient,'gray')
lower = int(max(0,0.7*med))
upper = int(min(255,1.3*med))
edges = cv2.Canny(blur,lower,upper)
display_img(edges)
edges = cv2.GaussianBlur(edges, (11, 11), 0) # smoothing before applying threshold
display_img(edges)
image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
# Create empty array to hold internal contours
image_internal = np.zeros(image.shape)
# Iterate through list of contour arrays
for i in range(len(contours)):
    # If the parent index (hierarchy[0][i][3]) is NOT -1, the contour is internal
    if hierarchy[0][i][3] != -1:
        # Draw the contour filled
        cv2.drawContours(image_internal, contours, i, 255, -1)
display_img(image_internal)
Below is a good result:
And here is a bad result, even though the fingers have pink paint:
Well, you have a big lighting and scale problem in these two images, but a possible solution is to segment the color channels and look for blobs.
Then you can filter the blobs with the blob-detector parameters.
Here is the code you can try:
import cv2
import numpy as np
fra = cv2.imread('nails.png')
height, width, channels = fra.shape
src = cv2.medianBlur(fra, 21)
hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV_FULL)
mask = cv2.inRange(hsv, np.array([0, 0, 131]), np.array([62, 105, 255]))
mask = cv2.erode(mask, None, iterations=8)
mask = cv2.dilate(mask, None, iterations=8)
params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = int((height * width) / 500)
params.maxArea = int((height * width) / 10)
params.filterByCircularity = True
params.minCircularity = 0.5
params.filterByConvexity = True
params.minConvexity = 0.5
params.filterByInertia = True
params.minInertiaRatio = 0.01
detector = cv2.SimpleBlobDetector_create(params)
key_points = detector.detect(255 - mask)
vis = cv2.bitwise_and(hsv, hsv, mask=mask)
vis = cv2.addWeighted(src, 0.2, vis, 0.8, 0)
cv2.drawKeypoints(vis, key_points, vis, (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
for kp in key_points:
    cv2.drawMarker(vis, (int(kp.pt[0]), int(kp.pt[1])), color=(0, 255, 0), markerType=cv2.MARKER_CROSS, thickness=3)
cv2.imshow("VIS", vis)
cv2.imwrite('nails_detected.png', vis)
cv2.waitKey(0)
cv2.destroyAllWindows()
Good luck!
After loads of reading and trying things out, I hope someone here can help me with the final steps.
I'm about to use OpenCV for counting two differently colored objects.
I convert to an HSV image and define the boundaries for the two colors to get a mask for each color. Afterwards I use a kernel to smooth the picture and correct holes.
At the end I use watershed to identify the single beads in the picture.
The algorithm doesn't work too badly but is still too imprecise, especially for the blue beads, closely touching objects and the wall regions (see maskb). I'd be very thankful for any tips for improvement.
The amounts in the picture are ~580 blue and ~1632 white beads.
My code is the following:
# imports needed by the snippet below
import cv2
import numpy as np
import imutils
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

img = cv2.imread('xxx')
#set font
font= cv2.FONT_HERSHEY_SIMPLEX
# shift correction
shifted = cv2.pyrMeanShiftFiltering(img, 15, 30)
#hsv conversion
hsv = cv2.cvtColor(shifted, cv2.COLOR_BGR2HSV)
# define range of white color in HSV for brown background
lower_white = np.array([0, 0, 190])
upper_white = np.array([360, 255, 255])
maskw= cv2.inRange(hsv, lower_white, upper_white)
# define range of blue color in HSV
lower_blue = np.array([0, 0, 0])
upper_blue = np.array([360, 255, 48])
maskb=cv2.inRange(hsv,lower_blue, upper_blue)
"""MASK CORRECTION"""
# corrects open lines and holes in the picture; closing doesn't work, opening leads to a better result; (5,5) is the kernel array
kernelOpen=np.ones((4,4))
kernelClose=np.ones((5,5))
#morphology mask white
maskOpenw=cv2.morphologyEx(maskw,cv2.MORPH_OPEN,kernelOpen)
maskClosew=cv2.morphologyEx(maskOpenw,cv2.MORPH_CLOSE,kernelClose)
#morphology mask blue
maskOpenb=cv2.morphologyEx(maskb,cv2.MORPH_OPEN,kernelOpen)
maskCloseb=cv2.morphologyEx(maskOpenb,cv2.MORPH_CLOSE,kernelClose)
""" FOR WHITE!!! - min distance factor to play"""
# compute the exact Euclidean distance from every binary
# pixel to the nearest zero pixel, then find peaks in this
# distance map
D = ndimage.distance_transform_edt(maskOpenw)
localMax = peak_local_max(D, indices=False, min_distance=9,
                          labels=maskOpenw)
# perform a connected component analysis on the local peaks,
# using 8-connectivity, then appy the Watershed algorithm
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=maskOpenw)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
# loop over the unique labels returned by the Watershed
# algorithm
for label in np.unique(labels):
    # if the label is zero, we are examining the 'background'
    # so simply ignore it
    if label == 0:
        continue
    # otherwise, allocate memory for the label region and draw
    # it on the mask
    mask = np.zeros(maskOpenw.shape, dtype="uint8")
    mask[labels == label] = 255
    # detect contours in the mask and grab the largest one
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    # draw a circle enclosing the object
    ((x, y), r) = cv2.minEnclosingCircle(c)
    cv2.circle(img, (int(x), int(y)), int(r), (0, 255, 0), 2)
    # cv2.putText(img, "#{}".format(label), (int(x) - 10, int(y)),
    #             cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
""" FOR Blue!!!"""
# compute the exact Euclidean distance from every binary
# pixel to the nearest zero pixel, then find peaks in this
# distance map
D = ndimage.distance_transform_edt(maskb)
localMax = peak_local_max(D, indices=False, min_distance=9,
                          labels=maskOpenb)
# perform a connected component analysis on the local peaks,
# using 8-connectivity, then appy the Watershed algorithm
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=maskOpenb)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
# loop over the unique labels returned by the Watershed
# algorithm
for label in np.unique(labels):
    # if the label is zero, we are examining the 'background'
    # so simply ignore it
    if label == 0:
        continue
    # otherwise, allocate memory for the label region and draw
    # it on the mask
    mask = np.zeros(maskOpenb.shape, dtype="uint8")
    mask[labels == label] = 255
    # detect contours in the mask and grab the largest one
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    # draw a circle enclosing the object
    ((x, y), r) = cv2.minEnclosingCircle(c)
    cv2.circle(img, (int(x), int(y)), int(r), (0, 255, 0), 2)
    # cv2.putText(img, "#{}".format(label), (int(x) - 10, int(y)),
    #             cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
# show picture
cv2.imshow("image", img)
cv2.waitKey()
Thanks in advance,
Jannik
EDIT: I tried to do a distance transformation for the blue beads which leads to the following results:
Distance Transformation
The added Code:
# sure background area
sure_bg = cv2.dilate(maskb,kernelOpen,iterations=5)
cv2.imshow("surebg", sure_bg)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(maskb,cv2.DIST_L2,3)
ret, sure_fg = cv2.threshold(dist_transform,0.14*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
cv2.imshow("surefg", sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
cv2.imshow("unknown", unknown)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv2.watershed(img,markers)
img[markers == -1] = [0,0,255]
print(ret)
cv2.imshow("img", img)
The marking of the beads works quite well, even though some regions are imprecise. If I understood correctly, the 'ret' value gives me the count of objects marked. Does anyone have an idea on how to reach a more precise count?
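A small clarification on the counting, as a hedged sketch reusing ret and markers from the code above: cv2.connectedComponents returns the number of labels including the background, so the seed count is ret - 1, and after cv2.watershed the background label 1 and the boundary label -1 have to be excluded:
# Minimal counting sketch, not a fix for the precision itself.
# ret counts the background as label 0, so the number of sure-foreground seeds is:
num_seeds = ret - 1
print("seeds from connectedComponents:", num_seeds)
# after watershed, markers contains -1 (boundaries), 1 (background) and 2..N (beads):
num_beads = len([m for m in np.unique(markers) if m > 1])
print("bead regions after watershed:", num_beads)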
This is my first question here, so I'm asking for your understanding. I have to process hundreds of satellite images.
I try to find the contour of the area of useful data located on the image, only the largest one.
Then I want to save the coordinates of the few points (x, y) corresponding to this contour. In the simplest case the area is a square and can be represented by 4 points, but for more complicated shapes the contour will be approximated by a few more points (preferably no more than ~fifteen). However, I am still not able to find the areas on my images. Sometimes the area touches the edge of the image; therefore, in this script I enlarge the pictures and add additional borders filled with the background color. You will find example pictures here: satellite1, satellite2, satellite3
As you can see, the images can have different background colors and, in addition, they contain country borders and a legend. I have tried to use Aidenhjj's tips from OpenCV - using cv2.approxPolyDP() correctly and prepared my script. I have tried many approaches, filtering, and tuning parameters, but still can't succeed with my data. I am asking you for help.
import numpy as np
import cv2
import matplotlib.pyplot as plt
image = cv2.imread('image1.jpg')
image = cv2.resize(image, None,fx=0.25, fy=0.25, interpolation = cv2.INTER_CUBIC)
ysize, xsize, channels = image.shape
print("Image size: {} x {}".format(xsize, ysize))
# calculate the histograms in the three channels and measure the background color
# (note: cv2.imread returns BGR, so cv2.split actually yields the channels in B, G, R order;
# the names below only matter per channel, and the peaks are passed back in the same order)
r, g, b = cv2.split(image)
image_data = image

histr = cv2.calcHist([r], [0], None, [256], [0, 256])
for y in range(0, len(histr)):
    elem = histr[y]
    if elem == histr.max():
        break
else:
    y = None
R = y

histr = cv2.calcHist([g], [0], None, [256], [0, 256])
for y in range(0, len(histr)):
    elem = histr[y]
    if elem == histr.max():
        break
else:
    y = None
G = y

histr = cv2.calcHist([b], [0], None, [256], [0, 256])
for y in range(0, len(histr)):
    elem = histr[y]
    if elem == histr.max():
        break
else:
    y = None
B = y

color = (R, G, B)
#add borders around the image colorized as background. This will allow me to find closed contour around area with data.
bordersize=100
new_xsize = xsize + bordersize*2
new_ysize = ysize + bordersize*2
#image_border.show()
image_border=cv2.copyMakeBorder(image, top=bordersize, bottom=bordersize, left=bordersize, right=bordersize, borderType= cv2.BORDER_CONSTANT, value=[R,G,B] )
#ysizeb, xsizeb, channelsb = image_border.shape
# get a blank canvas for drawing contour on and convert image to grayscale
canvas = np.zeros(image_border.shape, np.uint8)
#imgc = cv2.medianBlur(img,21)
img2gray = cv2.cvtColor(image_border,cv2.COLOR_BGR2GRAY)
# filter out country borders
kernel = np.ones((5,5),np.float32)/25
img2gray = cv2.filter2D(img2gray,-1,kernel)
# threshold the image and extract contours
thresh = cv2.adaptiveThreshold(img2gray,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY,11,11)
contours,hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
plt.subplot(111),plt.imshow(thresh,'gray')
plt.show()
# find the biggest area
cnt = contours[0]
max_area = cv2.contourArea(cnt)
for cont in contours:
    if cv2.contourArea(cont) > max_area:
        cnt = cont
        max_area = cv2.contourArea(cont)
perimeter = cv2.arcLength(cnt,True)
epsilon = 0.01*cv2.arcLength(cnt,True)
approx = cv2.approxPolyDP(cnt,epsilon,True)
hull = cv2.convexHull(cnt)
# cv2.isContourConvex(cnt)
cv2.drawContours(canvas, cnt, -1, (0, 255, 0), 3)
cv2.drawContours(canvas, approx, -1, (0, 0, 255), 3)
#cv2.drawContours(canvas, [hull], -1, (0, 0, 255), 3)
cv2.imshow("Contour", canvas)
k = cv2.waitKey(0)
if k == 27:  # wait for ESC key to exit
    cv2.destroyAllWindows()