So I have taken the code from GitHub by #bradmontgomer and am trying to modify it. The code first converts the frame to the HSV color space, splits the video frame into its color channels, and then performs an AND on the HSV components to identify the laser. I am having trouble finding the contours of the detected laser point. Here's my code:
def threshold_image(self, channel):
if channel == "hue":
minimum = self.hue_min
maximum = self.hue_max
elif channel == "saturation":
minimum = self.sat_min
maximum = self.sat_max
elif channel == "value":
minimum = self.val_min
maximum = self.val_max
(t, tmp) = cv2.threshold(
self.channels[channel], # src
maximum, # threshold value
        0, # we don't care about this value because of the selected type
        cv2.THRESH_TOZERO_INV # threshold type
)
(t, self.channels[channel]) = cv2.threshold(
tmp, # src
minimum, # threshold value
255, # maxvalue
cv2.THRESH_BINARY # type
)
if channel == 'hue':
# only works for filtering red color because the range for the hue is split
self.channels['hue'] = cv2.bitwise_not(self.channels['hue'])
def detect(self, frame):
# resize the frame, blur it, and convert it to the HSV
# color space
frame = imutils.resize(frame, width=600)
hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# split the video frame into color channels
h, s, v = cv2.split(hsv_img)
self.channels['hue'] = h
self.channels['saturation'] = s
self.channels['value'] = v
# Threshold ranges of HSV components; storing the results in place
self.threshold_image("hue")
self.threshold_image("saturation")
self.threshold_image("value")
# Perform an AND on HSV components to identify the laser!
self.channels['laser'] = cv2.bitwise_and(
self.channels['hue'],
self.channels['value']
)
self.channels['laser'] = cv2.bitwise_and(
self.channels['saturation'],
self.channels['laser']
)
# Merge the HSV components back together.
hsv_image = cv2.merge([
self.channels['hue'],
self.channels['saturation'],
self.channels['value'],
])
thresh = cv2.threshold(self.channels['laser'], 25, 255, cv2.THRESH_BINARY)[1]
#find contours in the mask and initialize the current
#(x, y) center of the ball
#cnts = cv2.findContours(self.channels['laser'].copy(), cv2.RETR_EXTERNAL,
#cv2.CHAIN_APPROX_SIMPLE)
(_, cnts, _) = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
center = None
# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# only proceed if the radius meets a minimum size
if radius > 10:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
cv2.imshow('LaserPointer', self.channels['laser'])
################################################
return hsv_image
I am getting cnts greater than 0 at the line "if len(cnts) > 0:", but I can't see a circle drawn around the laser pointer.
There was another function, display(), that was displaying the laser frame (self.channels['laser']):
def display(self, img, frame):
"""Display the combined image and (optionally) all other image channels
NOTE: default color space in OpenCV is BGR.
"""
cv2.imshow('RGB_VideoFrame', frame)
cv2.imshow('LaserPointer', self.channels['laser'])
I commented out these cv2.imshow lines in this function, and then I was able to see the circle around the laser pointer, because the cv2.imshow call inside detect(self, frame) was now the one taking effect. I then applied further processing to the pointer to detect its location.
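Note that detect() and display() both call cv2.imshow with the same window name, 'LaserPointer', so whichever runs last overwrites the other. A minimal sketch of an alternative fix, assuming the class structure above, is to give each window a unique name instead of commenting the calls out:
def display(self, img, frame):
    """Display the combined image and (optionally) all other image channels.
    NOTE: default color space in OpenCV is BGR.
    """
    cv2.imshow('RGB_VideoFrame', frame)
    # A distinct window name avoids clobbering the 'LaserPointer'
    # window that detect() draws into.
    cv2.imshow('LaserPointer_mask', self.channels['laser'])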
I am using a Raspberry Pi 4 (8 GB) with a Pi camera to detect the water level. I have defined a line from (0, 375) to (800, 375). If the topmost point of the water level contour goes above this line, I want to call a function. Here is my code and an attached image of the setup. How do I get the water level contour only? Does it require Canny edge detection over the contours to get a clear water level? First I get the largest contour and then define its topmost point.
import numpy as np
import cv2
import time
from datetime import datetime
#color=(255,0,0)
color=(0,255,0)
thickness=2
kernel = np.ones((2,2),np.uint8) # added 01/07/2021
picflag = 0 # set value to 1 once picture is taken
# function to take a still picture when the water level goes beyond the threshold
def takepicture(frame):
    global picflag  # without this, the assignment below only creates a local variable
    currentTime = datetime.now()
    picTime = currentTime.strftime("%d.%m.%Y-%H%M%S") # create a file name for our picture
    text = currentTime.strftime("%d.%m.%Y-%H:%M:%S")
    font = cv2.FONT_HERSHEY_SIMPLEX # font
    org = (5, 20) # org; note that (05, 20) is a syntax error in Python 3
    fontScale = 0.5 # fontScale
    color = (0, 0, 255) # red color in BGR
    thickness = 1 # line thickness of 1 px
    picName = picTime + '.png'
    image = cv2.putText(frame, text, org, font, fontScale, color, thickness, cv2.LINE_AA, False)
    cv2.imwrite(picName, image)
    picflag = 1
    return
cap = cv2.VideoCapture(0)
while(True):
# Capture frame-by-frame
ret, frame = cap.read() # ret = 1 if the video is captured; frame is the image
# Our operations on the frame come here
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
#blur = cv2.GaussianBlur(gray,(21,21),0)
    gray = cv2.medianBlur(gray, 3) # to remove salt-and-pepper noise
#ret,thresh = cv2.threshold(gray,10,20,cv2.THRESH_BINARY_INV)
    ret,thresh = cv2.threshold(gray,127,127,cv2.THRESH_BINARY_INV) # note: maxval=127, so mask pixels are 127 rather than 255
thresh = cv2.morphologyEx(thresh, cv2.MORPH_GRADIENT, kernel) # get outer boundaries only added 01/07/2021
thresh = cv2.dilate(thresh,kernel,iterations = 5) # strengthen weak pixels added 01/07/2021
img1, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
#img1,contours,hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) #added 01/07/2021
cv2.line(frame, pt1=(0,375), pt2=(800,375), color=(0,0,255), thickness=2) # added 01/07/2021
if len(contours) != 0:
c = max(contours, key = cv2.contourArea) # find the largest contour
#x,y,w,h = cv2.boundingRect(c) # get bounding box of largest contour
img2=cv2.drawContours(frame, c, -1, color, thickness) # draw largest contour
#img2=cv2.drawContours(frame, contours, -1, color, thickness) # draw all contours
#img3 = cv2.rectangle(img2,(x,y),(x+w,y+h),(0,0,255),2) # draw red bounding box in img
#center = (x, y)
#print(center)
left = tuple(c[c[:, :, 0].argmin()][0])
right = tuple(c[c[:, :, 0].argmax()][0])
top = tuple(c[c[:, :, 1].argmin()][0])
bottom = tuple(c[c[:, :, 1].argmax()][0])
# Draw dots onto frame
cv2.drawContours(frame, [c], -1, (36, 255, 12), 2)
cv2.circle(frame, left, 8, (0, 50, 255), -1)
cv2.circle(frame, right, 8, (0, 255, 255), -1)
cv2.circle(frame, top, 8, (255, 50, 0), -1)
cv2.circle(frame, bottom, 8, (255, 255, 0), -1)
#print('left: {}'.format(left))
#print('right: {}'.format(right))
#print(format(top))
        top_contour_point = top[1]
        print(top_contour_point)
#print('bottom: {}'.format(bottom))
#if ((top_countour_point <= 375) and (picflag == 0)): #checking if contour top point is above line
#takepicture(frame)
#continue
#if ((top_countour_point > 375) and (picflag == 0)) :
#picflag = 0
#continue
# Display the resulting image
# cv2.line(frame, pt1=(0,375), pt2=(800,375), color=(0,0,255), thickness=2) # added 01/07/2021
#cv2.imshow('Contour',img3)
#cv2.imshow('thresh' ,thresh)
cv2.imshow('Contour',frame)
if cv2.waitKey(1) & 0xFF == ord('q'): # press q to quit
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Caveats
As was pointed out in the comments, there is very little to work with based on your post. In general, I agree with s0mbre that you'd be better off with a water level sensor, and with kavko that if you do want to use a camera, you need to better control your environment: lighting, camera angles, background, etc.
However, that is not to say that it is not possible with your current setup, assuming that the setup is static except for the water level. As such, there are some assumptions that we can make.
Here are the steps that I took to get an approximate approach:
Gather image data
You have only provided one image with the lines already on it, so that's not a lot to go on. What I did was remove the line that you added:
Fortunately there wasn't too much of a line left afterwards.
Image processing:
I have loaded the image (this would come from the code you have posted).
Using the assumptions above, I have decided to focus on a narrow slice of the image. I selected only the middle 60 pixels (1)
slc = frame[:, 300:360]
Next, I have converted it to greyscale (2)
gray_slc = cv2.cvtColor(slc, cv2.COLOR_BGR2GRAY)
I have used Canny edge detection (see docs here) to find the edges in the image (3)
edges = cv2.Canny(gray_slc, 50, 90)
After that, I have applied a Hough Transform, to find all the edges (related Stack Overflow answer) (4)
rho = 1
theta = np.pi / 180
threshold = 15
min_line_length = 50
max_line_gap = 20
lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]), min_line_length, max_line_gap)
Given that I can assume that all of the top lines are the edge of the container, I averaged the y coordinates of the lines and picked the lowermost one as the water level:
y_avgs = [(line[0, 1] + line[0, 3]) / 2 for line in lines]
water_level = max(y_avgs)
Having this, I just checked whether it is over the threshold you selected:
trigger_threshold = 375
if water_level > trigger_threshold:
print("Water level is under the selected line")
Now, keep in mind, I only had the one image to go on. Considering lighting conditions, your results may vary.
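Putting those steps together, a minimal end-to-end sketch might look like the following (the file name, slice bounds, and thresholds are the assumptions from above and will need tuning for your footage):
import cv2
import numpy as np

frame = cv2.imread("water_level.png")  # assumed file name; in practice this is your camera frame
slc = frame[:, 300:360]                # narrow vertical slice through the container
gray_slc = cv2.cvtColor(slc, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray_slc, 50, 90)
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 15, np.array([]), 50, 20)
if lines is not None:
    # larger y means lower in the image, so the max is the lowest detected line
    y_avgs = [(line[0, 1] + line[0, 3]) / 2 for line in lines]
    water_level = max(y_avgs)
    trigger_threshold = 375
    if water_level > trigger_threshold:
        print("Water level is under the selected line")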
After loads of reading and experimenting, I hope someone here can help me out with the final steps.
I'm about to use OpenCV for counting 2 differently colored kinds of objects.
I convert to an HSV image, define the boundaries for the 2 colors to get a mask for each color. Afterwards I use a kernel to smooth the picture and correct holes.
At the end I use watershed to identify the single beads in the picture.
The algorithm doesn't work too badly, but it is still too imprecise, especially for the blue beads, closely touching objects, and the wall regions (see maskb). I'd be very thankful for any tips for improvement.
The amounts in the picture are ~580 blue and ~1632 white beads.
My code is the following:
# imports implied by the code below (the exact watershed import path depends on
# your scikit-image version; older versions use skimage.morphology.watershed)
import cv2
import numpy as np
import imutils
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

img = cv2.imread('xxx')
#set font
font= cv2.FONT_HERSHEY_SIMPLEX
# shift correction
shifted = cv2.pyrMeanShiftFiltering(img, 15, 30)
#hsv conversion
hsv = cv2.cvtColor(shifted, cv2.COLOR_BGR2HSV)
# define range of white color in HSV for the brown background
# (note: OpenCV stores hue as 0-179 for 8-bit images, so an upper hue of 360 behaves like 179)
lower_white = np.array([0, 0, 190])
upper_white = np.array([360, 255, 255])
maskw = cv2.inRange(hsv, lower_white, upper_white)
# define range of blue (dark) color in HSV
lower_blue = np.array([0, 0, 0])
upper_blue = np.array([360, 255, 48])
maskb = cv2.inRange(hsv, lower_blue, upper_blue)
"""MASK CORRECTION"""
# corrects open lines and holes in the picture -- closing doesn't work; opening leads to a better result
kernelOpen=np.ones((4,4))
kernelClose=np.ones((5,5))
#morphology mask white
maskOpenw=cv2.morphologyEx(maskw,cv2.MORPH_OPEN,kernelOpen)
maskClosew=cv2.morphologyEx(maskOpenw,cv2.MORPH_CLOSE,kernelClose)
#morphology mask blue
maskOpenb=cv2.morphologyEx(maskb,cv2.MORPH_OPEN,kernelOpen)
maskCloseb=cv2.morphologyEx(maskOpenb,cv2.MORPH_CLOSE,kernelClose)
""" FOR WHITE!!! - min distance factor to play"""
# compute the exact Euclidean distance from every binary
# pixel to the nearest zero pixel, then find peaks in this
# distance map
D = ndimage.distance_transform_edt(maskOpenw)
localMax = peak_local_max(D, indices=False, min_distance=9,
                          labels=maskOpenw)  # indices=False requires scikit-image < 0.20
# perform a connected component analysis on the local peaks,
# using 8-connectivity, then apply the Watershed algorithm
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=maskOpenw)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
# loop over the unique labels returned by the Watershed
# algorithm
for label in np.unique(labels):
# if the label is zero, we are examining the 'background'
# so simply ignore it
if label == 0:
continue
# otherwise, allocate memory for the label region and draw
# it on the mask
mask = np.zeros(maskOpenw.shape, dtype="uint8")
mask[labels == label] = 255
# detect contours in the mask and grab the largest one
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
c = max(cnts, key=cv2.contourArea)
# draw a circle enclosing the object
((x, y), r) = cv2.minEnclosingCircle(c)
cv2.circle(img, (int(x), int(y)), int(r), (0, 255, 0), 2)
# cv2.putText(img, "#{}".format(label), (int(x) - 10, int(y)),
# cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
""" FOR Blue!!!"""
# compute the exact Euclidean distance from every binary
# pixel to the nearest zero pixel, then find peaks in this
# distance map
D = ndimage.distance_transform_edt(maskOpenb)  # the original used maskb here but maskOpenb below; keep them consistent
localMax = peak_local_max(D, indices=False, min_distance=9,
                          labels=maskOpenb)
# perform a connected component analysis on the local peaks,
# using 8-connectivity, then apply the Watershed algorithm
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=maskOpenb)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
# loop over the unique labels returned by the Watershed
# algorithm
for label in np.unique(labels):
# if the label is zero, we are examining the 'background'
# so simply ignore it
if label == 0:
continue
# otherwise, allocate memory for the label region and draw
# it on the mask
mask = np.zeros(maskOpenb.shape, dtype="uint8")
mask[labels == label] = 255
# detect contours in the mask and grab the largest one
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
c = max(cnts, key=cv2.contourArea)
# draw a circle enclosing the object
((x, y), r) = cv2.minEnclosingCircle(c)
cv2.circle(img, (int(x), int(y)), int(r), (0, 255, 0), 2)
#cv2.putText(img, "#{}".format(label), (int(x) - 10, int(y)),
# cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
# show picture
cv2.imshow("image", img)
cv2.waitKey()
Thanks in advance,
Jannik
EDIT: I tried to do a distance transformation for the blue beads which leads to the following results:
Distance Transformation
The added Code:
# sure background area
sure_bg = cv2.dilate(maskb,kernelOpen,iterations=5)
cv2.imshow("surebg", sure_bg)
# Finding sure foreground area
dist_transform = cv2.distanceTransform(maskb,cv2.DIST_L2,3)
ret, sure_fg = cv2.threshold(dist_transform,0.14*dist_transform.max(),255,0)
# Finding unknown region
sure_fg = np.uint8(sure_fg)
cv2.imshow("surefg", sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
cv2.imshow("unknown", unknown)
# Marker labelling
ret, markers = cv2.connectedComponents(sure_fg)
# Add one to all labels so that sure background is not 0, but 1
markers = markers+1
# Now, mark the region of unknown with zero
markers[unknown==255] = 0
markers = cv2.watershed(img,markers)
img[markers == -1] = [0,0,255]
print(ret)
cv2.imshow("img", img)
The marking of the beads works quite well, even though some regions are imprecise. If I understood correctly, the "ret" value gives me the count of marked objects. Does anyone have an idea how to reach a more precise count?
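For what it's worth, here is a minimal sketch of one way to turn the watershed result into a count (the area bounds are assumptions to tune): ret from cv2.connectedComponents includes the background label, so the raw estimate is ret - 1, and filtering the final watershed regions by area discards slivers from the wall regions:
# after markers = cv2.watershed(img, markers) above
min_area, max_area = 30, 2000  # assumed plausible bead sizes in pixels; tune for your images
count = 0
for label in np.unique(markers):
    if label <= 1:  # -1 = boundaries, 0 = unknown, 1 = sure background (after the +1 shift)
        continue
    area = int(np.sum(markers == label))
    if min_area <= area <= max_area:
        count += 1
print("filtered bead count:", count)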
I have a calibration process in which I want to get the maximum height and width of the screen. So I made a growing rectangle whose coordinates I want to obtain by image processing. This rectangle will always be viewed at an angle.
I have used a colored rectangle to detect it in an HSV range, but it doesn't seem to be working. First I detect the green rectangle, then threshold it, then detect Canny edges, then find contours and filter out the largest contour. The final contour with approximation is shown in the 'img' tab in the screenshot.
The problem here is that the farther edge of the green rectangle appears black and doesn't get detected in the HSV range, so the maximum width and height are not obtained, even though I have the top-left and bottom-right corners of the rectangle from edge detection.
Is there a way to track the scaling rectangle other than detecting the HSV range of a colored rectangle? Since the rectangle is moving, there should be.
CODE:
video = cv2.VideoCapture('rectCalibration2.mp4')
img = np.zeros((360,640,3))
prevImg = np.zeros((360,640,3))
finalContour = []
end_time = time.time() + 15
largestArea = 5000
while(video.isOpened()):
_, frame = video.read()
if frame is None:
print('ended')
break
frame = cv2.resize(frame, (640, 360))
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
greenLower = (55, 100, 6)
greenUpper = (70, 255, 255)
mask = cv2.inRange(hsv, greenLower, greenUpper)
th = cv2.adaptiveThreshold(mask,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
th = cv2.erode(th, None, iterations=3)
th = cv2.dilate(th, None, iterations=3)
edges = cv2.Canny(th,200,400)
    m, contours, hierarchy = cv2.findContours(edges, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
largeContours = []
for contour in contours:
area = cv2.contourArea(contour)
if area > largestArea:
largestArea = area
largeContours.append(contour)
finalContour = contour
img = np.zeros((360,640,3))
cv2.drawContours(img, largeContours, -1, (255, 255, 255))
    # compare by content, and compare before updating prevImg;
    # 'prevImg is not img' right after 'prevImg = img' can never be true
    if not np.array_equal(prevImg, img):
        end_time = time.time() + 15
    prevImg = img
if time.time() > end_time:
epsilon = 0.1 * cv2.arcLength(finalContour, True)
approx = cv2.approxPolyDP(finalContour, epsilon, True)
print(approx)
prevImg = np.zeros((360,640,3))
cv2.drawContours(prevImg,[approx],0,(255,255,255),2)
cv2.imshow('final', prevImg)
video.release()
break
cv2.imshow('frame',frame)
cv2.imshow('img', img)
cv2.imshow('edges', edges)
if cv2.waitKey(33) == ord('q'):
video.release()
break
Small Rectangle
Frame, Edges, Contours
The 'img' tab shows the finalContour, which does not span the maximum width and height.
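Since the rectangle is the only thing moving in an otherwise static scene, one alternative worth trying is background subtraction, which sidesteps the HSV range problem entirely. This is only a sketch under that static-scene assumption (and it uses the OpenCV 4 two-value findContours signature):
import cv2

video = cv2.VideoCapture('rectCalibration2.mp4')
backSub = cv2.createBackgroundSubtractorMOG2(history=100, varThreshold=25, detectShadows=False)
while video.isOpened():
    _, frame = video.read()
    if frame is None:
        break
    frame = cv2.resize(frame, (640, 360))
    fgMask = backSub.apply(frame)  # moving pixels, regardless of their color
    contours, hierarchy = cv2.findContours(fgMask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        c = max(contours, key=cv2.contourArea)
        epsilon = 0.1 * cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, epsilon, True)
        cv2.drawContours(frame, [approx], -1, (255, 255, 255), 2)
    cv2.imshow('frame', frame)
    if cv2.waitKey(33) == ord('q'):
        break
video.release()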
I'm trying to modify this code to allow tracking of multiple objects of the same color and to draw a path where each object travels. Currently the code just tracks the largest object based on color, and the travel path disappears as the object moves around the video. Finally, I could use some guidance on how to create a new video file capturing the paths. This is my first post, so I'm not sure if the code was posted correctly, lol. Go easy on me ;)
from collections import deque
import numpy as np
import argparse
import imutils
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the (optional) video file")
ap.add_argument("-b", "--buffer", type=int, default=64,
help="max buffer size")
args = vars(ap.parse_args())
# define the lower and upper boundaries of the "green"
# object in the HSV color space, then initialize the
# list of tracked points
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])
# if a video path was not supplied, grab the reference
# to the webcam
if not args.get("video", False):
camera = cv2.VideoCapture(0)
# otherwise, grab a reference to the video file
else:
camera = cv2.VideoCapture(args["video"])
# keep looping
while True:
# grab the current frame
(grabbed, frame) = camera.read()
# if we are viewing a video and we did not grab a frame,
# then we have reached the end of the video
if args.get("video") and not grabbed:
break
# resize the frame, blur it, and convert it to the HSV
# color space
frame = imutils.resize(frame, width=600)
# blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# construct a mask for the color "green", then perform
# a series of dilations and erosions to remove any small
# blobs left in the mask
mask = cv2.inRange(hsv, greenLower, greenUpper)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# find contours in the mask and initialize the current
# (x, y) center of the ball
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
center = None
# only proceed if at least one contour was found
if len(cnts) > 0:
# find the largest contour in the mask, then use
# it to compute the minimum enclosing circle and
# centroid
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# only proceed if the radius meets a minimum size
if radius > 10:
# draw the circle and centroid on the frame,
# then update the list of tracked points
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
cv2.circle(frame, center, 5, (0, 0, 255), -1)
# update the points queue
pts.appendleft(center)
# loop over the set of tracked points
    for i in range(1, len(pts)):  # xrange exists only in Python 2
# if either of the tracked points are None, ignore
# them
if pts[i - 1] is None or pts[i] is None:
continue
# otherwise, compute the thickness of the line and
# draw the connecting lines
thickness = int(np.sqrt(args["buffer"] / float(i + 1)) * 2.5)
cv2.line(frame, pts[i - 1], pts[i], (0, 0, 255), thickness)
# show the frame to our screen
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
# if the 'q' key is pressed, stop the loop
if key == ord("q"):
break
# cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()
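Two hedged sketches toward the goals above, using only calls already present in the script plus cv2.VideoWriter; the minimum area, indexing scheme, and output file name are assumptions. First, instead of taking only max(cnts, ...), iterate over every contour above a size threshold and keep one deque of points per object index (note that naive left-to-right indexing does not handle objects crossing paths; robust multi-object tracking needs a real matching step):
# replace the single-object block inside the while loop with something like this,
# with `paths = {}` initialized once before the loop:
cnts = [c for c in cnts if cv2.contourArea(c) > 100]       # assumed minimum area
cnts = sorted(cnts, key=lambda c: cv2.boundingRect(c)[0])  # naive left-to-right object index
for idx, c in enumerate(cnts):
    M = cv2.moments(c)
    if M["m00"] == 0:
        continue
    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
    paths.setdefault(idx, deque(maxlen=args["buffer"])).appendleft(center)
    for i in range(1, len(paths[idx])):
        cv2.line(frame, paths[idx][i - 1], paths[idx][i], (0, 0, 255), 2)
To save the annotated frames as a new video, create a cv2.VideoWriter lazily once the frame size is known and write every frame before showing it:
# before the loop:
writer = None
# inside the loop, after drawing on `frame`:
if writer is None:
    h, w = frame.shape[:2]
    fourcc = cv2.VideoWriter_fourcc(*"MJPG")
    writer = cv2.VideoWriter("paths_output.avi", fourcc, 30.0, (w, h))  # assumed name and fps
writer.write(frame)
# after the loop:
writer.release()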
I want to detect a shape with OpenCV and Python, so I chose Contour Features, but now I have a problem: how can I distinguish between a square and a lozenge using OpenCV and Python? If there is another method, can you please tell me? My image looks like this:
I add my code:
#-*- coding: utf-8 -*-
import cv2
import numpy as np
from pyimagesearch.shapedetector import ShapeDetector
import argparse
import imutils
from scipy import ndimage
import math
import matplotlib.pyplot as plt
from skimage import io, morphology, img_as_bool, segmentation
global limit
# cv2.threshold(src, thresh, maxval, type[, dst])
import math
def angle(pt1, pt2):
x1, y1 = pt1
x2, y2 = pt2
inner_product = x1*x2 + y1*y2
len1 = math.hypot(x1, y1)
len2 = math.hypot(x2, y2)
return math.acos(inner_product/(len1*len2))
def calculate(pt, ls):
i = 2
for x in ls:
pt2 = (x, i)
i = i+1
ang = angle(pt, pt2)*180/math.pi
ang = ang * (-1)
print (ang)
Image = cv2.imread("114.png")
# Extraction of Blue channel
b = Image[:,:,0]
# Callback function for the trackbar (does not do any work)
def nothing(*arg):
pass
# Generate trackbar Window Name
TrackbarName = "Trackbar"
# Make Window and Trackbar
cv2.namedWindow("window", cv2.WINDOW_NORMAL)
cv2.createTrackbar(TrackbarName, "window", 0, 250, nothing)
img_threshed = np.zeros(b.shape, np.uint8)
ret,img_threshed = cv2.threshold(b,168,255,cv2.THRESH_BINARY)
cv2.imshow("window55", img_threshed)
# Expanding borders of the objects
kernel = np.ones((9, 9),np.uint8)
img_dilated = cv2.dilate(img_threshed, kernel)
cv2.namedWindow("Dilated Blue Channel", cv2.WINDOW_NORMAL)
cv2.imshow("Dilated Blue Channel", img_dilated)
# Retrieving contours by subtracting the base objects from the expanded objects
img_contours = img_dilated - img_threshed
cv2.namedWindow("Contours", cv2.WINDOW_NORMAL)
cv2.imshow("Contours", img_contours)
median = cv2.medianBlur(img_contours,3)
cv2.imshow("median img_threshed", median)
#_, contours0, hierarchy = cv2.findContours( median, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#cnts = [cv2.approxPolyDP(cnt, 2, True) for cnt in contours0]
gray = cv2.imread('114.png')
#gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
# apply Canny edge detection using a wide threshold, tight
# threshold, and automatically determined threshold
wide = cv2.Canny(blurred, 90, 150)
cnts = cv2.findContours(img_contours, cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#----Find contour in the image----
_, contours, hierarchy = cv2.findContours(img_contours, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
# loop over the contours
for c in cnts:
#----Draw a rectangle having minimum area around it using Contour features as you mentioned----
    rect = cv2.minAreaRect(c) # minimum-area rotated rectangle around the current contour
box = cv2.boxPoints(rect)
box = np.int0(box)
im = cv2.drawContours(Image, [box], 0, (0,0,255), 2)
#----Draw one diagonal ----
#cv2.line(Image,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2)
#cv2.line(Image,(0,10),(Image.shape[1], 10), (255,255,0),2)
#calculate(cv2.line(Image,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2),cv2.line(Image,(0,10),(Image.shape[1], 10), (255,255,0),2))
cv2.imwrite("Final_Image.jpg", Image)
# show the output image
cv2.imshow("Image", Image)
cv2.waitKey(0)
cv2.destroyAllWindows()
As mentioned in the comments section, if you want to distinguish an apparent square from a lozenge, the only property that is distinct is the diagonals.
Using Python with OpenCV, I coded the following to obtain one diagonal for the square and the lozenge:
#----Find contour in the image----
_, contours, hierarchy = cv2.findContours(th, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
#----Draw a rectangle having minimum area around it using Contour features as you mentioned----
rect = cv2.minAreaRect(contours[0]) #---I used contours[0] since there is only one contour in the image----
box = cv2.boxPoints(rect)
box = np.int0(box)
im = cv2.drawContours(im1, [box], 0, (0,0,255), 2)
#----Draw one diagonal ----
cv2.line(im1,(box[2][0],box[2][1]),(box[0][0],box[0][1]), (255,0,0),2)
cv2.imwrite("Final_Image.jpg", im1)
This is what I get:
SQUARE:
LOZENGE:
Now, since you have obtained the diagonal, you have to compare it with a reference line to find the angle, in order to determine whether it is a square or not.
For that, first draw a reference line (I considered a horizontal line):
cv2.line(im1,(0,10),(im1.shape[1], 10), (255,255,0),2)
You will get :
SQUARE:
LOZENGE:
Now you just have to calculate the angle between these two lines (the diagonal and the reference line):
If the angle is 90 degrees or 0 => Lozenge.
Otherwise => Square.
How do you calculate angles between two lines?
See THIS POST
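A minimal sketch of that angle test, assuming the box points computed above (the tolerance is an assumption):
import math

def line_angle_deg(p1, p2):
    """Angle of the line p1->p2 relative to the horizontal axis, in [0, 180)."""
    return math.degrees(math.atan2(p2[1] - p1[1], p2[0] - p1[0])) % 180.0

# diagonal endpoints taken from the rotated box computed above
ang = line_angle_deg((box[2][0], box[2][1]), (box[0][0], box[0][1]))
tol = 10  # assumed tolerance in degrees
if min(ang, 180 - ang) < tol or abs(ang - 90) < tol:
    print("lozenge")  # diagonal roughly horizontal or vertical
else:
    print("square")   # diagonal near 45 degrees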