Getting more contours than expected in an image - Python

I have an image containing the character N roughly 90 times, but I am getting 105 contours using the code below:
import cv2
import imutils
from imutils.contours import sort_contours

# 'image' is the BGR input frame (e.g. loaded with cv2.imread or grabbed from the webcam)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#cv2.imshow("Image", gray)
#cv2.waitKey(0)
blurred = cv2.GaussianBlur(gray, (5, 5), 0) #blur to reduce noise
#cv2.imshow("Image", blurred)
#cv2.waitKey(0)
# perform edge detection, find contours in the edge map, and sort the
# resulting contours from left-to-right
edged = cv2.Canny(blurred, 30, 150) #30, 150
#cv2.imwrite("test.png", edged)
#cv2.imshow("Image", edged)
#cv2.waitKey(0)
#find contours of characters(objects) in images
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
if cnts:
    cnts = sort_contours(cnts, method="left-to-right")[0]
cv2.drawContours(image, cnts, -1, (0, 0, 255), 2)
cv2.imwrite("all_contours.jpg", image)
I have tried different parameter combinations in the Canny and findContours functions, but I can't get the number of contours to match the number of Ns in the image.
My image:
image with contours:
Looking at the contours image, I can't see where the problem is.
Any help or hint will be appreciated.
P.S.: this image is an ideal one for testing. In a real scenario, the image will be taken from a webcam.

Try without blurring the image and use a plain binary threshold instead:
import cv2
image = cv2.imread("n.png")
image = cv2.resize(image, (800, 800))
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, threshold = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cv2.putText(image, "contours found in image : " + str(len(contours)), (20, 20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0),1)
cv2.drawContours(image, contours, -1, (0, 0, 255), -1)
cv2.imshow("contour_image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
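If the contour count still comes out higher than the number of characters, tiny specks or broken strokes are the usual culprits. A minimal sketch of filtering by contour area, reusing the thresholding approach above (the min_area value is a hypothetical cutoff to tune for your image or webcam resolution):
import cv2
image = cv2.imread("n.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
_, threshold = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(threshold, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
min_area = 50  # hypothetical minimum area in pixels; tune for your resolution
kept = [c for c in contours if cv2.contourArea(c) >= min_area]
print("contours before filtering:", len(contours))
print("contours after filtering:", len(kept))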

Related

An output image file with red contours of all objects

I have the following code:
import cv2 as cv
import numpy as np
image = cv.imread("input1.jpg")
img_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
img_denoised = cv.GaussianBlur(img_gray,(5,5),2)
ret, thresh = cv.threshold(img_denoised, 216, 255, cv.THRESH_BINARY)
kernel = np.ones((1,1),np.uint8)
opening = cv.dilate(thresh, kernel)
opening = cv.erode(opening, kernel)
# detect the contours on the binary image using cv.CHAIN_APPROX_NONE
contours, hierarchy = cv.findContours(image=opening, mode=cv.RETR_TREE, method=cv.CHAIN_APPROX_NONE)
for i in contours:
    x, y, w, h = cv.boundingRect(i)
    cv.drawContours(image, [i], -1, (0, 0, 255), 2)
cv.imshow("A.jpg", image)
cv.waitKey(0)
cv.destroyAllWindows()
Output:
It only shows the stars with red contours, but I want all the text to have red contours as well, including the background. Here is the original file:
Many thanks in advance!
I messed with this a bit and the best outcome I could get was the following. I think with some tweaking you could ignore the shading; as I'm converting it to grayscale, it seems to be dropping the correct contour on the shapes, but the text is working as expected:
import cv2
import numpy as np
src = cv2.imread('c:\\input1.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
# blur
blur = cv2.GaussianBlur(gray, (3, 3), 0)
# canny edge
canny = cv2.Canny(blur, 100, 200)
# dilate
kernel = np.ones((5, 5))
dilate = cv2.dilate(canny, kernel, iterations=1)
# find contours
contours, hierarchy = cv2.findContours(
dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# draw contours
cv2.drawContours(src, contours, -1, (0, 255, 0), 3)
cv2.imshow("a.jpg", src)
cv2.waitKey()
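As a follow-up, it can be easier to check coverage with boxes than with raw outlines; a minimal sketch (reusing contours and src from the snippet above) that draws a bounding rectangle around each detected region:
# draw a bounding box around each detected region to verify that no text was missed
for c in contours:
    x, y, w, h = cv2.boundingRect(c)
    cv2.rectangle(src, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imshow("boxes", src)
cv2.waitKey()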

Hand contour not precise

I wrote an OpenCV program to extract the hand from the image precisely, but it is not able to get it out correctly. Below are the code, the output, and the sample image which I used to test it.
import numpy as np
import cv2
# Reading image
font = cv2.FONT_HERSHEY_COMPLEX
img2 = cv2.imread('1.bmp', cv2.IMREAD_COLOR)
# Reading same image in another
# variable and converting to gray scale.
img = cv2.imread('1.bmp', cv2.IMREAD_GRAYSCALE)
# Converting image to a binary image
# ( black and white only image).
_, threshold = cv2.threshold(img, 110, 255, cv2.THRESH_BINARY)
# Detecting contours in image.
contours, _= cv2.findContours(threshold, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
contours1 = max(contours, key=cv2.contourArea)
# Approximating the largest contour found in the image.
approx = cv2.approxPolyDP(contours1, 0.009 * cv2.arcLength(contours1, True), True)
# draws boundary of contours.
cv2.drawContours(img2, [approx], 0, (0, 0, 255), 5)
cv2.imshow('image2', img2)
# Exiting the window if 'q' is pressed on the keyboard.
if cv2.waitKey(0) & 0xFF == ord('q'):
    cv2.destroyAllWindows()
Input image -
One of the reasons your contour is not precise is the obvious one: the line where you approximated the contour. But you have also mentioned (in a comment) that lowering the approximation didn't solve the problem.
This is because you didn't blur the thresholded image, which resulted in the jagged edges. Here is an example where the thresholded image is blurred before the contour detection:
The code:
import cv2
import numpy as np
def process(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv2.threshold(img_gray, 111, 255, cv2.THRESH_BINARY)[1]

def draw_contours(img):
    contours, _ = cv2.findContours(process(img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnt = max(contours, key=cv2.contourArea)
    cv2.drawContours(img, [cnt], -1, (0, 0, 255), 2)
img = cv2.imread("image.png")
draw_contours(img)
cv2.imshow("result", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Input image:
Output image:
Still, the contour isn't very precise. This is where the Canny edge detector comes into play:
import cv2
import numpy as np
def process(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(img_gray, 111, 255, cv2.THRESH_BINARY)
    img_blur = cv2.GaussianBlur(thresh, (5, 5), 4)
    img_canny = cv2.Canny(img_blur, 0, 0)
    img_dilate = cv2.dilate(img_canny, None, iterations=1)
    return cv2.erode(img_dilate, None, iterations=0)

def draw_contours(img):
    contours, _ = cv2.findContours(process(img), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnt = max(contours, key=cv2.contourArea)
    cv2.drawContours(img, [cnt], -1, (0, 0, 255), 2)
img = cv2.imread("image.png")
draw_contours(img)
cv2.imshow("result", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

How to detect ellipse and remove outliers in image using opencv Python

I'm trying to extract the dots that form an ellipse and then draw it, but because of some points that can be considered outliers, I get an invalid ellipse mask, like this:
Here is the code that I'm executing, but it always selects the outlier:
cv2.rectangle(cleanedpartiallyimage, (0, 0), (1200, 10), (0, 0, 0), -1)
cv2.rectangle(cleanedpartiallyimage, (0, 0), (47, 1200), (0, 0, 0), -1)
image = cv2.cvtColor(cleanedpartiallyimage, cv2.COLOR_BGR2HSV)
lower = np.array([85, 0, 20], dtype="uint8")
upper = np.array([95, 255, 255], dtype="uint8")
mygray = cv2.inRange(image, lower, upper)
#--- Gaussian and Canny filters to make it easy to get the contours
blurred = cv2.GaussianBlur(mygray, (5, 5), 0)
imageCanny = cv2.Canny(blurred, 0, 100, 0)
ret, th = cv2.threshold(imageCanny, 127, 255, 0)
#--- Find all the contours in the binary image ---
contours, hierarchy = cv2.findContours(th, 3, 1)
cnt = contours
big_contour = []
max = 0
for i in cnt:
    area = cv2.contourArea(i)  #--- find the contour having the biggest area ---
    if area > max:
        max = area
        big_contour = i
final = cv2.drawContours(imageCanny, big_contour, -1, (0, 255, 0), 3)
actualcontours, hierarchy = cv2.findContours(final, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
#--- Removing side contour points
actualcontours = getactualcontours(actualcontours, 60)
empty = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
#--- Removes linear contour points
ConvexHullPoints = contoursConvexHull(actualcontours)
#--- Converts the points to an ellipse using fitEllipse
test41 = cv2.polylines(empty, [ConvexHullPoints], True, (255, 255, 255), 3)
imageCannyee = cv2.Canny(test41, 0, 100, 0)
contours, hierarchy = cv2.findContours(imageCannyee, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for cont in contours:
    if len(cont) < 20:
        break
    elps = cv2.fitEllipse(cont)
anotherempty = np.zeros((image.shape[0], image.shape[1], 3), np.uint8)
#--- Drawing the ellipse into the empty mask
cv2.ellipse(anotherempty, elps, (255, 255, 255), 2)
plt.imshow(anotherempty)
Here's a simple approach:
Obtain binary image. We load the image, convert to grayscale, Gaussian blur, then Otsu's threshold to obtain a binary image.
Dilate to form single contour. Next we create an elliptical shaped kernel using cv2.getStructuringElement with the cv2.MORPH_ELLIPSE parameter and dilate to combine small individual contours into a single large contour.
Identify ellipse. Next we find contours, filter using contour area and then detect the ellipse with cv2.fitEllipse().
import cv2
# Load image, grayscale, Gaussian blur, Otsu's threshold
image = cv2.imread('1.png')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3,3), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Dilate with elliptical shaped kernel
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3))
dilate = cv2.dilate(thresh, kernel, iterations=2)
# Find contours, filter using contour threshold area, draw ellipse
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    area = cv2.contourArea(c)
    if area > 5000:
        ellipse = cv2.fitEllipse(c)
        cv2.ellipse(image, ellipse, (36,255,12), 2)
cv2.imshow('thresh', thresh)
cv2.imshow('dilate', dilate)
cv2.imshow('image', image)
cv2.waitKey()
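One caveat worth noting: cv2.fitEllipse needs at least five contour points, so if smaller contours can slip past the area filter it is worth guarding the call. A minimal variation of the loop above with that extra check:
for c in cnts:
    # fitEllipse requires at least 5 contour points
    if len(c) >= 5 and cv2.contourArea(c) > 5000:
        ellipse = cv2.fitEllipse(c)
        cv2.ellipse(image, ellipse, (36,255,12), 2)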

How can I draw two contours on an image given a contour distance of 100 pixels between them using python and opencv

What I have tried is drawing the external contour using the following lines:
cnts, hierarchy = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(orig, cnts, -1, (0, 255, 0), 3) # this draws the external contour
Referring to the image below:
How can I arrive at the answer below?
I don't know how it is solved in the links, but you can use a blank mask on which you draw your contour, and then use cv2.dilate to expand it with a kernel sized to the number of pixels you require between the contours. Once that is done, find contours on the mask and draw the second one onto your original image.
import cv2
import numpy as np
img = cv2.imread('contour.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
cnts, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, cnts, -1, (0, 255, 0), 3)
mask = np.zeros(img.shape[:2], dtype=np.uint8)
cv2.drawContours(mask, cnts, -1, 255, 1)
kernel = np.ones((100, 100), np.uint8)
mask = cv2.dilate(mask, kernel, iterations = 1)
cnts, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, cnts, 1, (255, 0, 0), 3)
cv2.imshow("img", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
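A hedged aside on the kernel size: dilating a one-pixel-wide contour with an N x N rectangular kernel pushes the boundary out by roughly N/2 pixels (a bit more along the diagonals), so a square 100 x 100 kernel gives closer to a 50-pixel offset. For a uniform 100-pixel gap, a circular structuring element whose radius equals the desired distance is usually a closer match. A minimal sketch of that variation, assuming img and the original external contours cnts from the threshold step above:
gap = 100  # desired distance in pixels between the two contours
# a disk-shaped kernel of radius `gap` expands the drawn contour by ~gap pixels in every direction
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * gap + 1, 2 * gap + 1))
mask = np.zeros(img.shape[:2], dtype=np.uint8)
cv2.drawContours(mask, cnts, -1, 255, 1)
dilated = cv2.dilate(mask, kernel, iterations=1)
outer, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, outer, -1, (255, 0, 0), 3)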

Difficulty extracting characters using MSER in images which have borders around the text

I am trying to develop an OCR system. I am trying to use MSER to extract characters from an image and then pass those characters into a CNN to recognize them. Here is my code for character extraction:
import cv2
import numpy as np
# create MSER object
mser = cv2.MSER_create()
# read the image
img = cv2.imread('textArea01.png')
# convert to gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# store copy of the image
vis = img.copy()
# detect regions in the image
regions,_ = mser.detectRegions(gray)
# find convex hulls of the regions and draw them onto the original image
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
cv2.polylines(vis, hulls, 1, (0, 255, 0))
# create mask for the detected region
mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
mask = cv2.dilate(mask, np.ones((150, 150), np.uint8))
for contour in hulls:
    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)
#this is used to find only text regions, remaining are ignored
text_only = cv2.bitwise_and(img, img, mask=mask)
cv2.imshow('img', vis)
cv2.waitKey(0)
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.imshow('text', text_only)
cv2.waitKey(0)
This is working fine for most images, but for some images, like this one:
The outer border is also detected as a region, and the contour is drawn in the mask such that the entire area inside the border is treated as a text region, so the contours inside have no effect. How do I prevent this so that only the text is detected?
Hulls detected:
and the mask as a result:
My result using this code:
import cv2
import numpy as np
img = cv2.imread("img.png")
# grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)
# binary
# ret, thresh = cv2.threshold(gray, 250, 255, cv2.THRESH_BINARY_INV)
thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 35, 180)
cv2.imshow('threshold', thresh)
# dilation
kernel = np.ones((1, 1), np.uint8)
img_dilation = cv2.dilate(thresh, kernel, iterations=1)
cv2.imshow('dilated', img_dilation)
# find contours
# cv2.findContours() changed from OpenCV 3 to OpenCV 4: it now returns two values instead of three
cv2MajorVersion = cv2.__version__.split(".")[0]
# check for contours on thresh
if int(cv2MajorVersion) >= 4:
    ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
else:
    im2, ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# sort contours
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
for i, ctr in enumerate(sorted_ctrs):
    # Get bounding box
    x, y, w, h = cv2.boundingRect(ctr)
    # Getting ROI
    roi = img[y:y + h, x:x + w]
    # show ROI
    # cv2.imshow('segment no:'+str(i),roi)
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 1)
    # if you want to save the letters without the green bounding box, comment out the line above
    if w > 5:
        cv2.imwrite('C:\\Users\\PC\\Desktop\\output\\{}.png'.format(i), roi)
cv2.imshow('marked areas', img)
cv2.waitKey(0)
You can have a threshold on the contour area so that it ignores all shapes that cover more than a certain area in the image.
for contour in hulls:
    # skip hulls whose area is above the threshold (e.g. the page border), as described above
    if cv2.contourArea(contour) > ThresholdArea:
        continue
    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)
#this is used to find only text regions, remaining are ignored
text_only = cv2.bitwise_and(img, img, mask=mask)
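ThresholdArea is left undefined above; a hedged way to pick it is relative to the image size, treating any hull that covers more than, say, half the frame as a border rather than a character (the 0.5 fraction is an assumption to tune):
h, w = img.shape[:2]
ThresholdArea = 0.5 * h * w  # hulls larger than half the image area are assumed to be the border, not text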
