Cropping a concave polygon from an image using OpenCV in Python

How can I crop a concave polygon from an image? My input image looks like this:
and the coordinates of the closed polygon are
[10,150],[150,100],[300,150],[350,100],[310,20],[35,10]. I want the region bounded by the concave polygon to be cropped using OpenCV. I searched other similar questions but was not able to find a correct answer, which is why I am asking here. Can you help me?
Any help would be highly appreciated!

Steps
Find the bounding rect of the poly points
Create a mask using the poly points
Do a mask op to crop
Add a white background if needed
The code:
# 2018.01.17 20:39:17 CST
# 2018.01.17 20:50:35 CST
import numpy as np
import cv2
img = cv2.imread("test.png")
pts = np.array([[10,150],[150,100],[300,150],[350,100],[310,20],[35,10]], dtype=np.int32)
## (1) Crop the bounding rect
rect = cv2.boundingRect(pts)
x,y,w,h = rect
cropped = img[y:y+h, x:x+w].copy()
## (2) make mask
pts = pts - pts.min(axis=0)
mask = np.zeros(cropped.shape[:2], np.uint8)
cv2.drawContours(mask, [pts], -1, (255, 255, 255), -1, cv2.LINE_AA)
## (3) do bit-op
dst = cv2.bitwise_and(cropped, cropped, mask=mask)
## (4) add the white background
bg = np.ones_like(cropped, np.uint8)*255
cv2.bitwise_not(bg, bg, mask=mask)
dst2 = bg + dst
cv2.imwrite("cropped.png", cropped)
cv2.imwrite("mask.png", mask)
cv2.imwrite("dst.png", dst)
cv2.imwrite("dst2.png", dst2)
Source image:
Result:
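If a transparent background is preferred instead of white, one option (an addition to the answer above, reusing the cropped and mask arrays from it) is to attach the mask as an alpha channel before saving:
## (5) optional: transparent background instead of white
bgra = cv2.cvtColor(cropped, cv2.COLOR_BGR2BGRA)  # add a 4th (alpha) channel
bgra[:, :, 3] = mask                              # polygon mask becomes the alpha
cv2.imwrite("dst_transparent.png", bgra)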

You can do it in 3 steps:
Create a mask out of the image
mask = np.zeros((height, width), dtype=np.uint8)
points = np.array([[[10,150],[150,100],[300,150],[350,100],[310,20],[35,10]]], dtype=np.int32)
cv2.fillPoly(mask, points, (255))
Apply mask to original image
res = cv2.bitwise_and(img,img,mask = mask)
Optionally, you can crop the image to get a smaller one
rect = cv2.boundingRect(points) # returns (x,y,w,h) of the rect
cropped = res[rect[1]: rect[1] + rect[3], rect[0]: rect[0] + rect[2]]
With this, you end up with the cropped image.
UPDATE
For the sake of completeness here is the complete code:
import numpy as np
import cv2
img = cv2.imread("test.png")
height = img.shape[0]
width = img.shape[1]
mask = np.zeros((height, width), dtype=np.uint8)
points = np.array([[[10,150],[150,100],[300,150],[350,100],[310,20],[35,10]]], dtype=np.int32)
cv2.fillPoly(mask, points, (255))
res = cv2.bitwise_and(img,img,mask = mask)
rect = cv2.boundingRect(points) # returns (x,y,w,h) of the rect
cropped = res[rect[1]: rect[1] + rect[3], rect[0]: rect[0] + rect[2]]
cv2.imshow("cropped" , cropped )
cv2.imshow("same size" , res)
cv2.waitKey(0)
For the colored background version, use code like this:
import numpy as np
import cv2
img = cv2.imread("test.png")
height = img.shape[0]
width = img.shape[1]
mask = np.zeros((height, width), dtype=np.uint8)
points = np.array([[[10,150],[150,100],[300,150],[350,100],[310,20],[35,10]]], dtype=np.int32)
cv2.fillPoly(mask, points, (255))
res = cv2.bitwise_and(img,img,mask = mask)
rect = cv2.boundingRect(points) # returns (x,y,w,h) of the rect
im2 = np.full((res.shape[0], res.shape[1], 3), (0, 255, 0), dtype=np.uint8 ) # you can also use other colors or simply load another image of the same size
maskInv = cv2.bitwise_not(mask)
colorCrop = cv2.bitwise_or(im2,im2,mask = maskInv)
finalIm = res + colorCrop
cropped = finalIm[rect[1]: rect[1] + rect[3], rect[0]: rect[0] + rect[2]]
cv2.imshow("cropped" , cropped )
cv2.imshow("same size" , res)
cv2.waitKey(0)

For the blurred image background version, use code like this:
import cv2
import numpy as np
img = cv2.imread(img_path)
box = <box points>  # polygon points wrapped for cv2.fillPoly, e.g. np.array([pts], np.int32)
h, w = img.shape[:2]
# -- background
blur_bg = cv2.blur(img, (h, w))
mask1 = np.zeros((h, w, 3), np.uint8)
mask2 = np.ones((h, w, 3), np.uint8) * 255
cv2.fillPoly(mask1, box, (255, 255, 255))
# -- indexing
img_idx = np.where(mask1 == mask2)  # pixels inside the polygon
bg_idx = np.where(mask1 != mask2)   # pixels outside the polygon
# -- fill box
res = np.zeros((h, w, 3), np.int64)
res[img_idx] = img[img_idx]
res[bg_idx] = blur_bg[bg_idx]
res = res[y1:y2, x1:x2, :].astype(np.uint8)  # y1, y2, x1, x2: crop window around the polygon
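As a self-contained variant, here is a minimal sketch that reuses the polygon points from the question; the test.png file name, the 51x51 blur kernel, and deriving the crop window from cv2.boundingRect are my assumptions, not part of the snippet above:
import cv2
import numpy as np
img = cv2.imread("test.png")
h, w = img.shape[:2]
pts = np.array([[10,150],[150,100],[300,150],[350,100],[310,20],[35,10]], dtype=np.int32)
# heavily blurred copy of the whole image to serve as the background
blur_bg = cv2.blur(img, (51, 51))
# single-channel mask: 255 inside the polygon, 0 outside
mask = np.zeros((h, w), np.uint8)
cv2.fillPoly(mask, [pts], 255)
# keep original pixels inside the polygon, blurred pixels outside
res = np.where(mask[:, :, None] == 255, img, blur_bg)
# crop to the polygon's bounding rect
x, y, bw, bh = cv2.boundingRect(pts)
cv2.imwrite("blur_bg_crop.png", res[y:y+bh, x:x+bw])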

Related

Not enough background filtering

I am trying to filter the background of images of electric cables. I tried the following:
Transform from color to gray
Apply cv2.Laplacian, or cv2.Sobel twice, to find edges in both directions
Apply thresholding with cv2.THRESH_BINARY(_INV) and cv2.THRESH_OTSU
Lastly, find edges on the 'filtered' images using cv2.Canny together with cv2.HoughLinesP
Overall, the results aren't satisfying at all. I will give an example of 2 images:
And the output of my script:
I also played with the values in the config, but the results didn't change much.
Here's the little script I managed to put together:
import cv2
import matplotlib.pyplot as plt
import numpy as np
def img_show(images, cmap=None):
    fig = plt.figure(figsize=(17, 10))
    root = 3  # len(images) ** 0.5
    for i, img in enumerate(images):
        ax = fig.add_subplot(root, root, i + 1)
        ax.imshow(img, cmap=cmap[i])
    plt.show()

class Config:
    scale = 0.4
    min_threshold = 120
    max_threshold = 200
    canny_min_threshold = 100
    canny_max_threshold = 200

config = Config()

def find_lines(img, rgb_img):
    dst = cv2.Canny(img, config.canny_min_threshold, config.canny_max_threshold)
    cdstP = np.copy(rgb_img)
    lines = cv2.HoughLinesP(dst, 1, np.pi / 180, 150, None, 0, 0)
    lines1 = lines[:, 0, :]
    for x1, y1, x2, y2 in lines1[:]:
        cv2.line(cdstP, (x1, y1), (x2, y2), (255, 0, 0), 5)
    return cdstP

if __name__ == "__main__":
    bgr_img = cv2.imread('DJI_0009.JPG')
    bgr_img = cv2.resize(bgr_img, (0, 0), bgr_img, config.scale, config.scale)
    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
    # _, threshold = cv2.threshold(gray_img, config.min_threshold, config.max_threshold, cv2.THRESH_BINARY)
    # laplacian = cv2.Laplacian(rgb_img, cv2.CV_8UC1)
    sobelx = cv2.Sobel(gray_img, cv2.CV_8UC1, 1, 0)
    sobely = cv2.Sobel(gray_img, cv2.CV_8UC1, 0, 1)
    blended = cv2.addWeighted(src1=sobelx, alpha=0.5, src2=sobely, beta=0.5, gamma=0)
    _, threshold = cv2.threshold(blended, config.min_threshold, config.max_threshold,
                                 cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    p1 = find_lines(threshold, rgb_img)
    p2 = find_lines(blended, rgb_img)
    p3 = find_lines(gray_img, rgb_img)
    plots = [rgb_img, p1, p2, p3]
    cmaps = [None] + ['gray'] * (len(plots) - 1)
    img_show(plots, cmaps)
I am assuming I need to do much better filtering. However, I also tried image segmentation, but the results weren't promising at all.
Any ideas on how to improve this?
Thanks
Here is one way to do that in Python/OpenCV. I threshold, then optionally clean with morphology. Then I get the contours and, for each contour, compute its rotated rectangle. From the rotated rectangle's dimensions I compute the aspect ratio (largest dimension / smallest dimension) and optionally the area. Finally, I threshold on the aspect ratio (and optionally the area) and keep only those contours that pass.
Input:
import cv2
import numpy as np
image = cv2.imread("DCIM-100-MEDIA-DJI-0009-JPG.jpg")
hh, ww = image.shape[:2]
# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# create a binary thresholded image
thresh = cv2.threshold(gray, 64, 255, cv2.THRESH_BINARY)[1]
# invert so line is white on black background
thresh = 255 - thresh
# apply morphology
kernel = np.ones((11,11), np.uint8)
clean = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get external contours
contours = cv2.findContours(clean, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
area_thresh = ww / 2
aspect_thresh = ww / 30
print(area_thresh,aspect_thresh)
print('')
result = image.copy()
for c in contours:
    # get rotated rectangle from contour
    # get its dimensions
    rotrect = cv2.minAreaRect(c)
    (center), (dim1,dim2), angle = rotrect
    maxdim = max(dim1,dim2)
    mindim = min(dim1,dim2)
    area = dim1 * dim2
    if mindim != 0:
        aspect = maxdim / mindim
        #print(area, aspect)
        #if area > area_thresh and aspect > aspect_thresh:
        if aspect > aspect_thresh:
            # draw contour on input
            cv2.drawContours(result,[c],0,(0,0,255),3)
            print(area, aspect)
# save result
cv2.imwrite("DCIM-100-MEDIA-DJI-0009-JPG_thresh.jpg",thresh)
cv2.imwrite("DCIM-100-MEDIA-DJI-0009-JPG_clean.jpg",clean)
cv2.imwrite("DCIM-100-MEDIA-DJI-0009-JPG_result.jpg",result)
# display result
cv2.imshow("thresh", thresh)
cv2.imshow("clean", clean)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Thresholded image:
Morphology cleaned image:
Result image:

Rotate image in python and remove the background

Is there a way to rotate these kinds of images, remove the background whitespace (or any other background), and get an image like this?
I tried to remove the background: if the image doesn't have any rotation, I am able to remove the background whitespace using the script below, but if the image has any rotation it doesn't remove the space.
I followed How to crop or remove white background from an image
import cv2
import numpy as np
img = cv2.imread('cheque_img/rotate.PNG')  # use a forward slash (or a raw string) so '\r' is not treated as an escape
## (1) Convert to gray, and threshold
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th, threshed = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)
## (2) Morph-op to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11,11))
morphed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, kernel)
## (3) Find the max-area contour
cnts = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = sorted(cnts, key=cv2.contourArea)[-1]
## (4) Crop and save it
x,y,w,h = cv2.boundingRect(cnt)
dst = img[y:y+h, x:x+w]
cv2.imwrite("001.png", dst)
Please try it with any scanned image: rotate it, get rid of the background whitespace, and rotate it back to its original orientation for further computer vision operations.
Using cv2.boundingRect will give you the minimum non-rotated rectangle that fits the contour. cv2.boundingRect result:
Instead of cv2.boundingRect, you will need to use cv2.minAreaRect to obtain a rectangle that fits the contour. cv2.minAreaRect result:
After obtaining the rotated rect information, you will need to find the affine transform matrix between the model points and the current points. The current points are the corners of the rotated rect, and the model points are the points of the original object: in this case, an object with initial location (0,0) and the width and height of the rotated rect.
Affine might be overkill here, but for generality the affine transform is used.
A detailed explanation is located in the code.
import cv2
import numpy as np
img = cv2.imread('Bcm3h.png')
## (1) Convert to gray, and threshold
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th, threshed = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)
## (2) Morph-op to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11,11))
morphed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, kernel)
## (3) Find the max-area contour
cnts = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = sorted(cnts, key=cv2.contourArea)[-1]
## This will extract the rotated rect from the contour
rot_rect = cv2.minAreaRect(cnt)
# Extract useful data
cx,cy = (rot_rect[0][0], rot_rect[0][1]) # rect center
sx,sy = (rot_rect[1][0], rot_rect[1][1]) # rect size
angle = rot_rect[2] # rect angle
# Set model points : The original shape
model_pts = np.array([[0,sy],[0,0],[sx,0],[sx,sy]]).astype('int')
# Set detected points : Points on the image
current_pts = cv2.boxPoints(rot_rect).astype('int')
# sort the points to ensure match between model points and current points
ind_model = np.lexsort((model_pts[:,1],model_pts[:,0]))
ind_current = np.lexsort((current_pts[:,1],current_pts[:,0]))
model_pts = np.array([model_pts[i] for i in ind_model])
current_pts = np.array([current_pts[i] for i in ind_current])
# Estimate the transform between points
M = cv2.estimateRigidTransform(current_pts,model_pts,True)
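# Note: cv2.estimateRigidTransform was removed in OpenCV 4; there, use
# M, _ = cv2.estimateAffinePartial2D(current_pts.astype(np.float32), model_pts.astype(np.float32))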
# Warp the image
wrap_gray = cv2.warpAffine(gray, M, (int(sx),int(sy)))
# for display
cv2.imshow("dst",wrap_gray)
cv2.waitKey(0)
#cv2.imwrite("001.png", dst)
Result :
Considering you don't know the angle of the rotation, and that it can be different for each scanned image, you need to find it first.
Combine what you already did with accepted answer for this question.
For the image you provided:
Angle is -25.953375702364195
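As a hedged sketch of that angle-finding step (my sketch, assuming the threshold pipeline from the snippets above; the linked answer may differ), the angle can be read off the minimum-area rectangle of the largest contour:
import cv2
import numpy as np
img = cv2.imread('rotated_scan.png')  # assumed file name
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th, threshed = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)
cnts = cv2.findContours(threshed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = max(cnts, key=cv2.contourArea)
# minAreaRect reports the rotation of the tightest rotated rect; the angle's
# range ((-90, 0] vs (0, 90]) depends on the OpenCV version, so it may need
# normalization before use
angle = cv2.minAreaRect(cnt)[2]
print("Angle is", angle)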
If the background is guaranteed to be saturated white (value 255) and the document consists mostly of unsaturated values, binarize below the threshold 255 and fit a bounding rectangle.
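A minimal sketch of that idea (the file name is assumed):
import cv2
img = cv2.imread('scan.png')  # assumed file name
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# pixels strictly below 255 count as document, pure white as background
th, binary = cv2.threshold(gray, 254, 255, cv2.THRESH_BINARY_INV)
coords = cv2.findNonZero(binary)       # all document pixels as points
x, y, w, h = cv2.boundingRect(coords)  # upright bounding box of those points
cropped = img[y:y+h, x:x+w]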
I had some problems running the code presented above, so here is my slightly modified version:
import cv2
import numpy as np
def crop_minAreaRect(img, rect):
    # rotate img
    angle = rect[2]
    print("angle: " + str(angle))
    rows, cols = img.shape[0], img.shape[1]
    M = cv2.getRotationMatrix2D((cols/2, rows/2), angle, 1)
    img_rot = cv2.warpAffine(img, M, (cols, rows))
    # rotate bounding box
    rect0 = (rect[0], rect[1], angle)
    box = cv2.boxPoints(rect0)
    pts = np.int0(cv2.transform(np.array([box]), M))[0]
    pts[pts < 0] = 0
    # crop
    img_crop = img_rot[pts[1][1]:pts[0][1],
                       pts[1][0]:pts[2][0]]
    return img_crop

def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
    dim = None
    (h, w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is None:
        r = height / float(h)
        dim = (int(w * r), height)
    else:
        r = width / float(w)
        dim = (width, int(h * r))
    return cv2.resize(image, dim, interpolation=inter)
img = cv2.imread('rotatedCheque.png')
cv2.imshow("orig", img)
img_copy = img.copy()
# (1) Convert to gray, and threshold
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
th, threshed = cv2.threshold(gray, 240, 255, cv2.THRESH_BINARY_INV)
# (2) Morph-op to remove noise
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
morphed = cv2.morphologyEx(threshed, cv2.MORPH_CLOSE, kernel)
# (3) Find the max-area contour
cnts = cv2.findContours(morphed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
cnt = sorted(cnts, key=cv2.contourArea)[-1]
# This will extract the rotated rect from the contour
rot_rect = cv2.minAreaRect(cnt)
cropped_img = crop_minAreaRect(img, rot_rect)
height, width = img.shape[0], img.shape[1]
if width > height:
    cropped_img = cv2.rotate(cropped_img, cv2.ROTATE_90_CLOCKWISE)
resized_img = ResizeWithAspectRatio(cropped_img, width=800)
cv2.imshow("cropped", resized_img)
cv2.waitKey(0)

Add two images with blended edges

I'm writing a script to detect faces and blur out everything but the face.
I find the faces using Haar cascades, then create a mask with circles where the faces are, and then add the two images together. This works fine when the divide is absolute, but I can't work out how to have the blur taper off without a blunt line. Blurring the mask just creates an ugly line where the tapering should be.
import numpy as np
import cv2
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')
original_image = cv2.imread('person.jpg')
gray_original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
# mask
# detect faces and create mask
mask = np.full(
    (original_image.shape[0], original_image.shape[1], 1), 0, dtype=np.uint8)
faces = face_cascade.detectMultiScale(gray_original_image, 1.3, 5)
face_areas = []
for (x, y, w, h) in faces:
    face_areas.append(([x, y, w, h], original_image[y:y+h, x:x+w]))
    center = (x + w // 2, y + h // 2)
    radius = max(h, w) // 2
    cv2.circle(mask, center, radius, (255), -1)
# blur original image
kernel = np.ones((5, 5), np.float32) / 25
blurred_image = cv2.filter2D(original_image, -1, kernel)
# blur mask to get tapered edge
mask = cv2.filter2D(mask, -1, kernel)
# composite blurred and unblurred faces
mask_inverted = cv2.bitwise_not(mask)
background = cv2.bitwise_and(
    blurred_image, blurred_image, mask=mask_inverted)
foreground = cv2.bitwise_and(original_image, original_image, mask=mask)
composite = cv2.add(background, foreground)
cv2.imshow('composite', composite)
cv2.waitKey(0)
cv2.destroyAllWindows()
Original
Output from script
Desired result (no obvious line between blurred/non blurred)
You did most of the job. Instead of cv2.mean (which, by the way, did not work when I tried it), simply do:
composite = (foreground + background)
and you would get:
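If a visible seam remains, a float alpha blend is a common alternative (my suggestion, not part of the answer above): treat the blurred mask as a per-pixel weight instead of a binary mask, reusing original_image, blurred_image, and mask from the script:
# blurred mask scaled to [0, 1] acts as a per-pixel alpha for a smooth taper
alpha = mask.astype(np.float32) / 255.0
if alpha.ndim == 2:
    alpha = alpha[:, :, None]  # add a channel axis so it broadcasts over BGR
composite = (alpha * original_image + (1.0 - alpha) * blurred_image).astype(np.uint8)
A larger blur on the mask (e.g. cv2.GaussianBlur with a 31x31 kernel) widens the taper.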

Using circle detection and color detection to recognize an object

I am using color detection (purple in particular) and circle detection to detect the pollen objects (the purple circular ones) in the image below.
Then I write the letter "P" on each detected object. Unfortunately it didn't work as I expected.
I can fix it if I change the radius, but that is not a good idea since I still have lots of similar images with various radii to process. I think the main point is how to know the exact range of the purple in this image. Generally, I want to know how to get the range of an arbitrary color in an image. Some people gave me sample code but it didn't work well.
Here is my program.
import cv2
import numpy as np
# In[2]:
path = "./sample.JPG"
font = cv2.FONT_HERSHEY_COMPLEX
# In[3]:
def image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):
    # initialize the dimensions of the image to be resized and
    # grab the image size
    dim = None
    (h, w) = image.shape[:2]
    # if both the width and height are None, then return the
    # original image
    if width is None and height is None:
        return image
    # check to see if the width is None
    if width is None:
        # calculate the ratio of the height and construct the
        # dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # otherwise, the height is None
    else:
        # calculate the ratio of the width and construct the
        # dimensions
        r = width / float(w)
        dim = (width, int(h * r))
    # resize the image
    resized = cv2.resize(image, dim, interpolation = inter)
    # return the resized image
    return resized
# In[4]:
iml = cv2.imread(path,cv2.IMREAD_COLOR)
img = image_resize(iml,width=960)
# In[5]:
hsv = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
hsv = cv2.medianBlur(hsv,5)
#mask = cv2.inRange(hsv, (120, 180, 50), (160, 255, 255))
mask = cv2.inRange(hsv, (105, 100,50), (160, 255, 255))
#mask = cv2.inRange(hsv, (126, 142, 57), (145, 255, 255))
#cv2.GaussianBlur(cimg, (9,9),3)
#cimg = cv2.medianBlur(cimg,5)
output = cv2.bitwise_and(hsv, hsv, mask = mask)
#circles = cv2.HoughCircles(mask[:,:,0],cv2.HOUGH_GRADIENT,1,mask.shape[0]/16,param1=15,param2=20,minRadius=18,maxRadius=38)
circles = cv2.HoughCircles(output[:,:,0],cv2.HOUGH_GRADIENT,1,output.shape[0]/16,param1=15,param2=20,minRadius=15,maxRadius=30)
print(len(circles))
circles = np.uint16(np.around(circles))[0,:]
# In[6]:
for i in circles:
    cv2.putText(img,'P',(i[0],i[1]), font, 0.5,(0,255,0),1,cv2.LINE_AA)
# In[7]:
cv2.imwrite("./result.jpg",img)
Note that this answer is not meant to be a definitive solution but maybe a new point of view for achieving your task. Even though it may work in some cases, it will probably not be robust enough for automating any processes. That being said, the problem with converting to HSV colorspace is that if the image (as in your case) has similar-colored objects drawn on it, then it will be difficult to distinguish one object from another with cv2.inRange(). I tried to alter your code a bit and made an example of how I would approach this.
First, you could look for all contours after OTSU thresholding on the image and filter out the biggest one (the donut) and the other small ones with a criterion of your choosing.
Once you have that, you can make a ROI around each remaining contour. Then I would perform cv2.inRange() on each ROI.
After that, I would search for contours again on each ROI and count white pixels, or apply a "circularity" criterion to the contours. If a contour passes, that means it has a lot of pixels in range, so draw the letter P. Hope it helps a bit. Cheers!
Example:
import cv2
import numpy as np
# In[2]:
path = "./purplecirc4.JPG"
font = cv2.FONT_HERSHEY_COMPLEX
# In[3]:
def image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):
    # initialize the dimensions of the image to be resized and
    # grab the image size
    dim = None
    (h, w) = image.shape[:2]
    # if both the width and height are None, then return the
    # original image
    if width is None and height is None:
        return image
    # check to see if the width is None
    if width is None:
        # calculate the ratio of the height and construct the
        # dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # otherwise, the height is None
    else:
        # calculate the ratio of the width and construct the
        # dimensions
        r = width / float(w)
        dim = (width, int(h * r))
    # resize the image
    resized = cv2.resize(image, dim, interpolation = inter)
    # return the resized image
    return resized
# In[4]:
iml = cv2.imread(path,cv2.IMREAD_COLOR)
img = image_resize(iml,width=960)
# Threshold with OTSU to get all contours
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
_,thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
_, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
# Empty list for contours that could be positive
ROIs=[]
# Append possible contours to list
# (I have selected height to eliminate unwanted noise)
for cnt in contours:
    x,y,w,h = cv2.boundingRect(cnt)
    if 200 > h > 20:
        x1 = x-20
        x2 = x+w+20
        y1 = y-20
        y2 = y+h+20
        roi = img[y1:y2, x1:x2]
        ROIs.append(roi)
# Iterate through list of ROIS and transform to HSV
# (I made a little adjustment in values )
for i in ROIs:
    hsv = cv2.cvtColor(i,cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, (115,100,50), (160,255,255))
    # Search for contours on every ROI in list and select the biggest one
    _, contours, hierarchy = cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    cnt = max(contours, key=cv2.contourArea)
    # Draw them whole on hsv then transform to gray and perform OTSU threshold and search for contours
    cv2.drawContours(hsv, [cnt], 0, 255, -1)
    gray = cv2.cvtColor(hsv, cv2.COLOR_BGR2GRAY)
    _,thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
    _, contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    cnt = max(contours, key=cv2.contourArea)
    # Make a "roundness" criterion and draw the letter
    x,y,w,h = cv2.boundingRect(cnt)
    perimeter = cv2.arcLength(cnt,True)
    radius = perimeter/(2*np.pi)
    area = cv2.contourArea(cnt)
    circ = 4*area/(np.pi*(radius*2)**2)
    if circ > 0.70:
        cv2.putText(i,'P',(int(x+(w/2.5)),int(y+(h/2))), font, 0.5,(0,255,0),1,cv2.LINE_AA)

# Display result:
resized = cv2.resize(img, (0,0), fx=0.5, fy=0.5)
cv2.imshow("roi",resized)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:

Python: Showing every Object of an image in its own window

I've written some code to crop an object (in this case the Data Matrix code) from an image:
import numpy as np
import cv2
image = cv2.imread("datamatrixc.png")
img_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
img_height, img_width = image.shape[:2]
WHITE = [255, 255, 255]
# Threshold filter
ret, thresh = cv2.threshold(img_gray, 127, 255, cv2.THRESH_BINARY_INV)
# Get Contours
_, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Get Last element of the contours object
max = len(contours) - 1
cnt = contours[max]
# Get coordinates for the bounding box
x, y, w, h = cv2.boundingRect(cnt)
image_region = image[ int(((img_height / 2) - h) / 2) : int(((img_height / 2) - h) / 2 + h), int(x): int(x + w) ]
dmc = cv2.copyMakeBorder(image_region, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value = WHITE)
cv2.imshow("Test", dmc)
cv2.waitKey(0)
cv2.destroyAllWindows()
The code works fine and I received this result:
However, the next image is a little more complicated.
I receive the same result as in the previous image, but I have no idea how to detect the two other objects.
Is there an easy way to show every object in its own window?
For this specific image, take the biggest contours you have and check if the object is a 4-sided shape. If the half-point between the bounding box's corners (see the pairs below) is in the contour array, then voila, problem solved.
Pairs: TopRight-TopLeft, TopRight-BottomRight, TopLeft-BottomLeft, BottomLeft-BottomRight
Or you could check if there are pixels that are not black/white inside the bounding box?
And for plotting individually, just slap a for loop on what you already have; a sketch of the half-point check follows.
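A minimal sketch of that half-point check, assuming cnt is a contour from cv2.findContours (cv2.pointPolygonTest stands in for searching the contour array by hand, and the tolerance is an assumption):
import cv2
def looks_four_sided(cnt, tol=5.0):
    # corners of the upright bounding box
    x, y, w, h = cv2.boundingRect(cnt)
    tl, tr = (x, y), (x + w, y)
    bl, br = (x, y + h), (x + w, y + h)
    pairs = [(tr, tl), (tr, br), (tl, bl), (bl, br)]
    for p, q in pairs:
        mid = ((p[0] + q[0]) / 2.0, (p[1] + q[1]) / 2.0)
        # signed distance from the midpoint to the contour; for a 4-sided
        # shape the midpoint of each box edge should lie (almost) on it
        if abs(cv2.pointPolygonTest(cnt, mid, True)) > tol:
            return False
    return True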
How about this?
import numpy as np
import cv2
image = cv2.imread("datamatrixc.png")
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, bin_img = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
kernel = np.ones((3,3),np.uint8)
closing = cv2.morphologyEx(bin_img, cv2.MORPH_CLOSE, kernel, iterations=4)
n_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(closing)  # use the cleaned image rather than the raw threshold
size_thresh = 5000
for i in range(1, n_labels):
    if stats[i, cv2.CC_STAT_AREA] >= size_thresh:
        print(stats[i, cv2.CC_STAT_AREA])
        x = stats[i, cv2.CC_STAT_LEFT]
        y = stats[i, cv2.CC_STAT_TOP]
        w = stats[i, cv2.CC_STAT_WIDTH]
        h = stats[i, cv2.CC_STAT_HEIGHT]
        cv2.imshow('img', image[y:y+h, x:x+w])
        cv2.waitKey(0)
