I am trying to extract information about a single bubble from an OMR bubble sheet to determine whether it is correctly marked or not.
Here is the original bubble sheet:
What I did:
1. Got a warped perspective of the section containing the bubbles (say, img_warp).
2. Converted img_warp to grayscale and applied an adaptive threshold (say, img_thresh).
3. To segment it further down to a single bubble, I tried the blob detector method given here.
However, I did not get the expected results from step 3 and couldn't find any other method or workaround for this.
Complete code:
import cv2
import numpy as np

def extract_rect(contours): # Extract the rectangular areas containing the bubbles
    rect_contours = []
    for c in contours:
        if cv2.contourArea(c) > 10000:
            perimeter = cv2.arcLength(c, True)
            # Approximates the contour with a polygon that has fewer vertices, keeping the
            # distance between them within the given precision (Douglas-Peucker algorithm)
            approx = cv2.approxPolyDP(c, 0.02*perimeter, True)
            # if len(approx) == 4: # cv2.boundingRect seems to be automatically taking care of this
            #     rect_contours.append(c)
            rect_contours.append(c)
    rect_contours = sorted(rect_contours, key=cv2.contourArea, reverse=True)
    return rect_contours

def findlen(p1, p2):
    length = ((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)**0.5 # length = sqrt[(x2-x1)**2 + (y2-y1)**2]
    return length

def rect_points(rect_contour): # Find the corner points of the extracted rectangle, used for the warped perspective in step 1
    perimeter = cv2.arcLength(rect_contour, True)
    approx = cv2.approxPolyDP(rect_contour, 0.02*perimeter, True)
    print("APPROX")
    print(type(approx))
    print(approx)
    cv2.drawContours(img, approx, -1, (100,10,55), 18)
    cv2.drawContours(img, rect_contour, -1, (100,10,55), 1)
    x, y, w, h = cv2.boundingRect(rect_contour)
    print("printing x y w h")
    print(x, y, w, h)
    # Corner points of the rectangle, used later to warp the rectangular section
    point_1 = np.array([x, y])
    point_2 = np.array([x+w, y])
    point_3 = np.array([x, y+h])
    point_4 = np.array([x+w, y+h]) # was np.array([w, h]), which is a size, not a corner
    paper_length = findlen(point_1, point_3)
    paper_breadth = findlen(point_1, point_2)
    if paper_breadth > paper_length: # here breadth should be smaller than length
        temp = point_1.copy()
        point_1 = point_3.copy()
        point_3 = point_4.copy()
        point_4 = point_2.copy()
        point_2 = temp
    # np.append returns a new array instead of filling corner_list in place, so build the array directly
    corner_list = np.array([point_1, point_2, point_3, point_4], dtype=np.int32)
    print("corners list")
    print(corner_list)
    # Reorder the corners to [top-left, top-right, bottom-left, bottom-right]
    new_cornerlist = np.zeros((4, 1, 2), np.int32)
    add = corner_list.sum(1)
    new_cornerlist[0] = corner_list[np.argmin(add)] # top-left
    new_cornerlist[3] = corner_list[np.argmax(add)] # bottom-right
    diff = np.diff(corner_list, axis=1)
    new_cornerlist[1] = corner_list[np.argmin(diff)] # top-right
    new_cornerlist[2] = corner_list[np.argmax(diff)] # bottom-left
    print(new_cornerlist.shape)
    return new_cornerlist

def detect_blob(image): # Part of step 3
    # Initialize the filtering parameters for cv2.SimpleBlobDetector
    params = cv2.SimpleBlobDetector_Params()
    # Area filtering parameters
    params.filterByArea = True
    params.minArea = 90
    # Circularity filtering parameters
    params.filterByCircularity = True
    params.minCircularity = 0.2
    # Convexity filtering parameters
    params.filterByConvexity = True
    params.minConvexity = 0.7
    # Inertia filtering parameters
    params.filterByInertia = True
    params.minInertiaRatio = 0.05
    # Create a detector with the parameters
    detector = cv2.SimpleBlobDetector_create(params)
    # Detect blobs
    keypoints = detector.detect(image)
    # Draw the blobs on the image as red circles
    blank = np.zeros((1, 1))
    blobs = cv2.drawKeypoints(image, keypoints, blank, (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
    text = "Number of Circular Blobs: " + str(len(keypoints))
    cv2.putText(blobs, text, (20, 550), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 100, 255), 2)
    cv2.imshow("Filtering Circular Blobs Only", blobs)
    cv2.waitKey(0)

img_path = 'bubblesheet.jpg' # Path of the input bubble sheet image
img = cv2.imread(img_path)
print(img.shape) # Original size is 1600 x 1200 with 3 color channels
img_width = 600
img_height = 600
img = cv2.resize(img, (img_width, img_height), interpolation=cv2.INTER_AREA)

# Grayscale, because edge detection only needs intensity information
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img_blur = cv2.GaussianBlur(img_gray, (5,5), 0)
cv2.imshow('Blurred', img_blur)

# Edge detection with Canny; binary thresholding would be an alternative
# (pixels below the threshold are set to 0, the rest to a maximum value)
img_canny = cv2.Canny(img_blur, 20, 110)
cv2.imshow('Edge detection', img_canny)

contours, hierarchy = cv2.findContours(img_canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
img_contours = img.copy()
# Parameters: (image, contours, contour_idx, color, thickness); contour_idx is -1 to draw all contours
cv2.drawContours(img_contours, contours, -1, (0,255,0), 1)
cv2.imshow('Contours', img_contours)

rect_contours = extract_rect(contours)
cv2.drawContours(img, rect_contours[1], -1, (0,255,0), 1)

wrap_points = rect_points(rect_contours[2])
if wrap_points.size != 0:
    cv2.drawContours(img, wrap_points, -1, (0,0,255), 12)

warp_img_width = int(img_width)
warp_img_height = int(img_height)
warp_from = np.float32(wrap_points)
warp_to = np.float32([[0,0], [warp_img_width, 0], [0, warp_img_height], [warp_img_width, warp_img_height]])
transformation_matrix = cv2.getPerspectiveTransform(warp_from, warp_to)
img_warp = cv2.warpPerspective(img, transformation_matrix, (warp_img_width, warp_img_height)) # was (height, height)
cv2.imshow('Warped Perspective', img_warp)

img_warp_gray = cv2.cvtColor(img_warp, cv2.COLOR_BGR2GRAY)
# Adaptive threshold picks a suitable threshold value per local neighbourhood
img_thresh = cv2.adaptiveThreshold(img_warp_gray, 200, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
cv2.imshow('img_thresh', img_thresh)

detect_blob(img_thresh) # Attempt to extract each bubble from the image

cv2.imshow('Original', img)
cv2.waitKey(0)
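An alternative to blob detection for step 3, since the bubbles sit on a regular grid after warping, is to slice img_thresh into equal cells and count the white pixels per cell. A minimal sketch, where the 5x5 grid shape is an assumption that must match the actual sheet layout:

# Hypothetical sketch: split the warped, thresholded grid into one cell per bubble.
# rows and cols are assumptions; set them to the sheet's real layout.
rows, cols = 5, 5
cells = [np.hsplit(band, cols) for band in np.vsplit(img_thresh, rows)]
for r in range(rows):
    for c in range(cols):
        # A marked bubble contains far more white pixels than an empty one
        print(r, c, cv2.countNonZero(cells[r][c]))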
I have an image and I want to crop only the number inside it, without the lines bounding the image. Here is the sample image:
As you can see there are two lines, on the right and at the bottom of the image. I want to crop only the number 8; if there is nothing inside, I will return None as the result. But with my code, it also returns the area which includes these lines. Is there any way to fix this?
Result I got:
Or with an empty image, I got:
Here is my code:
import cv2
from imutils.contours import sort_contours # sort_contours is not defined in the post; imutils provides a helper with this signature

def process(image, readFile=True, returnType='binary'):
    out_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Perform Otsu thresholding (the fixed value 220 is ignored when THRESH_OTSU is set)
    ret, thresh1 = cv2.threshold(out_gray, 220, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY_INV)
    # Specify structure shape and kernel size.
    # The kernel size increases or decreases the area of the rectangle to be detected.
    # A smaller value like (10, 10) will detect each word instead of a sentence.
    rect_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8,8))
    # Apply dilation on the thresholded image
    dilation = cv2.dilate(thresh1, rect_kernel, iterations=1)
    # Find contours
    contours, hierarchy = cv2.findContours(dilation, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    if len(contours) <= 0:
        return False
    contours, boundingBoxes = sort_contours(contours, 'left-to-right')
    # Create a copy of the image
    im2 = image.copy() # was img.copy(); img is not defined in this scope
    # Loop through the identified contours; each rectangular part is cropped
    # and can be passed on to pytesseract for text extraction
    i = 1
    minHeight = 10
    minWidth = 10
    result = []
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        # Draw a rectangle on the copied image
        rect = cv2.rectangle(im2, (x, y - 1), (x + w, y + h + 1), (0, 255, 0), 1)
        cv2.imwrite(f'{constant.__RESULT_PATH}/cutted123withbounding.png', im2)
        # Crop the text block for giving input to OCR
        cropped = im2[y:y + h, x:x + w]
        if w >= minWidth and h >= minHeight:
            result.append(cropped) # was result.append(thresh1), which stored the whole image instead of the crop
        i = i + 1
    return result
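One hedged direction for excluding the stray border lines: they have extreme bounding-box aspect ratios compared to a digit, so they can be rejected before cropping. A sketch reusing the variables above, with the cutoff values being assumptions to tune:

# Sketch: skip contours that are too thin to be a character (thresholds are guesses).
for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    aspect = w / float(h)
    if aspect > 5 or aspect < 0.2: # long thin line, not a digit
        continue
    if w >= minWidth and h >= minHeight:
        result.append(im2[y:y + h, x:x + w])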
I want to mask human fingernails (fingernails white, everything else including the hand black). I do simple image operations, then Canny edge detection; after smoothing the image, I find contours and give the internal contours white color, which would be the fingernails.
My problem is that when the fingernails are painted they are quite easy to detect, but when there is no paint it becomes really complicated, and the program has to process 50 images and save the outputs to a certain folder.
I am confused about how to proceed; if anybody has done something similar, I would appreciate some help.
import cv2
import numpy as np
import matplotlib.pyplot as plt

def display_img(img):
    fig = plt.figure(figsize=(12,10))
    ax = fig.add_subplot(111)
    plt.imshow(img, cmap='gray')

img = cv2.imread('nail2.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.blur(gray, ksize=(1,1))
kernel = np.ones((5,5), np.uint8)
display_img(blur)

med = np.median(gray)
gradient = cv2.Laplacian(blur, cv2.CV_64F)
gradient = cv2.convertScaleAbs(gradient)
plt.imshow(gradient, 'gray')

# Canny thresholds derived from the median intensity
lower = int(max(0, 0.7*med))
upper = int(min(255, 1.3*med))
edges = cv2.Canny(blur, lower, upper)
display_img(edges)

edges = cv2.GaussianBlur(edges, (11, 11), 0) # smoothing before finding contours
display_img(edges)

# OpenCV 3 signature: findContours returns (image, contours, hierarchy);
# in OpenCV 4 it returns only (contours, hierarchy)
image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)

# Create an empty array to hold the internal contours
image_internal = np.zeros(image.shape)

# Iterate through the list of contour arrays
for i in range(len(contours)):
    # If the fourth value (the parent) is not -1, the contour is internal
    if hierarchy[0][i][3] != -1:
        # Draw the contour filled
        cv2.drawContours(image_internal, contours, i, 255, -1)

display_img(image_internal)
Below is a good result:
And here is a bad result, even though the fingers have pink paint:
Well, you have big lighting and scale problems in these two images. But a possible solution is to segment the color channels and look for blobs.
Then you can filter the blobs with the detector parameters.
Here is code you can try:
import cv2
import numpy as np

fra = cv2.imread('nails.png')
height, width, channels = fra.shape

# Strong median blur to flatten texture, then segment in HSV
src = cv2.medianBlur(fra, 21)
hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV_FULL)
mask = cv2.inRange(hsv, np.array([0, 0, 131]), np.array([62, 105, 255]))
mask = cv2.erode(mask, None, iterations=8)
mask = cv2.dilate(mask, None, iterations=8)

# Blob detector parameters scaled to the image size
params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = int((height * width) / 500)
params.maxArea = int((height * width) / 10)
params.filterByCircularity = True
params.minCircularity = 0.5
params.filterByConvexity = True
params.minConvexity = 0.5
params.filterByInertia = True
params.minInertiaRatio = 0.01

detector = cv2.SimpleBlobDetector_create(params)
key_points = detector.detect(255 - mask) # the detector looks for dark blobs, so invert the mask

vis = cv2.bitwise_and(hsv, hsv, mask=mask)
vis = cv2.addWeighted(src, 0.2, vis, 0.8, 0)
cv2.drawKeypoints(vis, key_points, vis, (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
for kp in key_points:
    cv2.drawMarker(vis, (int(kp.pt[0]), int(kp.pt[1])), color=(0, 255, 0), markerType=cv2.MARKER_CROSS, thickness=3)

cv2.imshow("VIS", vis)
cv2.imwrite('nails_detected.png', vis)
cv2.waitKey(0)
cv2.destroyAllWindows()
Good luck!
I have this original image:
Then I applied the following code:
1. Converted the original image to HSV.
2. Using cv2.findContours(), made a list containing all the contours.
3. Removed all the contours with an area of less than 30.
Then I got the following image:
What I want is to remove the boundary (the outer boundary of the leaf) from the resulting image, as it is of no use. I only need the inner patches of the leaf.
This is the code I used:
import cv2
import numpy as np

img = cv2.imread('Apple___Blackrot30.JPG')
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

# Mask out the green parts by inverting the green range mask
lower_gr = np.array([25, 0, 0])
upper_gr = np.array([90, 255, 255])
mask = cv2.inRange(hsv, lower_gr, upper_gr)
mask = ~mask
res = cv2.bitwise_and(img, img, mask=mask)
blur = cv2.bilateralFilter(res, 9, 75, 75)

# OpenCV 3 signature (returns three values)
im2, cont, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(each_conts) for each_conts in cont]

cont_counter = 0
for each_conts in areas:
    if each_conts < 30:
        cv2.fillPoly(im2, pts=[cont[cont_counter]], color=(0, 0, 0))
    if each_conts > 1024:
        cv2.drawContours(mask, cont[cont_counter], 0, (255, 255, 255), -1)
    cont_counter += 1

cv2.imshow('cn', im2)
You can use the concept of hierarchy of contours to solve this problem, but there is a caveat: all your images must be similar to the one in the question.
I just added some additional stuff to your code.
Code:
img2 = img.copy()
im2, cont, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# Collect the indices of contours that have no parent (outermost contours)
l = []
for e, h in enumerate(hierarchy[0]):
    # print(e, h[3])
    if h[3] == -1:
        l.append(e)

# Draw only the small outermost contours (the spots, not the leaf outline)
for i in l:
    if cv2.contourArea(cont[i]) < 1000:
        cv2.drawContours(img2, [cont[i]], -1, (0, 255, 255), 2)

cv2.imshow('img2', img2)
Result:
hierarchy returns an array expressing the parent-child relationship of the contours. As per the documentation link,
each entry is an array of four values: [Next, Previous, First_Child, Parent].
In the hierarchy array I scanned the Parent field (the fourth value) to see which contours have no parent (-1) and drew those.
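For illustration, a minimal way to inspect those four values per contour (a sketch following the variable names and the OpenCV 3 findContours signature used above):

# Each hierarchy entry is [Next, Previous, First_Child, Parent]
im2, cont, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for e, (nxt, prev, child, parent) in enumerate(hierarchy[0]):
    print(f"contour {e}: next={nxt}, previous={prev}, first_child={child}, parent={parent}")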
I assume you only need the inner spots inside the leaf.
1. Segment the leaf using the Otsu algorithm.
2. Apply the flood-fill operation to ensure you capture all leaf pixels.
3. Extract only the inner contours.
All of this can be done with OpenCV; below is the code:
import cv2
import numpy as np

def flood_fill_binary(binary):
    hh = binary.shape[0]
    ww = binary.shape[1]
    xx = 10
    yy = 10
    black = [0, 0, 0]
    # Pad the image so the flood fill can start from a guaranteed background pixel
    binary = cv2.copyMakeBorder(binary, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=black)
    im_floodfill = binary.copy()
    h, w = binary.shape[:2]
    mask = np.zeros((h + 2, w + 2), np.uint8)
    cv2.floodFill(im_floodfill, mask, (0, 0), 255)
    im_floodfill_inv = cv2.bitwise_not(im_floodfill)
    # Combine the original with the inverted flood fill to close the holes
    im_out = binary | im_floodfill_inv
    crop_og = im_out[yy:yy + hh, xx:xx + ww]
    return crop_og

def leaf_spots_detector(image):
    image = image.astype('uint8')
    hh = image.shape[0]
    ww = image.shape[1]
    xx = 10
    yy = 10
    # kernel = np.ones((3,3), np.uint8)
    grayed_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, segmented = cv2.threshold(grayed_image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    segmented = flood_fill_binary(segmented)
    segmented = cv2.copyMakeBorder(segmented, xx, xx, yy, yy, cv2.BORDER_CONSTANT, value=[0, 0, 0])
    # findContours returns three values in OpenCV 3 and two in OpenCV 4
    major = cv2.__version__.split('.')[0]
    if major == '3':
        ret, contours, hierarchy = cv2.findContours(segmented, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    else:
        contours, hierarchy = cv2.findContours(segmented, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    print(hierarchy.shape)
    image_external = np.zeros(segmented.shape, segmented.dtype)
    # Skip contour 0 (the leaf outline) and draw the inner contours filled
    for i in range(1, len(contours)):
        # if hierarchy[0][i][3] == -1:
        cv2.drawContours(image_external, contours, i, (225, 255, 255), -1)
    image_external = image_external[yy:yy + hh, xx:xx + ww]
    # image_external = cv2.dilate(image_external, kernel, iterations=1)
    return image_external

image = cv2.imread('image/path.png')
leaf_spots = leaf_spots_detector(image)
cv2.imshow("detected spots", leaf_spots)
cv2.waitKey(0)
cv2.destroyAllWindows() # was destroyAllWindow (missing 's')
One of the first processing steps in a tool I'm coding is to find the coordinates of the outside corners of 4 big black squares. They will then be used to do a homographic transform, in order to deskew / unrotate the image (a.k.a. a perspective transform), to finally get a rectangular image. Here is an example of rotated and noisy input (download link here):
To keep the big squares only, I'm using morphological transformations like closing/opening:
import cv2, numpy as np
img = cv2.imread('rotatednoisy-cropped.png', cv2.IMREAD_GRAYSCALE)
kernel = np.ones((30, 30), np.uint8)
img = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
cv2.imwrite('output.png', img)
Input file (download link):
Output, after morphological transform:
Problem: the output squares are not square anymore, and therefore the coordinates of the top-left corners of the squares will not be precise at all!
I could reduce the kernel size, but then it would keep more unwanted small elements.
Question: how to get a better detection of the corners of the squares?
Note:
As a morphological closing is just a dilation followed by an erosion, I found the culprit:
import cv2, numpy as np
img = cv2.imread('rotatednoisy-cropped.png', cv2.IMREAD_GRAYSCALE)
kernel = np.ones((30, 30), np.uint8)
img = cv2.dilate(img, kernel, iterations = 1)
After this step, it's still ok:
Then
img = cv2.erode(img, kernel, iterations = 1)
gives
and it's not ok anymore!
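For context, once the four outer corners are known, the homographic transform itself is only a few lines. A minimal sketch, where the corner coordinates and the 800x600 output size are made-up placeholder values:

import cv2
import numpy as np

# src_corners must be the detected outer corners in top-left, top-right,
# bottom-left, bottom-right order; the values here are hypothetical
src_corners = np.float32([[120, 80], [980, 130], [90, 720], [950, 770]])
w, h = 800, 600 # assumed output size
dst_corners = np.float32([[0, 0], [w, 0], [0, h], [w, h]])
M = cv2.getPerspectiveTransform(src_corners, dst_corners)
deskewed = cv2.warpPerspective(cv2.imread('rotatednoisy-cropped.png'), M, (w, h))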
See this link for detailed explanation on how to de-skew an image.
import cv2
import numpy as np

def corners(box):
    # Compute the four corner points of a rotated rect ((cx, cy), (w, h), angle)
    cx, cy, w, h, angle = box[0][0], box[0][1], box[1][0], box[1][1], box[2]
    _angle = angle * np.pi / 180. # was 22/7, a rough approximation of pi
    b = np.cos(_angle) * 0.5
    a = np.sin(_angle) * 0.5
    pt = []
    pt.append((int(cx - a*h - b*w), int(cy + b*h - a*w)))
    pt.append((int(cx + a*h - b*w), int(cy - b*h - a*w)))
    pt.append((int(2*cx - pt[0][0]), int(2*cy - pt[0][1])))
    pt.append((int(2*cx - pt[1][0]), int(2*cy - pt[1][1])))
    return pt

if __name__ == '__main__':
    image = cv2.imread('image.jpg', cv2.IMREAD_UNCHANGED)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Light Gaussian blur before Otsu binarization
    n = 3
    sigma = 0.3 * (n/2 - 1) + 0.8
    gray = cv2.GaussianBlur(gray, ksize=(n,n), sigmaX=sigma)
    ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY)

    # OpenCV 3 signature (returns three values)
    _, contours, _ = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours.sort(key=lambda x: len(x), reverse=True)

    # Keep the four largest contours that approximate to quadrilaterals
    points = []
    for i in range(0, 4):
        shape = cv2.approxPolyDP(contours[i], 0.05 * cv2.arcLength(contours[i], True), True)
        if len(shape) == 4:
            points.append(shape)

    points = np.array(points, dtype=np.int32)
    points = np.reshape(points, (-1, 2))
    box = cv2.minAreaRect(points)
    pt = corners(box)
    for i in range(0, 4):
        image = cv2.line(image, (pt[i][0], pt[i][1]), (pt[(i+1) % 4][0], pt[(i+1) % 4][1]), (0, 0, 255))

    # Rotate the image around its center by the angle of the bounding box
    (h, w) = image.shape[:2]
    center = (w//2, h//2)
    angle = box[2]
    if angle < -45:
        angle = angle + 90
    else:
        angle = -angle
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_CONSTANT)

    cv2.imshow('image', image)
    cv2.imshow('rotated', rotated)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
You could try searching for and filtering out your specific contours (the black rectangles) and sorting them with a key. Then select the extreme point for each contour (left, right, top, bottom) and you will get your points. Note that this approach works for this picture only; if the picture were rotated in the other direction, you would have to change the code accordingly. I am not an expert, but I hope this helps a bit.
import numpy as np
import cv2

img = cv2.imread("rotate.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(gray, 150, 255, cv2.THRESH_BINARY)

# OpenCV 3 signature (returns three values)
im, contours, hierarchy = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)

# Sort contours from top to bottom by their smallest y coordinate
contours.sort(key=lambda c: np.min(c[:, :, 1]))

# Pick the four contours whose area matches the black squares
j = 1
if len(contours) > 0:
    for i in range(0, len(contours)):
        size = cv2.contourArea(contours[i])
        if 90 < size < 140:
            if j == 1:
                c1 = contours[i]
                j += 1
            elif j == 2:
                c2 = contours[i]
                j += 1
            elif j == 3:
                c3 = contours[i]
                j += 1
            elif j == 4:
                c4 = contours[i]
                break

# Extreme points: topmost point of the first square, rightmost of the second, and so on
Top = tuple(c1[c1[:, :, 1].argmin()][0])
Right = tuple(c2[c2[:, :, 0].argmax()][0])
Left = tuple(c3[c3[:, :, 0].argmin()][0])
Bottom = tuple(c4[c4[:, :, 1].argmax()][0])

cv2.circle(img, Top, 2, (0, 255, 0), -1)
cv2.circle(img, Right, 2, (0, 255, 0), -1)
cv2.circle(img, Left, 2, (0, 255, 0), -1)
cv2.circle(img, Bottom, 2, (0, 255, 0), -1)

cv2.imshow("Image", img)
cv2.waitKey(0)
Result:
You can extract the squares as single blobs after binarization with a suitable threshold, and select the appropriate ones based on size. You can also first denoise with a median filter if you want.
Then a tight rotated bounding rectangle will give you the corners (you can obtain it by running Rotating Calipers on the Convex Hull).
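A minimal sketch of this approach (the file name, threshold choice, and area bounds are assumptions to adapt):

import cv2
import numpy as np

img = cv2.imread('rotatednoisy-cropped.png', cv2.IMREAD_GRAYSCALE)
img = cv2.medianBlur(img, 5) # optional denoising
# Invert so the black squares become white blobs
_, binary = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
    if not 500 < cv2.contourArea(c) < 50000: # size filter, tune to your scans
        continue
    # cv2.minAreaRect computes the tight rotated bounding rectangle
    # (rotating calipers over the contour's convex hull)
    box = cv2.minAreaRect(c)
    corners = cv2.boxPoints(box) # the four corner points of the square
    print(corners)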