Human fingernail segmentation without training a model - python

I want to mask human fingernails (fingernails white, everything else including the hand black). I apply some simple image operations, smooth the image, run Canny edge detection, and then find contours, coloring the internal contours white; those should be the fingernails.
My problem is that painted fingernails are quite easy to detect, but unpainted ones become really complicated, and the program has to process 50 images and save the outputs to a certain folder.
I am confused about how to proceed. If anybody has done something similar, I would appreciate some help.
import cv2
import numpy as np
import matplotlib.pyplot as plt

def display_img(img):
    fig = plt.figure(figsize=(12, 10))
    ax = fig.add_subplot(111)
    plt.imshow(img, cmap='gray')

img = cv2.imread('nail2.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.blur(gray, ksize=(1, 1))  # note: a 1x1 kernel is effectively a no-op
kernel = np.ones((5, 5), np.uint8)
display_img(blur)
med = np.median(gray)
gradient = cv2.Laplacian(blur, cv2.CV_64F)
gradient = cv2.convertScaleAbs(gradient)
plt.imshow(gradient, 'gray')
lower = int(max(0, 0.7 * med))
upper = int(min(255, 1.3 * med))
edges = cv2.Canny(blur, lower, upper)
display_img(edges)
edges = cv2.GaussianBlur(edges, (11, 11), 0)  # smoothing before applying threshold
display_img(edges)
# OpenCV 3 signature; in OpenCV 4 use: contours, hierarchy = cv2.findContours(...)
image, contours, hierarchy = cv2.findContours(edges, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
# Create an empty array to hold the internal contours
image_internal = np.zeros(image.shape)
# Iterate through the list of contour arrays
for i in range(len(contours)):
    # If the fourth value (the parent index) is NOT -1, the contour is internal
    if hierarchy[0][i][3] != -1:
        # Draw the contour filled
        cv2.drawContours(image_internal, contours, i, 255, -1)
display_img(image_internal)
Below is a good result:
And a bad result, even though the fingernails have pink paint:

Well, you have a big lighting and scale problem in these two images. But a possible approach is to segment by color and look for blobs, then filter the blobs by their parameters.
Here is code you can try:
import cv2
import numpy as np

fra = cv2.imread('nails.png')
height, width, channels = fra.shape

# Smooth, convert to HSV, and keep only bright, low-saturation (nail-colored) pixels
src = cv2.medianBlur(fra, 21)
hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV_FULL)
mask = cv2.inRange(hsv, np.array([0, 0, 131]), np.array([62, 105, 255]))

# Morphological opening to remove small speckles from the mask
mask = cv2.erode(mask, None, iterations=8)
mask = cv2.dilate(mask, None, iterations=8)

# Look for roundish blobs of a plausible size relative to the image
params = cv2.SimpleBlobDetector_Params()
params.filterByArea = True
params.minArea = int((height * width) / 500)
params.maxArea = int((height * width) / 10)
params.filterByCircularity = True
params.minCircularity = 0.5
params.filterByConvexity = True
params.minConvexity = 0.5
params.filterByInertia = True
params.minInertiaRatio = 0.01
detector = cv2.SimpleBlobDetector_create(params)
key_points = detector.detect(255 - mask)  # the detector looks for dark blobs, so invert

# Visualize the mask and the detected key points
vis = cv2.bitwise_and(hsv, hsv, mask=mask)
vis = cv2.addWeighted(src, 0.2, vis, 0.8, 0)
cv2.drawKeypoints(vis, key_points, vis, (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
for kp in key_points:
    cv2.drawMarker(vis, (int(kp.pt[0]), int(kp.pt[1])), color=(0, 255, 0), markerType=cv2.MARKER_CROSS, thickness=3)
cv2.imshow("VIS", vis)
cv2.imwrite('nails_detected.png', vis)
cv2.waitKey(0)
cv2.destroyAllWindows()
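Note that the inRange bounds above are tuned to this particular image and will shift with lighting. A small helper you could use (a sketch only; the window and trackbar names are arbitrary, not part of the original answer) to find workable bounds interactively:

import cv2
import numpy as np

# Sketch: tune HSV inRange bounds with trackbars (press Esc to quit)
img = cv2.imread('nails.png')
hsv = cv2.cvtColor(cv2.medianBlur(img, 21), cv2.COLOR_BGR2HSV_FULL)
cv2.namedWindow('tune')
for name, init in [('H_lo', 0), ('S_lo', 0), ('V_lo', 131),
                   ('H_hi', 62), ('S_hi', 105), ('V_hi', 255)]:
    cv2.createTrackbar(name, 'tune', init, 255, lambda v: None)
while True:
    lo = np.array([cv2.getTrackbarPos(n, 'tune') for n in ('H_lo', 'S_lo', 'V_lo')])
    hi = np.array([cv2.getTrackbarPos(n, 'tune') for n in ('H_hi', 'S_hi', 'V_hi')])
    cv2.imshow('tune', cv2.inRange(hsv, lo, hi))
    if cv2.waitKey(30) == 27:
        break
cv2.destroyAllWindows()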
Good luck!

Related

Line detection from a picture

picture to be detected - different color
For a picture like the one above, where the lines are blue, I use a mask to get the contours of these lines; please see the code below:
img = cv2.imread("./more5.png")#"https://i.stack.imgur.com/LiLPv.png"
blueLower = np.array([50,50,50])
blueUpper = np.array([130,255,255])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
blue_mask = cv2.inRange(hsv, blueLower, blueUpper)
blue = cv2.bitwise_and(img ,img ,mask=blue_mask)
contours, hierarchy = cv2.findContours(preprocess(blue), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
Lines detected
Complete code:
import numpy as np
import cv2

def preprocess(img):
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_blur = cv2.GaussianBlur(img_gray, (5, 5), 1)
    img_canny = cv2.Canny(img_blur, 50, 50)  # edge cascade
    kernel = np.ones((3, 3))
    img_dilate = cv2.dilate(img_canny, kernel, iterations=1)
    img_erode = cv2.erode(img_dilate, kernel, iterations=1)
    return img_erode

# BGR to HSV to LAB to ...
def find_tip(points, convex_hull):
    # The arrow tip is a point of the polygon that is not on the convex hull
    length = len(points)
    indices = np.setdiff1d(range(length), convex_hull)
    for i in range(2):
        j = indices[i] + 2
        if j > length - 1:
            j = length - j
        if np.all(points[j] == points[indices[i - 1] - 2]):
            return tuple(points[j])

img = cv2.imread("./more5.png")
blueLower = np.array([50, 50, 50])
blueUpper = np.array([130, 255, 255])
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
blue_mask = cv2.inRange(hsv, blueLower, blueUpper)
blue = cv2.bitwise_and(img, img, mask=blue_mask)
contours, hierarchy = cv2.findContours(preprocess(blue), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
print(f"Contours size {len(contours)}")
for cnt in contours:
    peri = cv2.arcLength(cnt, True)
    approx = cv2.approxPolyDP(cnt, 0.004 * peri, True)
    hull = cv2.convexHull(approx, returnPoints=False)
    sides = len(hull)
    if 6 > sides > 3 and sides + 2 == len(approx):
        arrow_tip = find_tip(approx[:, 0, :], hull.squeeze())
        if arrow_tip:
            cv2.drawContours(img, [cnt], -1, (0, 255, 0), 3)
            cv2.circle(img, arrow_tip, 3, (0, 0, 255), cv2.FILLED)
cv2.imshow("Image", img)
cv2.waitKey(0)
However, if the line and the connected shape are the SAME color (please see the picture below), this method does NOT work anymore. How can I detect the lines in such a picture? Thanks for any ideas!
picture to be detected - same color
If your graph is a tree (it has no closed circuits), then only the lines have the same area on both sides.
If you paint the external area the same color, only the lines will have that color on both sides.
Then you can apply a convolution so the lines get averaged with the external space color, while the shape outlines get averaged with the internal space colors.
import matplotlib.pyplot as plt
import numpy as np
import cv2
import requests
from io import BytesIO
from PIL import Image as Pim
from scipy.signal import convolve2d as conv2d

# Download the example image from this webpage
photoURL = "https://i.stack.imgur.com/LiLPv.png"
response = requests.get(photoURL)
image = np.array(Pim.open(BytesIO(response.content)).convert('L'))  # convert to greyscale

# Mask to only 2 colors:
# if a pixel differs from the upper-left (background) color it becomes gray, else black
gray = 128
black = 0
Monochrome = lambda t: gray if t != image[0, 0] else black
vfunc = np.vectorize(Monochrome)
image = vfunc(image).astype(np.uint8)  # floodFill needs a uint8 image

# Fill the external space with white, starting from the upper-left pixel
UpperLeftPixel = (0, 0)
AdjacentColor = 255  # white
cv2.floodFill(image, None, UpperLeftPixel, AdjacentColor)

# Convolution to average the lines with the neighbouring space colors:
# lines end up averaged with the external (white) color,
# shape outlines with the internal (black) color
averagingFilter = np.ones(shape=(9, 9)) / 9**2
image = conv2d(image, averagingFilter, mode="same")

plt.imshow(image)
plt.show()
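To turn the averaged image into an explicit line mask, one rough follow-up (my sketch, not part of the original answer) is to keep a copy of the two-color image before the flood fill and then select the drawn pixels whose neighbourhood averaged bright; the 180 threshold below is a guess that may need tuning:

# Sketch only: assumes `binary = image.copy()` was saved right after the
# vfunc step, i.e. before the flood fill and the convolution.
drawing = binary == gray                          # pixels belonging to the drawing
line_mask = np.logical_and(drawing, image > 180)  # drawing pixels surrounded mostly by white
plt.imshow(line_mask, cmap='gray')
plt.show()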

Not enough background filtering

I am trying to filter the background out of images showing electric cables. I tried the following:
Transform from color to gray
Apply cv2.Laplacian, or cv2.Sobel twice, to find edges in both directions.
Apply thresholding with cv2.THRESH_BINARY(_INV) and cv2.THRESH_OTSU
Lastly, find lines in the 'filtered' images using cv2.Canny together with cv2.HoughLinesP
Overall, the results aren't satisfactory at all. Here is an example with 2 images:
And the output of my script:
I also played with the values in the config, but the results didn't change much.
Here's the short script I have so far:
import cv2
import matplotlib.pyplot as plt
import numpy as np

def img_show(images, cmap=None):
    fig = plt.figure(figsize=(17, 10))
    root = 3  # len(images) ** 0.5
    for i, img in enumerate(images):
        ax = fig.add_subplot(root, root, i + 1)
        ax.imshow(img, cmap=cmap[i])
    plt.show()

class Config:
    scale = 0.4
    min_threshold = 120
    max_threshold = 200
    canny_min_threshold = 100
    canny_max_threshold = 200

config = Config()

def find_lines(img, rgb_img):
    dst = cv2.Canny(img, config.canny_min_threshold, config.canny_max_threshold)
    cdstP = np.copy(rgb_img)
    lines = cv2.HoughLinesP(dst, 1, np.pi / 180, 150, None, 0, 0)
    lines1 = lines[:, 0, :]
    for x1, y1, x2, y2 in lines1[:]:
        cv2.line(cdstP, (x1, y1), (x2, y2), (255, 0, 0), 5)
    return cdstP

if __name__ == "__main__":
    bgr_img = cv2.imread('DJI_0009.JPG')
    bgr_img = cv2.resize(bgr_img, (0, 0), bgr_img, config.scale, config.scale)
    rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
    gray_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2GRAY)
    # _, threshold = cv2.threshold(gray_img, config.min_threshold, config.max_threshold, cv2.THRESH_BINARY)
    # laplacian = cv2.Laplacian(rgb_img, cv2.CV_8UC1)
    sobelx = cv2.Sobel(gray_img, cv2.CV_8UC1, 1, 0)
    sobely = cv2.Sobel(gray_img, cv2.CV_8UC1, 0, 1)
    blended = cv2.addWeighted(src1=sobelx, alpha=0.5, src2=sobely, beta=0.5, gamma=0)
    _, threshold = cv2.threshold(blended, config.min_threshold, config.max_threshold,
                                 cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    p1 = find_lines(threshold, rgb_img)
    p2 = find_lines(blended, rgb_img)
    p3 = find_lines(gray_img, rgb_img)
    plots = [rgb_img, p1, p2, p3]
    cmaps = [None] + ['gray'] * (len(plots) - 1)
    img_show(plots, cmaps)
I am assuming I need to do much better filtering. However, I also tried image segmentation, but the results weren't promising at all.
Any ideas on how to improve this?
Thanks
Here is one way to do that in Python/OpenCV. I threshold, then optionally clean with morphology. Then I get the contours and, for each contour, compute its rotated rectangle. From the rotated rectangle's dimensions I compute the aspect ratio (largest dimension / smallest dimension) and optionally the area. Then I threshold on the aspect ratio (and optionally the area) and keep only those contours that pass.
Input:
import cv2
import numpy as np

image = cv2.imread("DCIM-100-MEDIA-DJI-0009-JPG.jpg")
hh, ww = image.shape[:2]

# convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# create a binary thresholded image
thresh = cv2.threshold(gray, 64, 255, cv2.THRESH_BINARY)[1]

# invert so line is white on black background
thresh = 255 - thresh

# apply morphology
kernel = np.ones((11, 11), np.uint8)
clean = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)

# get external contours
contours = cv2.findContours(clean, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]

area_thresh = ww / 2
aspect_thresh = ww / 30
print(area_thresh, aspect_thresh)
print('')

result = image.copy()
for c in contours:
    # get rotated rectangle from contour and get its dimensions
    rotrect = cv2.minAreaRect(c)
    (center), (dim1, dim2), angle = rotrect
    maxdim = max(dim1, dim2)
    mindim = min(dim1, dim2)
    area = dim1 * dim2
    if mindim != 0:
        aspect = maxdim / mindim
        #print(area, aspect)
        #if area > area_thresh and aspect > aspect_thresh:
        if aspect > aspect_thresh:
            # draw contour on input
            cv2.drawContours(result, [c], 0, (0, 0, 255), 3)
            print(area, aspect)

# save results
cv2.imwrite("DCIM-100-MEDIA-DJI-0009-JPG_thresh.jpg", thresh)
cv2.imwrite("DCIM-100-MEDIA-DJI-0009-JPG_clean.jpg", clean)
cv2.imwrite("DCIM-100-MEDIA-DJI-0009-JPG_result.jpg", result)

# display results
cv2.imshow("thresh", thresh)
cv2.imshow("clean", clean)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Thresholded image:
Morphology cleaned image:
Result image:
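If you need the cables as a binary mask instead of drawn on the input (for example to feed a later stage), a small variation of the loop above (my sketch, reusing the same variables; the output file name is hypothetical) fills the accepted contours into an empty image:

# Sketch: collect the contours that pass the aspect test into a binary mask
cable_mask = np.zeros((hh, ww), dtype=np.uint8)
for c in contours:
    rotrect = cv2.minAreaRect(c)
    (center), (dim1, dim2), angle = rotrect
    mindim = min(dim1, dim2)
    if mindim != 0 and max(dim1, dim2) / mindim > aspect_thresh:
        cv2.drawContours(cable_mask, [c], 0, 255, -1)
cv2.imwrite("cable_mask.jpg", cable_mask)  # hypothetical output name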

OpenCV watershed not segmenting oval objects properly

I'm attempting to create a way to process images to count different types of tablets. The following code has been working well for circular objects; however, oval shapes are creating issues that I can't find a workaround for.
import cv2
import numpy as np
import imutils
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

kernel = np.ones((5, 5), np.uint8)
image = cv2.imread('sample.jpg')
shifted = cv2.GaussianBlur(image, (15, 15), 1)
shifted = cv2.pyrMeanShiftFiltering(shifted, 21, 51)
shifted = cv2.erode(shifted, kernel, iterations=1)
shifted = cv2.dilate(shifted, kernel, iterations=1)
cv2.imwrite("step1.jpg", shifted)
gray = cv2.cvtColor(shifted, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255,
                       cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
cv2.imwrite("step2.jpg", thresh)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
cv2.imwrite("step3.jpg", thresh)
thresh = cv2.bitwise_not(thresh)
thresh = cv2.erode(thresh, kernel, iterations=1)
cv2.imwrite("step4.jpg", thresh)
D = ndimage.distance_transform_edt(thresh)
# note: indices=False requires an older scikit-image; newer versions return coordinates instead
localMax = peak_local_max(D, indices=False, min_distance=10,
                          labels=thresh)
markers = ndimage.label(localMax, structure=np.ones((3, 3)))[0]
labels = watershed(-D, markers, mask=thresh)
print("[INFO] {} unique segments found".format(len(np.unique(labels)) - 1))
for label in np.unique(labels):
    if label == 0:
        continue
    mask = np.zeros(gray.shape, dtype="uint8")
    mask[labels == label] = 255
    cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    ((x, y), r) = cv2.minEnclosingCircle(c)
    cv2.circle(image, (int(x), int(y)), int(r), (0, 255, 0), 2)
    cv2.putText(image, "#{}".format(label), (int(x) - 10, int(y)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
cv2.imwrite("step5.jpg", image)
cv2.waitKey(0)
The image being used:
https://imgur.com/a/1U49DeT
The output after filtering:
https://imgur.com/a/vXwrWlG
Any teaching points as to how to fix this would be greatly appreciated.
I think there is a better way to use the watershed operator.
It relies on having a good gradient, but if the images are similar to this one, you should be able to do this effectively. Also, there are very powerful edge detectors today, much better than the one I used in this demo.
import cv2
import numpy as np
import higra as hg
from skimage.segmentation import relabel_sequential
import matplotlib.pyplot as plt

def main():
    img_path = "pills.jpg"
    img = cv2.imread(img_path)
    img = cv2.resize(img, (256, 256))
    img = cv2.GaussianBlur(img, (9, 9), 0)

    edges = cv2.Canny(img, 100, 100)

    size = img.shape[:2]
    graph = hg.get_4_adjacency_graph(size)
    edge_weights = hg.weight_graph(graph, edges, hg.WeightFunction.mean)

    tree, altitudes = hg.watershed_hierarchy_by_area(graph, edge_weights)
    segments = hg.labelisation_horizontal_cut_from_threshold(tree, altitudes, 500)
    segments, _, _ = relabel_sequential(segments)
    print('The number of pills is ', segments.max() - 1)

    plt.imshow(segments)
    plt.show()

if __name__ == "__main__":
    main()
Initially, I resize the image to speed up the computation and apply a blur to reduce the background gradient. I detect the edges (gradient) and create a graph with them as edge weights; then I compute the watershed hierarchy ordered by area and threshold it, obtaining the connected components at that level. From these you can count the number of segments.
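The 500 in the horizontal-cut call is the area threshold that decides how coarse the segmentation is. A quick sketch (my addition; it reuses tree, altitudes, and relabel_sequential, so it would be dropped into main() right after the hierarchy is computed) to inspect several cut levels before settling on one:

# Sketch: print the segment count at several area thresholds
for t in (100, 250, 500, 1000, 2000):
    seg = hg.labelisation_horizontal_cut_from_threshold(tree, altitudes, t)
    seg, _, _ = relabel_sequential(seg)
    print(f"threshold={t}: {seg.max() - 1} segments")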

How to remove jagged edges of an object in an image with Python and OpenCV

I have searched and found two solutions that could solve my problem, but they are not working for me. I want to remove the jagged edges of, or smooth, the attached image. I have tried the following code:
import cv2
import numpy as np
import os

os.chdir("F:/Examples")
image = cv2.imread("image1.jpeg")
blur = cv2.GaussianBlur(image, (21, 21), 0)
mask = np.zeros(image.shape[:2], dtype=np.uint8)
# note: the mask is still all zeros here, so np.where never selects the blurred pixels
output = np.where(mask == np.array([255, 255, 255]), blur, image)
cv2.imwrite("", output)
The above code returns the image unchanged, as if the applied mask did nothing. Here is the input image:
I also tried to implement some solutions from different links, including (How to blur the image according to segmentation map), without success. Any help will be appreciated.
The edges, or the external contour, should be clean, as in this image:
I almost managed to solve it.
The main problem is that the contour is partly curved and partly made of straight lines.
Explanations are in the comments:
import numpy as np
import cv2
from scipy.interpolate import splprep, splev
im = cv2.imread('image1.jpeg')
bk = im.copy()
# Fill background with black color
cv2.floodFill(im, None, seedPoint=(1,1), newVal=(0, 0, 0), loDiff=(5, 5, 5), upDiff=(5, 5, 5))
gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
ret, thresh_gray = cv2.threshold(gray, 5, 255, cv2.THRESH_BINARY)
# Use "open" morphological operation for removing small contours (noise)
thresh_gray = cv2.morphologyEx(thresh_gray, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5)))
# Background
bk[(thresh_gray > 0)] = 0
bk = cv2.morphologyEx(bk, cv2.MORPH_DILATE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20,20)))
#cv2.imshow('bk', bk)
# Foreground
fg = im.copy()
tmm_fg = cv2.morphologyEx(fg, cv2.MORPH_DILATE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20,20)))
fg_gray = cv2.cvtColor(fg, cv2.COLOR_RGB2GRAY)
fg[(fg_gray==0)] = tmm_fg[(fg_gray==0)]
#thresh_gray = cv2.morphologyEx(thresh_gray, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (50,50)));
# Find contours (there is only one contour)
# _, contours, _ = cv2.findContours(thresh_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # 3 outputs works only in OpenCV 3. [-2:] is used for compatibility.
contours, _ = cv2.findContours(thresh_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2:] # https://stackoverflow.com/questions/48291581/how-to-use-cv2-findcontours-in-different-opencv-versions
c = contours[0]
# Smooth contour
# https://agniva.me/scipy/2016/10/25/contour-smoothing.html
x,y = c.T
x = x.tolist()[0]
y = y.tolist()[0]
tck, u = splprep([x,y], u=None, s=1.0, per=1)
u_new = np.linspace(u.min(), u.max(), 150)
x_new, y_new = splev(u_new, tck, der=0)
res_array = [[[int(i[0]), int(i[1])]] for i in zip(x_new,y_new)]
smoothened = np.asarray(res_array, dtype=np.int32)
# Build a mask
mask = np.zeros_like(thresh_gray)
cv2.drawContours(mask, [smoothened], -1, 255, -1)
# For testing
test_im = cv2.cvtColor(thresh_gray, cv2.COLOR_GRAY2RGB)
cv2.drawContours(test_im, [smoothened], 0, (0, 255, 0), 1)
res = bk
res[(mask > 0)] = fg[(mask > 0)]
cv2.imshow('test_im', test_im)
cv2.imshow('res', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
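One knob worth knowing about in the code above is splprep's s parameter: it trades fidelity to the original contour for smoothness. A small sketch (my addition, reusing x, y and thresh_gray from above) to compare a few values visually:

# Sketch: visualize how splprep's smoothing factor s affects the contour
for s in (0.5, 2.0, 10.0):
    tck, u = splprep([x, y], u=None, s=s, per=1)
    x_new, y_new = splev(np.linspace(u.min(), u.max(), 150), tck, der=0)
    pts = np.asarray([[[int(px), int(py)]] for px, py in zip(x_new, y_new)], dtype=np.int32)
    vis = cv2.cvtColor(thresh_gray, cv2.COLOR_GRAY2RGB)
    cv2.drawContours(vis, [pts], 0, (0, 255, 0), 1)
    cv2.imshow('s = {}'.format(s), vis)
cv2.waitKey(0)
cv2.destroyAllWindows()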

Python OpenCV - Extrapolating the largest rectangle off of a set of contour points

I'm trying to make OpenCV detect a bed in an image. I am running the usual grayscale, blur, and Canny steps, and I've tried convex hull. However, there's quite a lot of "noise", which produces extra contours and messes up the object detection. Because of this, I am unable to detect the bed properly.
Here is the input image as well as the Canny Edge Detection result:
As you can see, it's almost there. I have the outline of the bed already, albeit with a gap in the upper right corner - which is preventing me from detecting a closed rectangle.
Here's the code I'm running:
import cv2
import numpy as np

def contoursConvexHull(contours):
    print("contours length = ", len(contours))
    print("contours length of first item = ", len(contours[0]))
    pts = []
    for i in range(0, len(contours)):
        for j in range(0, len(contours[i])):
            pts.append(contours[i][j])
    pts = np.array(pts)
    result = cv2.convexHull(pts)
    print(len(result))
    return result

def auto_canny(image, sigma=0.35):
    # compute the median of the single channel pixel intensities
    v = np.median(image)
    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    edged = cv2.Canny(image, lower, upper)
    # return the edged image
    return edged

# Get our image in color mode (1)
src = cv2.imread("bed_cv.jpg", 1)

# Convert the color from BGR to Gray
srcGray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

# Use Gaussian Blur
srcBlur = cv2.GaussianBlur(srcGray, (3, 3), 0)

# ret is the returned value, otsu is an image
##ret, otsu = cv2.threshold(srcBlur, 0, 255,
##                          cv2.THRESH_BINARY+cv2.THRESH_OTSU)

# Use canny
##srcCanny = cv2.Canny(srcBlur, ret, ret*2, 3)
srcCanny1 = auto_canny(srcBlur, 0.70)

# im is the output image
# contours is the contour list
# I forgot what hierarchy was
im, contours, hierarchy = cv2.findContours(srcCanny1,
                                           cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

##cv2.drawContours(src, contours, -1, (0, 255, 0), 3)

ConvexHullPoints = contoursConvexHull(contours)
##cv2.polylines(src, [ConvexHullPoints], True, (0, 0, 255), 3)

cv2.imshow("Source", src)
cv2.imshow("Canny1", srcCanny1)

cv2.waitKey(0)
Since the contour of the bed isn't closed, I can't fit a rectangle nor detect the contour with the largest area.
The solution I can think of is to extrapolate the largest possible rectangle using the contour points in the hopes of bridging that small gap, but I'm not too sure how to proceed since the rectangle is incomplete.
Since you haven't provided any other examples, I'll give an algorithm that works for this case. But bear in mind that you will have to find ways of adapting it to however the light and background change in other samples.
Since there is a lot of noise and a relatively high dynamic range, I suggest not using Canny. Instead, use adaptive thresholding and find contours on that (it doesn't need edges as input); adaptive thresholding helps by choosing different threshold values for different parts of the image.
My result:
Code:
import cv2
import numpy as np

def clahe(img, clip_limit=2.0, grid_size=(8, 8)):
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=grid_size)
    return clahe.apply(img)

src = cv2.imread("bed.png")

# HSV thresholding to get rid of as much background as possible
hsv = cv2.cvtColor(src.copy(), cv2.COLOR_BGR2HSV)
lower_blue = np.array([0, 0, 120])
upper_blue = np.array([180, 38, 255])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
result = cv2.bitwise_and(src, src, mask=mask)
b, g, r = cv2.split(result)
g = clahe(g, 5, (3, 3))

# Adaptive Thresholding to isolate the bed
img_blur = cv2.blur(g, (9, 9))
img_th = cv2.adaptiveThreshold(img_blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                               cv2.THRESH_BINARY, 51, 2)

# OpenCV 3 signature; in OpenCV 4 use: contours, hierarchy = cv2.findContours(...)
im, contours, hierarchy = cv2.findContours(img_th,
                                           cv2.RETR_CCOMP,
                                           cv2.CHAIN_APPROX_SIMPLE)

# Filter the rectangles by choosing only the big ones
# and choose the brightest rectangle as the bed
max_brightness = 0
canvas = src.copy()
for cnt in contours:
    rect = cv2.boundingRect(cnt)
    x, y, w, h = rect
    if w * h > 40000:
        mask = np.zeros(src.shape, np.uint8)
        mask[y:y+h, x:x+w] = src[y:y+h, x:x+w]
        brightness = np.sum(mask)
        if brightness > max_brightness:
            brightest_rectangle = rect
            max_brightness = brightness
        cv2.imshow("mask", mask)
        cv2.waitKey(0)

x, y, w, h = brightest_rectangle
cv2.rectangle(canvas, (x, y), (x+w, y+h), (0, 255, 0), 1)
cv2.imshow("canvas", canvas)
cv2.imwrite("result.jpg", canvas)
cv2.waitKey(0)
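As a small follow-up (my addition, reusing the variables above; the output file name is hypothetical), once the brightest rectangle is known you can crop the bed region directly:

# Sketch: crop the detected bed region out of the source image
bed_roi = src[y:y+h, x:x+w]
cv2.imwrite("bed_roi.jpg", bed_roi)  # hypothetical output name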
