I am trying to detect a black shape in a photo like the one below.
So far I have a binary image of the shape, but there are still many lines and a lot of noise, so I cannot use findContours() directly because it also picks up the lines. Can you give me some advice or help with this task? I would be very grateful for any help!
Original image
Binary image
import cv2
import numpy as np
import imutils
def color_seg(choice):
    if choice == 'blue':
        lower_hue = np.array([100,30,30])
        upper_hue = np.array([150,148,255])
    elif choice == 'white':
        lower_hue = np.array([0,0,0])
        upper_hue = np.array([0,0,255])
    elif choice == 'black':
        lower_hue = np.array([0,0,0])
        upper_hue = np.array([50,50,100])
    return lower_hue, upper_hue
# Take each frame
frame = cv2.imread('11.jpg')
#frame = cv2.imread('images/road_1.jpg')
frame = imutils.resize(frame, height = 500)
chosen_color = 'black'
# Convert BGR to HSV
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# define range of a color in HSV
lower_hue, upper_hue = color_seg(chosen_color)
# Threshold the HSV image to get only the chosen color
mask = cv2.inRange(hsv, lower_hue, upper_hue)
kernel = np.ones((5,5),np.uint8)
erosion = cv2.erode(mask,kernel,iterations = 10)
erosion = cv2.filter2D(mask,-1,kernel)
erosion = cv2.GaussianBlur(mask,(5,5),cv2.BORDER_DEFAULT)
cv2.imshow('frame',frame)
cv2.imshow('mask',mask)
cv2.waitKey(0)
You're on the right track. After obtaining your binary image you need to perform morphological operations to filter out noise and isolate the object. Afterwards, we can find contours and filter them using contour approximation and contour area. We draw the detected region onto a blank mask, then bitwise-and it with the original image. Here are the steps:
Binary image
Morphological operations
Detected region in green
Isolated result after bitwise operations
Code
import numpy as np
import cv2
# Color threshold
image = cv2.imread('1.jpg')
original = image.copy()
blank = np.zeros(image.shape, dtype=np.uint8)
blur = cv2.GaussianBlur(image, (7,7), 0)
hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
lower = np.array([0, 0, 0])
upper = np.array([179, 93, 97])
mask = cv2.inRange(hsv, lower, upper)
# Morph operations
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7,7))
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel, iterations=1)
close = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel, iterations=2)
# Find contours and filter using contour approximation + contour area
cnts = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.04 * peri, True)
    area = cv2.contourArea(c)
    if len(approx) > 3 and area > 1000:
        cv2.drawContours(image, [c], -1, (36,255,12), -1)
        cv2.drawContours(blank, [c], -1, (255,255,255), -1)
# Bitwise-and for result
blank = cv2.cvtColor(blank, cv2.COLOR_BGR2GRAY)
result = cv2.bitwise_and(original,original,mask=blank)
result[blank==0] = (255,255,255)
cv2.imshow('mask', mask)
cv2.imshow('opening', opening)
cv2.imshow('close', close)
cv2.imshow('result', result)
cv2.imshow('image', image)
cv2.waitKey()
Related
I have the following image of a lego board with some bricks on it.
Now I am trying to detect the thick black lines (connecting the white squares) with OpenCV. I have already experimented a lot with HoughLinesP, converted the image to gray or b/w beforehand, applied blur, ...
Nothing led to usable results.
import cv2
import numpy as np

# Read image
img = cv2.imread('image.jpg', cv2.IMREAD_GRAYSCALE)
# Resize Image
img = cv2.resize(img, (0,0), fx=0.25, fy=0.25)
# Initialize output
out = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
# Median blurring to get rid of the noise; invert image
img = cv2.medianBlur(img, 5)
# Adaptive Threshold
bw = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,15,8)
# HoughLinesP
linesP = cv2.HoughLinesP(bw, 500, np.pi / 180, 50, None, 50, 10)
# Draw Lines
if linesP is not None:
    for i in range(0, len(linesP)):
        l = linesP[i][0]
        cv2.line(out, (l[0], l[1]), (l[2], l[3]), (0,0,255), 3, cv2.LINE_AA)
The adaptive threshold lets you see the edges quite well, but with HoughLinesP you don't get anything usable out of it.
What am I doing wrong?
Thanks, both @fmw42 and @jeru-luke, for your great solutions to this problem! I liked isolating/masking the green board, so I combined both:
import cv2
import numpy as np
img = cv2.imread("image.jpg")
scale_percent = 50 # percent of original size
width = int(img.shape[1] * scale_percent / 100)
height = int(img.shape[0] * scale_percent / 100)
dim = (width, height)
# resize image
img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
a_component = lab[:,:,1]
# binary threshold the a-channel
th = cv2.threshold(a_component,127,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
# numpy black
black = np.zeros((img.shape[0],img.shape[1]),np.uint8)
# function to obtain the largest contour in given image after filling it
def get_region(image):
    contours, hierarchy = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    c = max(contours, key = cv2.contourArea)
    mask = cv2.drawContours(black,[c],0,255, -1)
    return mask
mask = get_region(th)
# turning the region outside the green block white
green_block = cv2.bitwise_and(img, img, mask = mask)
green_block[black==0]=(255,255,255)
# median blur
median = cv2.medianBlur(green_block, 5)
# threshold on black
lower = (0,0,0)
upper = (15,15,15)
thresh = cv2.inRange(median, lower, upper)
# apply morphology open and close
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (29,29))
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# filter contours on area
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
result = green_block.copy()
for c in contours:
    area = cv2.contourArea(c)
    if area > 1000:
        cv2.drawContours(result, [c], -1, (0, 0, 255), 2)
# view result
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here I am presenting a repeated segmentation approach using color.
This answer is based on the LAB color space.
1. Isolating the green lego block
import cv2
import numpy as np

img = cv2.imread(image_path)
lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
a_component = lab[:,:,1]
# binary threshold the a-channel
th = cv2.threshold(a_component,127,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
Thresholded a-channel (th):
# function to obtain the largest contour in given image after filling it
def get_region(image):
    contours, hierarchy = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    c = max(contours, key = cv2.contourArea)
    black = np.zeros((image.shape[0], image.shape[1]), np.uint8)
    mask = cv2.drawContours(black,[c],0,255, -1)
    return mask
mask = get_region(th)
Mask of the largest filled contour (mask):
# turning the region outside the green block white
green_block = cv2.bitwise_and(img, img, mask = mask)
green_block[mask==0]=(255,255,255)
Isolated green block (green_block):
2. Segmenting the road
To get an approximate region of the road, I subtracted th from mask.
cv2.subtract() performs saturating arithmetic subtraction, so cv2 clips negative values to zero (a small sketch after this step illustrates the difference from plain NumPy subtraction).
road = cv2.subtract(mask,th)
# `road` contains some unwanted spots/contours which are removed using the function "get_region"
only_road = get_region(road)
Road region after cleanup (only_road):
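(A quick aside, not part of the original answer.) The distinction matters because the masks are uint8 arrays: cv2.subtract saturates at 0, whereas plain NumPy subtraction wraps around. A minimal sketch with made-up values:

import cv2
import numpy as np

a = np.array([[0, 255, 100]], dtype=np.uint8)
b = np.array([[10, 5, 200]], dtype=np.uint8)

print(cv2.subtract(a, b))   # [[  0 250   0]]  negative results are clipped to 0
print(a - b)                # [[246 250 156]]  uint8 wrap-around, usually not what you want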
Masking only the road segment with the original image gives
road_colored = cv2.bitwise_and(img, img, mask = only_road)
road_colored[only_road==0]=(255,255,255)
Road segment masked onto the original image (road_colored):
In the above image only the black regions (the road) remain, which are easy to segment:
# converting to grayscale and applying threshold
th2 = cv2.threshold(road_colored[:,:,1],127,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)[1]
# using portion of the code from fmw42's answer, to get contours above certain area
contours = cv2.findContours(th2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
result = img.copy()
for c in contours:
    area = cv2.contourArea(c)
    if area > 1000:
        cv2.drawContours(result, [c], -1, (0, 0, 255), 4)
Final result (result):
Note:
To clean up the end result, you can apply morphological operations on th2 before drawing contours.
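For example, a minimal sketch of that cleanup (kernel sizes and iteration counts here are guesses that would need tuning for the actual image) could be:

# assumed cleanup step: open to remove small specks, close to fill small gaps
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5))
th2 = cv2.morphologyEx(th2, cv2.MORPH_OPEN, kernel, iterations=1)
th2 = cv2.morphologyEx(th2, cv2.MORPH_CLOSE, kernel, iterations=2)
# then find and filter contours on the cleaned th2 exactly as above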
Here is one way to do that in Python/OpenCV.
Read the image
Apply median blur
Threshold on black color using cv2.inRange()
Apply morphology to clean it up
Get contours and filter on area
Draw contours on input
Save the result
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('black_lines.jpg')
# median blur
median = cv2.medianBlur(img, 5)
# threshold on black
lower = (0,0,0)
upper = (15,15,15)
thresh = cv2.inRange(median, lower, upper)
# apply morphology open and close
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
morph = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (29,29))
morph = cv2.morphologyEx(morph, cv2.MORPH_CLOSE, kernel)
# filter contours on area
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
result = img.copy()
for c in contours:
    area = cv2.contourArea(c)
    if area > 1000:
        cv2.drawContours(result, [c], -1, (0, 0, 255), 2)
# save result
cv2.imwrite("black_lines_threshold.jpg", thresh)
cv2.imwrite("black_lines_morphology.jpg", morph)
cv2.imwrite("black_lines_result.jpg", result)
# view result
cv2.imshow("threshold", thresh)
cv2.imshow("morphology", morph)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology image:
Result:
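As a side note on the question's HoughLinesP attempt (an observation, not part of the answer above): the second argument of cv2.HoughLinesP is the distance resolution rho in pixels, so passing 500 makes the accumulator bins far too coarse to detect anything useful; it should normally be 1. If you still want line segments rather than contours, a rough sketch would be to run Hough on a cleaned-up mask such as morph, with parameter values that are only guesses and would need tuning:

# hypothetical follow-up, not from the answer above
line_vis = img.copy()
linesP = cv2.HoughLinesP(morph, 1, np.pi / 180, threshold=80, minLineLength=100, maxLineGap=20)
if linesP is not None:
    for l in linesP[:, 0]:
        cv2.line(line_vis, (int(l[0]), int(l[1])), (int(l[2]), int(l[3])), (255, 0, 0), 2, cv2.LINE_AA)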
I have the image below; I have removed the noise (dots in the background), and now I want to draw a bounding box around the block of text in the image. How can I do that using Python OpenCV?
Input image:
Noise Removed Image:
Here is the code used to remove the noise in the background. Where can I change it so that it saves images with bounding boxes around the text?
import cv2
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
def remove_dots(image_path,outdir):
    image = cv2.imread(image_path)
    mask = np.zeros(image.shape, dtype=np.uint8)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (3,3), 0)
    thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,51,9)
    # Create horizontal kernel then dilate to connect text contours
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
    dilate = cv2.dilate(thresh, kernel, iterations=2)
    # Find contours and filter out noise using contour approximation and area filtering
    cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    for c in cnts:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.04 * peri, True)
        x,y,w,h = cv2.boundingRect(c)
        area = w * h
        ar = w / float(h)
        if area > 1200 and area < 50000 and ar < 8:
            cv2.drawContours(mask, [c], -1, (255,255,255), -1)
    # Bitwise-and input image and mask to get result
    mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
    result = cv2.bitwise_and(image, image, mask=mask)
    result[mask==0] = (255,255,255) # Color background white
    cv2.imwrite(os.path.join(outdir,os.path.basename(image_path)),result)

for jpgfile in glob.glob(r'C:\custom\TableDetectionWork\text_detection_dataset/*'):
    print(jpgfile)
    remove_dots(jpgfile,r'C:\custom\TableDetectionWork\textdetect/')
You can do that by using a horizontal morphology filter to merge the letters in a mask image. Then find the contours. Then get the bounding boxes.
Input:
import cv2
import numpy as np
img = cv2.imread("john.jpg")
# convert to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# invert
thresh = 255 - thresh
# apply horizontal morphology close
kernel = np.ones((5 ,191), np.uint8)
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get external contours
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# draw contours
result = img.copy()
for cntr in contours:
    # get bounding boxes
    pad = 10
    x,y,w,h = cv2.boundingRect(cntr)
    cv2.rectangle(result, (x-pad, y-pad), (x+w+pad, y+h+pad), (0, 0, 255), 4)
# save result
cv2.imwrite("john_bbox.png",result)
# display result
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("result", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Morphology Closed Image:
Bounding Boxes Image:
Here is the core of your code modified to do what you want in Python/OpenCV. It is just adding my code to the end of your code.
Input:
import cv2
import numpy as np
image = cv2.imread("john.jpg")
mask = np.zeros(image.shape, dtype=np.uint8)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (3,3), 0)
thresh = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,51,9)
# Create horizontal kernel then dilate to connect text contours
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5,5))
dilate = cv2.dilate(thresh, kernel, iterations=2)
# Find contours and filter out noise using contour approximation and area filtering
cnts = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.04 * peri, True)
    x,y,w,h = cv2.boundingRect(c)
    area = w * h
    ar = w / float(h)
    if area > 1200 and area < 50000 and ar < 8:
        cv2.drawContours(mask, [c], -1, (255,255,255), -1)
# Bitwise-and input image and mask to get result
mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
result = cv2.bitwise_and(image, image, mask=mask)
result[mask==0] = (255,255,255) # Color background white
# NEW CODE HERE TO END _____________________________________________________________
gray2 = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
thresh2 = cv2.threshold(gray2, 128, 255, cv2.THRESH_BINARY)[1]
thresh2 = 255 - thresh2
kernel = np.ones((5 ,191), np.uint8)
close = cv2.morphologyEx(thresh2, cv2.MORPH_CLOSE, kernel)
# get external contours
contours = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
# draw contours
result2 = result.copy()
for cntr in contours:
    # get bounding boxes
    pad = 10
    x,y,w,h = cv2.boundingRect(cntr)
    cv2.rectangle(result2, (x-pad, y-pad), (x+w+pad, y+h+pad), (0, 0, 255), 4)
cv2.imwrite("john_bboxes.jpg", result2)
cv2.imshow("mask",mask)
cv2.imshow("thresh",thresh)
cv2.imshow("dilate",dilate)
cv2.imshow("result",result)
cv2.imshow("gray2",gray2)
cv2.imshow("thresh2",thresh2)
cv2.imshow("close",close)
cv2.imshow("result2",result2)
cv2.waitKey(0)
cv2.destroyAllWindows()
Bounding Boxes on Your Result:
I need to extract the 12 oval shapes from the image and store them in separate variables, say 1 to 12.
The original image was as follows
Original Image:
Output image:
Can someone help me extract all those oval shapes into different variables?
My code is:
import cv2
import numpy as np
path = r'/home/parallels/Desktop/Opencv/data/test.JPG'
i = cv2.imread(path, -1)
img_rgb = cv2.resize(i, (1280,720))
cv2.namedWindow("Original Image",cv2.WINDOW_NORMAL)
img = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2HSV)
img = cv2.bilateralFilter(img,9,105,105)
r,g,b=cv2.split(img)
equalize1= cv2.equalizeHist(r)
equalize2= cv2.equalizeHist(g)
equalize3= cv2.equalizeHist(b)
equalize=cv2.merge((r,g,b))
equalize = cv2.cvtColor(equalize,cv2.COLOR_RGB2GRAY)
ret,thresh_image = cv2.threshold(equalize,0,255,cv2.THRESH_OTSU+cv2.THRESH_BINARY)
equalize= cv2.equalizeHist(thresh_image)
canny_image = cv2.Canny(equalize,250,255)
canny_image = cv2.convertScaleAbs(canny_image)
kernel = np.ones((3,3), np.uint8)
dilated_image = cv2.dilate(canny_image,kernel,iterations=1)
contours, hierarchy = cv2.findContours(dilated_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours= sorted(contours, key = cv2.contourArea, reverse = True)[:10]
c=contours[0]
print(cv2.contourArea(c))
final = cv2.drawContours(img, [c], -1, (255,0, 0), 3)
mask = np.zeros(img_rgb.shape,np.uint8)
new_image = cv2.drawContours(mask,[c],0,255,-1,)
new_image = cv2.bitwise_and(img_rgb, img_rgb, mask=equalize)
cv2.namedWindow("new",cv2.WINDOW_NORMAL)
cv2.imshow("new",new_image)
cv2.waitKey(0)
You're on the right track. After obtaining your binary image, you can filter on contour area and aspect ratio. We can sort the contours from left to right using imutils.contours.sort_contours(). We find contours, then filter using cv2.contourArea and the aspect ratio of each contour's bounding rectangle from cv2.boundingRect. If a contour passes this filter, we draw it and append it to an ovals list to keep track of the contours. Here are the results:
Filtered mask
Results
Isolated ovals
Output from oval list
Oval contours: 12
Code
import cv2
import numpy as np
from imutils import contours
# Load image, resize, convert to HSV, bilaterial filter
image = cv2.imread('1.jpg')
resize = cv2.resize(image, (1280,720))
original = resize.copy()
mask = np.zeros(resize.shape[:2], dtype=np.uint8)
hsv = cv2.cvtColor(resize, cv2.COLOR_RGB2HSV)
hsv = cv2.bilateralFilter(hsv,9,105,105)
# Split into channels and equalize
r,g,b=cv2.split(hsv)
equalize1 = cv2.equalizeHist(r)
equalize2 = cv2.equalizeHist(g)
equalize3 = cv2.equalizeHist(b)
equalize = cv2.merge((r,g,b))
equalize = cv2.cvtColor(equalize,cv2.COLOR_RGB2GRAY)
# Blur and threshold for binary image
blur = cv2.GaussianBlur(equalize, (3,3), 0)
thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# Find contours, sort from left-to-right
# Filter using contour area and aspect ratio filtering
ovals = []
num = 0
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
(cnts, _) = contours.sort_contours(cnts, method="left-to-right")
for c in cnts:
    area = cv2.contourArea(c)
    x,y,w,h = cv2.boundingRect(c)
    ar = w / float(h)
    if area > 1000 and ar < .8:
        cv2.drawContours(resize, [c], -1, (36,255,12), 3)
        cv2.drawContours(mask, [c], -1, (255,255,255), -1)
        cv2.putText(resize, str(num), (x,y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (36,55,12), 2)
        ovals.append(c)
        num += 1
result = cv2.bitwise_and(original, original, mask=mask)
result[mask==0] = (255,255,255)
print('Oval contours: {}'.format(len(ovals)))
cv2.imshow('equalize', equalize)
cv2.imshow('thresh', thresh)
cv2.imshow('resize', resize)
cv2.imshow('result', result)
cv2.imshow('mask', mask)
cv2.waitKey()
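Since the question asks for each oval in a separate variable, a minimal follow-up sketch (an assumption on my part, using the ovals list built above) could crop every detected contour into its own image via its bounding rectangle:

# hypothetical addition: one cropped image per oval, keyed 1..12
oval_images = {}
for i, c in enumerate(ovals, start=1):
    x, y, w, h = cv2.boundingRect(c)
    oval_images[i] = original[y:y+h, x:x+w].copy()
    cv2.imshow('oval {}'.format(i), oval_images[i])
cv2.waitKey()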
I'm working on a sample task where I need to highlight unusual marks on mobile devices. I'm trying with OpenCV in Python, but I'm not getting the actual contours for the unusual marks.
The input image looks like this:
And the expected output image is below:
I'm trying something like the code below, but it didn't work.
import cv2
import numpy as np
from matplotlib import pyplot as plt
blurValue = 15
img_path = "input.jpg"
# reading the image
image = cv2.imread(img_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (blurValue, blurValue), 0)
edged = cv2.Canny(image, 100, 255)
#applying closing function
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)
lower = np.array([4, 20, 93])
upper = np.array([83, 79, 166])
# hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
# blur = cv2.GaussianBlur(hsv, (blurValue, blurValue), 0)
mask = cv2.inRange(closed, lower, upper)
result_1 = cv2.bitwise_and(frame, frame, mask = mask)
cnts = cv2.findContours(result_1.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    cv2.drawContours(image, [approx], -1, (0, 255, 0), 2)
plt.imshow(image)
plt.title("image")
plt.show()
Any help will be appreciated. Thank you.
My suggestion would be to use adaptive thresholding and filter on area (and possibly other characteristics). Here is my code and results using Python OpenCV.
Input:
import cv2
import numpy as np
# read image
img = cv2.imread("iphone.jpg")
# convert img to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# apply gaussian blur
blur = cv2.GaussianBlur(gray, (29,29), 0)
# do adaptive threshold on gray image
thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 51, 3)
# apply morphology open then close
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (17, 17))
open = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)
close = cv2.morphologyEx(open, cv2.MORPH_CLOSE, kernel)
# Get contours
cnts = cv2.findContours(close, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
result = img.copy()
for c in cnts:
    area = cv2.contourArea(c)
    if area < 10000 and area > 5000:
        cv2.drawContours(result, [c], -1, (0, 255, 0), 2)
# write results to disk
cv2.imwrite("iphone_thresh.jpg", thresh)
cv2.imwrite("iphone_close.jpg", close)
cv2.imwrite("iphone_markings.jpg", result)
# display it
cv2.imshow("IMAGE", img)
cv2.imshow("THRESHOLD", thresh)
cv2.imshow("CLOSED", close)
cv2.imshow("RESULT", result)
cv2.waitKey(0)
Thresholded Image:
Morphology Processed Image:
Final Result:
I would also suggest that you align the image with a known clean iPhone image and create a mask of the camera and logo, etc., markings so that you can filter the results to exclude those (and perhaps even the border of the camera outline).
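A minimal sketch of that idea, assuming you have a hand-made reference mask (a hypothetical exclude_mask.png, white where the camera, logo and other expected features are) and that the photo is already aligned with it:

# hypothetical: suppress expected device features before finding contours
exclude = cv2.imread("exclude_mask.png", cv2.IMREAD_GRAYSCALE)
exclude = cv2.resize(exclude, (close.shape[1], close.shape[0]))
filtered = cv2.bitwise_and(close, cv2.bitwise_not(exclude))
cnts = cv2.findContours(filtered, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

Aligning the photo to the reference (for example with a feature-based homography) is left out here for brevity.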
My idea is to use color keying to segment my body. If I wear a red t-shirt, I would like to find the center of the red t-shirt from its contour. Is there a good way to do this?
Here is my code:
import numpy as np
import cv2
import imutils
cap = cv2.VideoCapture(0)

while True:
    # Take each frame
    _, frame = cap.read()
    frame = imutils.resize(frame, width = 400)

    # Convert BGR to HSV
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # define range of red color in HSV
    lower_red = np.array([0,50,50])
    upper_red = np.array([10,255,255])
    lower_skin = np.array([0, 48, 80])
    upper_skin = np.array([20, 255, 255])
    skin_mask = cv2.inRange(hsv, lower_skin, upper_skin)

    # Threshold the HSV image to get only red colors
    red_mask = cv2.inRange(hsv, lower_red, upper_red)
    mask = red_mask + skin_mask

    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11))
    mask = cv2.erode(mask, kernel, iterations = 2)
    mask = cv2.dilate(mask, kernel, iterations = 2)
    mask = cv2.GaussianBlur(mask, (3, 3), 0)

    skin = cv2.bitwise_and(frame, frame, mask = mask)
    gray = cv2.cvtColor(skin, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if imutils.is_cv2() else cnts[1]

    # loop over the contours
    for c in cnts:
        cv2.drawContours(skin, [c], -1, (0, 255, 0), 2)

    cv2.imshow("Image", skin)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cv2.destroyAllWindows()
So at the moment I only draw the contours of the t-shirt and the skin. But I would like to find the center of the red t-shirt contour. Is this possible? Do I have to filter red on its own and not add it to the skin mask? Maybe my current code is not able to do this; I would be happy for any proposals! :)
Find the t-shirt using your red mask and cv2.findContours, then find its center using cv2.moments(contour) as described in the docs:

for cnt in contours:
    M = cv2.moments(cnt)
    center_x = int(M['m10'] / M['m00'])
    center_y = int(M['m01'] / M['m00'])
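To connect that to the question: a minimal sketch (my assumptions: use red_mask on its own rather than the combined mask, treat the largest red contour as the t-shirt, and guard against a zero area) that could go inside the existing while loop:

# hypothetical addition inside the loop: mark the center of the largest red contour
red_cnts = cv2.findContours(red_mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
red_cnts = red_cnts[0] if imutils.is_cv2() else red_cnts[1]
if red_cnts:
    shirt = max(red_cnts, key=cv2.contourArea)
    M = cv2.moments(shirt)
    if M['m00'] != 0:
        center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))
        cv2.circle(frame, center, 5, (0, 255, 0), -1)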