I'm facing a problem while removing a checkerboard pattern. I'm using cv2.threshold, but it selects unexpected pixels too (marked in red).
import cv2
import numpy as np
input = cv2.imread('image.png')
ret, logo_mask = cv2.threshold(input[:,:,0], 0, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)
cv2.imshow('logo_mask', logo_mask)
cv2.waitKey()
Input image:
Output image:
Can anyone help?
Getting a perfect result that covers all cases is challenging.
The following solution assumes that the white checkerboard color is (255, 255, 255), and the gray is (230, 230, 230).
Another assumption is that clusters of those specific colors in other parts of the image are very small.
We may use the following stages:
Find "white mask" and "gray mask" where color is (255, 255, 255) and (230, 230, 230).
Create unified mask using bitwise or.
Find contours, and remove small contours from the mask (assumed to be "noise").
Code sample:
import cv2
import numpy as np
input = cv2.imread('image.png')
white_mask = np.all(input == 255, 2).astype(np.uint8)*255 # cv2.inRange(input, (255, 255, 255), (255, 255, 255))
gray_mask = np.all(input == 230, 2).astype(np.uint8)*255 # gray_mask = cv2.inRange(input, (230, 230, 230), (230, 230, 230))
mask = cv2.bitwise_or(white_mask, gray_mask) # Create unified mask
ctns = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[-2] # Find contours
# Remove small contours from mask
for c in ctns:
    area = cv2.contourArea(c)  # Find the area of each contour
    if area < 10:  # Ignore small contours (assumed to be noise).
        cv2.drawContours(mask, [c], 0, 0, -1)
mask = cv2.dilate(mask, np.ones((3, 3), np.uint8)) # Dilate the mask - "cosmetics"
output = cv2.copyTo(input, 255-mask) # Put black color in the masked part.
# Show images for testing
cv2.imshow('input', input)
cv2.imshow('mask', mask)
cv2.imshow('output', output)
cv2.waitKey()
cv2.destroyAllWindows()
white_mask:
gray_mask:
mask:
output:
In case there are large white areas or gray areas in the foreground part, the above solution may not work.
I thought of a process for finding only the areas that overlap a boundary between a white and a gray rectangle.
It doesn't work, because small parts between the tree branches are excluded.
The following code may give you some inspiration:
import cv2
import numpy as np
input = cv2.imread('image.png')
#ret, logo_mask = cv2.threshold(input[:,:,0], 0, 255, cv2.THRESH_BINARY|cv2.THRESH_OTSU)
white_mask = np.all(input == 255, 2).astype(np.uint8)*255 # cv2.inRange(input, (255, 255, 255), (255, 255, 255))
gray_mask = np.all(input == 230, 2).astype(np.uint8)*255 # gray_mask = cv2.inRange(input, (230, 230, 230), (230, 230, 230))
cv2.imwrite('white_mask.png', white_mask)
cv2.imwrite('gray_mask.png', gray_mask)
# Apply opening for removing small clusters
opened_white_mask = cv2.morphologyEx(white_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
opened_gray_mask = cv2.morphologyEx(gray_mask, cv2.MORPH_OPEN, np.ones((3, 3), np.uint8))
cv2.imwrite('opened_white_mask.png', opened_white_mask)
cv2.imwrite('opened_gray_mask.png', opened_gray_mask)
white_mask_shell = cv2.dilate(opened_white_mask, np.ones((3, 3), np.uint8)) - opened_white_mask # Dilate white_mask and keep only the "shell"
gray_mask_shell = cv2.dilate(opened_gray_mask, np.ones((3, 3), np.uint8)) - opened_gray_mask # Dilate gray_mask and keep only the "shell"
white_mask_shell = cv2.dilate(white_mask_shell, np.ones((3, 3), np.uint8)) # Dilate the "shell"
gray_mask_shell = cv2.dilate(gray_mask_shell, np.ones((3, 3), np.uint8)) # Dilate the "shell"
cv2.imwrite('white_mask_shell.png', white_mask_shell)
cv2.imwrite('gray_mask_shell.png', gray_mask_shell)
overlap_shell = cv2.bitwise_and(white_mask_shell, gray_mask_shell)
cv2.imwrite('overlap_shell.png', overlap_shell)
dilated_overlap_shell = cv2.dilate(overlap_shell, np.ones((17, 17), np.uint8))
mask = cv2.bitwise_or(cv2.bitwise_and(white_mask, dilated_overlap_shell), cv2.bitwise_and(gray_mask, dilated_overlap_shell))
cv2.imshow('input', input)
cv2.imshow('white_mask', white_mask)
cv2.imshow('gray_mask', gray_mask)
cv2.imshow('opened_white_mask', opened_white_mask)
cv2.imshow('opened_gray_mask', opened_gray_mask)
cv2.imshow('overlap_shell', overlap_shell)
cv2.imshow('dilated_overlap_shell', dilated_overlap_shell)
cv2.imshow('mask', mask)
cv2.waitKey()
cv2.destroyAllWindows()
I have the following code:
import cv2 as cv
import numpy as np
image = cv.imread("input1.jpg")
img_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
img_denoised = cv.GaussianBlur(img_gray,(5,5),2)
ret, thresh = cv.threshold(img_denoised, 216, 255, cv.THRESH_BINARY)
kernel = np.ones((1,1),np.uint8)
opening = cv.dilate(thresh, kernel)
opening = cv.erode(opening, kernel)
# detect the contours on the binary image using cv.CHAIN_APPROX_NONE
contours, hierarchy = cv.findContours(image=opening, mode=cv.RETR_TREE, method=cv.CHAIN_APPROX_NONE)
for i in contours:
    x, y, w, h = cv.boundingRect(i)
    cv.drawContours(image, [i], -1, (0, 0, 255), 2)
cv.imshow("A.jpg", image)
cv.waitKey(0)
cv.destroyAllWindows()
Output:
It only shows the stars with red contours, but I want all the text to have red contours too, including the background. Here is the original file:
Many thanks in advance!
I messed with this a bit, and the best outcome I could get is the following. I think with some tweaking you could ignore the shading; since I'm converting to grayscale it seems to be dropping the correct contour on the shapes, but the text is working as expected:
import cv2
import numpy as np
src = cv2.imread('c:\\input1.jpg')
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
# blur
blur = cv2.GaussianBlur(gray, (3, 3), 0)
# canny edge
canny = cv2.Canny(blur, 100, 200)
# dilate
kernel = np.ones((5, 5))
dilate = cv2.dilate(canny, kernel, iterations=1)
# find contours
contours, hierarchy = cv2.findContours(
dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# draw contours
cv2.drawContours(src, contours, -1, (0, 255, 0), 3)
cv2.imshow("a.jpg", src)
cv2.waitKey()
I'm attempting to remove all but the largest contour while retaining the child contours within it. Effectively, I want to go from here: Input Image to: Output Image. I don't know how to go about this, though (I'm not a programmer by any stretch and had help putting together the code below). The code I do have doesn't retain the child contours. Output of the code below:
import cv2 as cv
import numpy as np
img = cv.imread('small_contour.jpg')
image_contours = np.zeros((img.shape[1], img.shape[0], 1), np.uint8)
image_binary = np.zeros((img.shape[1], img.shape[0], 1), np.uint8)
for channel in range(img.shape[2]):
    ret, image_thresh = cv.threshold(img[:, :, channel], 127, 255, cv.THRESH_BINARY)
    contours = cv.findContours(image_thresh, 1, 1)[0]
    cv.drawContours(image_contours, contours, -1, (255, 255, 255), 3)
contours = cv.findContours(image_contours, cv.RETR_LIST,
cv.CHAIN_APPROX_SIMPLE)[0]
cv.drawContours(image_binary, [max(contours, key = cv.contourArea)],
-1, (255, 255, 255), -1)
cv.imwrite('fill_contour.jpg', image_binary)
cv.imshow('Small Contour', image_binary)
cv.waitKey(0)
What your code does in the line cv.drawContours(image_binary, [max(contours, key = cv.contourArea)], -1, (255, 255, 255), -1) is set the pixels of image_binary corresponding to the boundary and interior of the biggest contour found by cv.findContours to the color (255, 255, 255).
When creating image_binary you are also swapping width and height, which is an error.
If you wanted to keep track of the children explicitly, you would use cv.RETR_TREE, then find the index of the parent and look for all contours with that parent, doing it recursively for nested contours (children of children, ...). See https://docs.opencv.org/4.1.0/d9/d8b/tutorial_py_contours_hierarchy.html. But this is not needed for your goal.
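For reference, here is a minimal sketch of that hierarchy traversal (an illustration only, assuming the same VS0L9.jpg input; it is not required for the mask approach below):

import cv2
import numpy as np

# Sketch: keep the biggest contour and all contours nested inside it,
# using the hierarchy returned by RETR_TREE.
im = cv2.imread('VS0L9.jpg', cv2.IMREAD_GRAYSCALE)
contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
hierarchy = hierarchy[0]  # rows of [next, previous, first_child, parent]

def collect_descendants(idx):
    """Return the indices of all contours nested inside contour idx."""
    result = []
    child = hierarchy[idx][2]          # first child, or -1 if none
    while child != -1:
        result.append(child)
        result += collect_descendants(child)
        child = hierarchy[child][0]    # next sibling on the same level
    return result

biggest = max(range(len(contours)), key=lambda i: cv2.contourArea(contours[i]))
keep = [biggest] + collect_descendants(biggest)
canvas = np.zeros_like(im)
cv2.drawContours(canvas, [contours[i] for i in keep], -1, 255, 1)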
You can use the result you currently have as a mask for the original image, but the code needs some fixes, starting from the linked image named VS0L9.jpg.
Note that I'm using cv2, just change to cv.
Load the image
im = cv2.imread('VS0L9.jpg', cv2.IMREAD_GRAYSCALE)
Find the contours (I used RETR_TREE)
contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
Find the bigger contour:
bigger = max(contours, key=lambda item: cv2.contourArea(item))
Initialize the mask (what you tried with image_binary):
the_mask = np.zeros_like(im)
Draw and fill the bigger contour on the mask image; the region of interest is set to (255, 255, 255):
cv2.drawContours(the_mask, [bigger], -1, (255, 255, 255), cv2.FILLED)
At this point the mask looks like the output of your code, but with proper width and height.
Use the mask with the original image to show only the area of interest:
res = cv2.bitwise_and(im, im, mask = the_mask)
Or, alternatively:
res = im.copy()
res[the_mask == 0] = 0
Now res is the desired result.
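Putting the steps above together, a minimal consolidated sketch (assuming the input is the linked VS0L9.jpg) could look like this:

import cv2
import numpy as np

# Consolidated sketch of the steps above.
im = cv2.imread('VS0L9.jpg', cv2.IMREAD_GRAYSCALE)

contours, hierarchy = cv2.findContours(im, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
bigger = max(contours, key=cv2.contourArea)

the_mask = np.zeros_like(im)
cv2.drawContours(the_mask, [bigger], -1, 255, cv2.FILLED)  # fill the biggest contour

res = cv2.bitwise_and(im, im, mask=the_mask)  # keep only the area of interest

cv2.imshow('mask', the_mask)
cv2.imshow('result', res)
cv2.waitKey(0)
cv2.destroyAllWindows()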
You can use morphological reconstruction, with an eroded image as the marker.
import cv2
img = cv2.imread('VS0L9.jpg', cv2.IMREAD_GRAYSCALE)
thresh = cv2.threshold(img, 40, 255, cv2.THRESH_BINARY)[1]
kernel=cv2.getStructuringElement(cv2.MORPH_RECT, (17,17))
kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT, (3,3))
marker = cv2.erode(thresh,kernel,iterations = 5)
while True:
    tmp = marker.copy()
    marker = cv2.dilate(marker, kernel2)
    marker = cv2.min(thresh, marker)
    difference = cv2.subtract(marker, tmp)
    if cv2.countNonZero(difference) == 0:
        break
cv2.imwrite('out.png', marker)
cv2.imshow('result', marker)
cv2.waitKey(0)
cv2.destroyAllWindows()
I need to remove the gray drawing from the image background and keep only the symbols drawn over it.
Here is my code to do that using morphologyEx, but it did not remove the entire gray drawing in the background.
import cv2
import numpy as np

img_path = "images/new_drawing.png"
img = cv2.imread(img_path)
kernel = np.ones((2,2), dtype=np.uint8)
result = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel, iterations=1)
cv2.imshow('Without background',result);
cv2.waitKey(0)
cv2.destroyAllWindows()
I also tried the following and got the expected result in grayscale, but I was unable to convert it to BGR.
Here is my code
img = cv2.imread('images/new_drawing.png')
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
med_blur = cv2.medianBlur(gray_img, ksize=3)
_, thresh = cv2.threshold(med_blur, 190, 255, cv2.THRESH_BINARY)
blending = cv2.addWeighted(gray_img, 0.5, thresh, 0.9, gamma=0)
cv2.imshow("blending", blending);
I also used contours to identify the symbols and draw them onto a white image, but the problem is that it also identifies the background drawing, which I don't want.
Input image
Expected output image
Also, the drawing will always be in gray, as in the image.
Please help me out to get a better result.
You are almost there...
Instead of using cv2.inRange to "catch" the non-gray pixels, I suggest using cv2.inRange to catch all the pixels you want to change to white:
mask = cv2.inRange(hsv, (0, 0, 100), (255, 5, 255))
The hue range is irrelevant.
The saturation is close to zero (shades of gray).
The brightness excludes the black pixels (which you would like to keep).
In order to get a nicer solution, I also used the following additional stages:
Build a mask of non-black pixels:
nzmask = cv2.inRange(hsv, (0, 0, 5), (255, 255, 255))
Erode the above mask:
nzmask = cv2.erode(nzmask, np.ones((3,3)))
Apply an AND operation between mask and nzmask:
mask = mask & nzmask
The above stages keep the gray pixels around the black text.
Without the above stages, the black text gets thinner.
The last stage is replacing mask pixels with white:
new_img = img.copy()
new_img[np.where(mask)] = 255
Here is the code:
import numpy as np
import cv2
img_path = "new_drawing.png"
img = cv2.imread(img_path)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, (0, 0, 100), (255, 5, 255))
cv2.imshow('mask before and with nzmask', mask);
# Build mask of non black pixels.
nzmask = cv2.inRange(hsv, (0, 0, 5), (255, 255, 255))
# Erode the mask - pixels around the black pixels should not be masked.
nzmask = cv2.erode(nzmask, np.ones((3,3)))
cv2.imshow('nzmask', nzmask);
mask = mask & nzmask
new_img = img.copy()
new_img[np.where(mask)] = 255
cv2.imshow('mask', mask);
cv2.imshow('new_img', new_img);
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
Here is one way to do that in Python/OpenCV.
Read the input
Convert to HSV and separate channels
Threshold the saturation channel
Threshold the value channel and invert
Combine the two threshold images as a mask
Apply the mask to the input to write white where the mask is black
Save the result
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('symbols.png')
# convert image to hsv colorspace
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(hsv)
# threshold saturation image
thresh1 = cv2.threshold(s, 92, 255, cv2.THRESH_BINARY)[1]
# threshold value image and invert
thresh2 = cv2.threshold(v, 128, 255, cv2.THRESH_BINARY)[1]
thresh2 = 255 - thresh2
# combine the two threshold images as a mask
mask = cv2.add(thresh1,thresh2)
# use mask to remove lines in background of input
result = img.copy()
result[mask==0] = (255,255,255)
# display IN and OUT images
cv2.imshow('IMAGE', img)
cv2.imshow('SAT', s)
cv2.imshow('VAL', v)
cv2.imshow('THRESH1', thresh1)
cv2.imshow('THRESH2', thresh2)
cv2.imshow('MASK', mask)
cv2.imshow('RESULT', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save output image
cv2.imwrite('symbols_thresh1.png', thresh1)
cv2.imwrite('symbols_thresh2.png', thresh2)
cv2.imwrite('symbols_mask.png', mask)
cv2.imwrite('symbols_cleaned.png', result)
Saturation channel thresholded:
Value channel thresholded and inverted:
Mask:
Result:
I'm trying to detect vertical lines where every RGB channel of the pixel is less than 100 (dark); here is an example: RGB (100, 100, 100).
import numpy as np
import cv2
img = cv2.imread('testD2.png')
lower = np.array([0, 0, 0], dtype = "uint8")
upper = np.array([100,100,100], dtype = "uint8")
mask = cv2.inRange(img, lower, upper)
img = cv2.bitwise_and(img, img, mask = mask)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray,50,150,apertureSize = 3)
minLineLength=img.shape[1]-300
lines = cv2.HoughLinesP(image=edges,rho=0.02,theta=np.pi/500, threshold=10,lines=np.array([]), minLineLength=minLineLength,maxLineGap=100)
if lines is not None:
    a, b, c = lines.shape
    for i in range(a):
        cv2.line(img, (lines[i][0][0], lines[i][0][1]), (lines[i][0][2], lines[i][0][3]), (0, 0, 255), 3, cv2.LINE_AA)
cv2.imshow('edges', edges)
cv2.imshow('result', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
I also have to change the color of the end lines, i.e. the first and the last line.
Using cv2.findContours() may work better:
You can use cv2.findContours() and cv2.boundingRect() to identify the bars and return the information (x, y, w, h) that describes these rectangles. Here are a few examples.
If you want to only identify the lines and mark them you can do:
import cv2
import numpy as np
img = cv2.imread('oVKlP.png')
g = cv2.imread('oVKlP.png',0)
(T, mask) = cv2.threshold(g, 100, 255, cv2.THRESH_BINARY_INV)
_, contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
img = cv2.drawContours(img.copy(), contours, -1, (0,255,0), 2)
cv2.imwrite('just_contours.png',img)
Result:
If you want to display some of the line info, like the x value for one side of each bar, you can do:
import cv2
import numpy as np
img = cv2.imread('oVKlP.png')
g = cv2.imread('oVKlP.png',0)
(T, mask) = cv2.threshold(g, 100, 255, cv2.THRESH_BINARY_INV)
_, contours, hierarchy = cv2.findContours(mask.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# bounds with x,y,w,h for each bar
bounds = [cv2.boundingRect(i) for i in contours]
bounds.reverse()
img = cv2.drawContours(img.copy(), contours, -1, (0,0,255), 2)
font = cv2.FONT_HERSHEY_SIMPLEX
n = 20
b = 0
for (x, y, w, h) in bounds:
    cv2.circle(img, (x, y+n+10), 5, (0, 255, 0), -1, cv2.LINE_AA)
    cv2.putText(img, '{0}'.format(x), (x-b, y+n), font, .6, (255, 0, 255), 2, cv2.LINE_AA)
    n += 33
    b += 3
cv2.imwrite('fancy_marks.png',img)
Result:
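If you also need to mark only the end lines (the first and last bar, as asked in the question), one possible sketch building on the same contours is shown below; the left-to-right sorting and the colors are assumptions:

import cv2

# Sketch (assumed approach): outline the leftmost and rightmost bars in a
# different color than the rest. File name and colors follow the examples above.
img = cv2.imread('oVKlP.png')
g = cv2.imread('oVKlP.png', 0)
(T, mask) = cv2.threshold(g, 100, 255, cv2.THRESH_BINARY_INV)
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]

# Sort the contours left to right by the x of their bounding rectangle.
contours = sorted(contours, key=lambda c: cv2.boundingRect(c)[0])

cv2.drawContours(img, contours[1:-1], -1, (0, 0, 255), 2)               # middle bars in red
cv2.drawContours(img, [contours[0], contours[-1]], -1, (255, 0, 0), 2)  # end bars in blue
cv2.imwrite('end_lines_marked.png', img)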
I am trying to extract the number from this image using MSER contours.
Using this code..
import cv2
import numpy as np
mser = cv2.MSER_create()
img = cv2.imread('C:\\Users\\Link\\Desktop\\7.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
vis = img.copy()
regions, _ = mser.detectRegions(gray)
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]
poly = cv2.polylines(vis, hulls, 1, (0, 255, 0))
mask = np.zeros((img.shape[0], img.shape[1], 1), dtype=np.uint8)
mask = cv2.dilate(mask, np.ones((150, 150), np.uint8))
for i, contour in enumerate(hulls):
    cv2.drawContours(mask, [contour], -1, (255, 255, 255), -1)
    text_only = cv2.bitwise_and(img, img, mask=mask)
    cv2.imwrite('poly{}.png'.format(i), text_only)
cv2.imshow('img', vis)
cv2.waitKey(0)
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.imshow('text', text_only)
cv2.waitKey(0)
...I obtain this image (apart from the whole image itself as the result of MSER detection):
How can I make the background transparent? All I have found about this procedure, using OpenCV and Python, is this other question: NumPy/OpenCV 2: how do I crop non-rectangular region?
Applying the code suggested there produces the following output, which is not what I would like to have:
The goal is to have something like this:
If it's not possible at that level, how can I at least turn the black area transparent?
Thanks
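One possible approach (a minimal sketch, assuming the goal is to make everything outside the MSER hulls transparent in a saved PNG) is to use the hull mask as an alpha channel:

import cv2
import numpy as np

# Minimal sketch (assumed approach): build the MSER hull mask as in the
# question, then use it as an alpha channel so the background is transparent.
img = cv2.imread('7.png')                        # file name assumed from the question
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

mser = cv2.MSER_create()
regions, _ = mser.detectRegions(gray)
hulls = [cv2.convexHull(p.reshape(-1, 1, 2)) for p in regions]

mask = np.zeros(img.shape[:2], dtype=np.uint8)
cv2.drawContours(mask, hulls, -1, 255, -1)       # 255 inside the hulls, 0 elsewhere

bgra = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)     # add an alpha channel
bgra[:, :, 3] = mask                             # alpha 0 -> transparent background
cv2.imwrite('text_transparent.png', bgra)        # PNG preserves the alpha channel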