Filling in Circles in OpenCV - Python

I've been struggling with this for a while. I've been trying to figure out a way in OpenCV with Python to fill in white circles in an image that's entirely black and white.
To be clear, this image has been thresholded using adaptive thresholding, but now I have these rings which I'd like to be able to fill in. Ideally, whatever algorithm is used to fill in the circles should work for both sets of pictures I included.
If anyone could offer any guidance in this regard I'd greatly appreciate it.
Before Algorithm:
After Algorithm:
Before Algorithm:
After Algorithm:

A simple Google search would have given you this article, which answers exactly your question.
I adapted that solution for your input:
import cv2
import numpy as np
# Read image
im_in = cv2.imread("circles.png", cv2.IMREAD_GRAYSCALE)
# Threshold
th, im_th = cv2.threshold(im_in, 127, 255, cv2.THRESH_BINARY)
# Copy the thresholded image
im_floodfill = im_th.copy()
# Mask used for flood filling.
# NOTE: the mask must be 2 pixels larger than the input image in each dimension
h, w = im_th.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
# Floodfill from point (0, 0)
cv2.floodFill(im_floodfill, mask, (0,0), 255)
# Invert floodfilled image
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
# Combine the two images: the threshold has the rings, the inverted flood fill
# has their interiors, so OR-ing them gives filled circles
im_out = im_th | im_floodfill_inv
# Save the result.
cv2.imwrite("circles_filled.png", im_out)
Input file circles.png:
Output file circles_filled.png:

You can also fill the circles by drawing the contours.
import cv2
import numpy as np
#Run Main
if __name__ == "__main__":
    image = cv2.imread("circle.jpg", -1)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    ret, thresh = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)
    _, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # thickness=-1 fills each contour instead of just outlining it
    cv2.drawContours(image, contours, -1, (255,255,255), thickness=-1)
    cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
    cv2.imshow('Image', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
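Note that the three-value unpacking of cv2.findContours above assumes the OpenCV 3.x signature; OpenCV 4.x returns only two values. A small version-agnostic sketch, using the same pattern as the answers further down:
# works on both OpenCV 3.x (image, contours, hierarchy) and 4.x (contours, hierarchy)
found = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
contours = found[0] if len(found) == 2 else found[1]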

Related

Removing Background Around Contour

I am only a couple of weeks into learning to code with Python and OpenCV, but Stack Overflow has helped me numerous times. However, I can't seem to figure this issue out, so I decided to ask my first question.
I am trying to:
Take an image
Find the largest contour by area
Remove the background outside the contour
Effectively removing the background from the largest "object" in the picture.
I am struggling with the last part. I know I need to create a mask somehow then place the mask over the original image.
How do I create the correct type of mask? And how do I place the mask on top of the original image?
This is my code:
import cv2
import numpy as np
# Load image
image = cv2.imread('Resources/X.png')
# Grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Find Canny edges
edged = cv2.Canny(gray, 30, 200)
# Finding Contours
contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cv2.imshow('Canny Edges After Contouring', edged)
print("Number of Contours found = " + str(len(contours)))
cv2.waitKey(0)
# Largest contour
c = max(contours, key=cv2.contourArea)
# Not sure what to do from here. Attempt below:
mask = np.zeros(image.shape, np.uint8) # What is this actually doing? what does np.uint8 mean?
cv2.drawContours(mask, c, -1, (255, 255, 255), 1) # I am drawing the correct outline/contour
cv2.imshow('Mask', mask)
cv2.waitKey(0)
Any help would be appreciated.
Thanks
Chris
EDIT:
I managed to do it, but I'm not exactly sure what I am doing :-(
How would I get a different color background? I presume I have to fill the blank_mask with another color?
Also, I'm not sure what the bitwise function is actually doing.
blank_mask = np.zeros(image.shape, dtype=np.uint8)
cv2.fillPoly(blank_mask, [c], (255,255,255))
blank_mask = cv2.cvtColor(blank_mask, cv2.COLOR_BGR2GRAY)
# keep only the pixels of the input image where the mask is white
result = cv2.bitwise_and(image, image, mask=blank_mask)
cv2.imshow('Result', result)
Here is one way to change the background on your image using Python/OpenCV.
Read the input and get its dimensions
Threshold on black and invert to get white on black background
Get the largest contour from the inverted threshold image
Draw the largest contour as white filled on a black background as a mask
Create an inverted mask
Create a new colored background image
Apply the mask to the image
Apply the inverted mask to the background color image
Add the two images
Save the result
import cv2
import numpy as np
# Read image
img = cv2.imread('shapes.png')
hh, ww = img.shape[:2]
# threshold on black
# Define lower and upper limits of what we call "black"
lower = np.array([0, 0, 0])
upper = np.array([0, 0, 0])
# Create mask to only select black
thresh = cv2.inRange(img, lower, upper)
# invert mask so shapes are white on black background
thresh_inv = 255 - thresh
# get the largest contour
contours = cv2.findContours(thresh_inv, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big_contour = max(contours, key=cv2.contourArea)
# draw white contour on black background as mask
mask = np.zeros((hh,ww), dtype=np.uint8)
cv2.drawContours(mask, [big_contour], 0, (255,255,255), cv2.FILLED)
# invert mask so shapes are white on black background
mask_inv = 255 - mask
# create new (blue) background
bckgnd = np.full_like(img, (255,0,0))
# apply mask to image
image_masked = cv2.bitwise_and(img, img, mask=mask)
# apply inverse mask to background
bckgnd_masked = cv2.bitwise_and(bckgnd, bckgnd, mask=mask_inv)
# add together
result = cv2.add(image_masked, bckgnd_masked)
# save results
cv2.imwrite('shapes_inverted_mask.jpg', mask_inv)
cv2.imwrite('shapes_masked.jpg', image_masked)
cv2.imwrite('shapes_bckgrnd_masked.jpg', bckgnd_masked )
cv2.imwrite('shapes_result.jpg', result)
cv2.imshow('mask', mask)
cv2.imshow('image_masked', image_masked)
cv2.imshow('bckgrnd_masked', bckgnd_masked)
cv2.imshow('result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Mask image from largest contour:
Image masked:
Background masked:
Result:
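A side note on the question in the edit above: cv2.bitwise_and(img, img, mask=m) simply keeps each pixel of img where the mask is nonzero and zeroes everything else. If all you want is a flat background color rather than composing two masked images, a shorter alternative (a sketch, assuming mask is the filled-contour mask built above) is plain NumPy boolean indexing:
recolored = img.copy()
# paint every pixel outside the largest contour (mask == 0) with a solid BGR color, e.g. red
recolored[mask == 0] = (0, 0, 255)
cv2.imshow('recolored', recolored)
cv2.waitKey(0)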

How Can I Detect If There are Secondary Objects in an Image

I am looking for a way to detect if there are secondary objects in an image or if the image just has the one main object. I've done a bit of research, but I haven't been able to find anything quite like what I am looking for.
An example image would be:
The main object is the two detergent bottles, since they overlap, and the secondary object would be the "2 pack" pop-up bubble in the top right. I would expect this image to return something like "This image has secondary objects", or a count of the objects.
Here is one way to do that in Python/OpenCV
Read the input
Convert to gray and invert
OTSU threshold
Morphology close
Get external contours
Draw contours on image
Count contours
Print messages
Save results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread("tide.jpg")
# convert img to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# invert gray image
gray = 255 - gray
# threshold gray image
#thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# apply morphology close
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# Get contours
cntrs = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cntrs = cntrs[0] if len(cntrs) == 2 else cntrs[1]
result = img.copy()
for c in cntrs:
    cv2.drawContours(result, [c], -1, (0,0,255), 1)
count = len(cntrs)
print("")
print("count =", count)
print("")
if count > 1:
    print("This image has secondary objects")
else:
    print("This image has primary object only")
# write results to disk
cv2.imwrite("tide_thresh.png", thresh)
cv2.imwrite("tide_morph.png", morph)
cv2.imwrite("tide_object_contours.png", result)
# display it
cv2.imshow("thresh", thresh)
cv2.imshow("morph", morph)
cv2.imshow("result", result)
cv2.waitKey(0)
Thresholded image:
Morphology close image:
Contours on image:
Count of contours and messages:
count = 2
This image has secondary objects
Following #fmw42's advice, I did a bit of research and found a script that worked well after a little bit of tinkering:
import cv2
import numpy as np
import sys
img = cv2.imread(sys.argv[1], cv2.IMREAD_UNCHANGED)
#convert img to grey
img_grey = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#set a thresh
thresh = 230
#get threshold image
ret,thresh_img = cv2.threshold(img_grey, thresh, 255, cv2.THRESH_BINARY_INV)
#find contours
contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#create an empty image for contours
# img_contours = np.zeros(img.shape)
img_contours = np.zeros_like(img)
# draw the contours on the empty image
cv2.drawContours(img_contours, contours, -1, 255, 3)
#save image
cv2.imshow('contours',img_contours)
# Wait indefinitely until you push a key. Once you do, close the windows
print(len(contours))
cv2.waitKey(0)
cv2.destroyAllWindows()
My main issue was the threshold setting, and I found that 230 worked best with my sample images, although it still is not perfect. I'm hoping there is a better way or something I can add to this.
This image returned 1 as expected, but my initial test image returns 3 at this threshold setting, when I would expect 2. At a threshold of 200 it returns 2, but I was willing to compromise, because the main thing I need to know is whether there is more than 1.
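One thing that might help, offered only as a sketch: instead of hard-coding 230, let Otsu pick the threshold from the histogram (the same cv2.THRESH_OTSU flag the answer above uses), applied to img_grey from the script above:
# Otsu chooses the threshold automatically from the grayscale histogram
otsu_val, thresh_img = cv2.threshold(img_grey, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
print("Otsu picked threshold:", otsu_val)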

how to detect irregular circles in python with opencv

I want to create a vision system for the detection of defects in SMD capacitors. The defect is called a "pinhole": small holes in the surface of the chip that are created at the time of manufacture. My objective is to create an algorithm that is able to detect these holes and, with that, discard the chips that have this defect.
For the moment I have written two scripts:
The first one converts the original image to a binary image so that I can see the circles clearly; the code and the result are as follows. After obtaining the binary image, I don't know how to make my algorithm detect that there is a circle; from what I have investigated, the Hough transform can only be applied to grayscale images.
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread("C:/Users/Johanna Menendez/Documents/UNIR/TFM/chips/20190905_124734.jpg")  # load in color (BGR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
newImg = cv2.resize(img, (0,0), fx=0.50, fy=0.50)
(thresh, blackAndWhiteImage) = cv2.threshold(newImg, 127, 255, cv2.THRESH_BINARY)
numpy_vertical_concat = np.concatenate((newImg, blackAndWhiteImage), axis=1)
cv2.imshow('binary', numpy_vertical_concat)
cv2.waitKey(0)
binary
The second uses contour detection. The problem with this is that it doesn't only detect circles, and likewise, after this, I don't know how to make my algorithm determine that a given contour is a circle.
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread("C:/Users/Johanna Menendez/Documents/UNIR/TFM/chips/20190905_124734.jpg")  # load in color (BGR)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
newImg = cv2.resize(img, (0,0), fx=0.50, fy=0.50)
gray = cv2.cvtColor(newImg, cv2.COLOR_RGB2GRAY)
(thresh, blackAndWhiteImage) = cv2.threshold(gray, 130, 255, cv2.THRESH_BINARY)
(contours, hierarchy) = cv2.findContours(blackAndWhiteImage, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
img = cv2.drawContours(newImg, contours, -1, (0, 255, 0), 2)
#numpy_vertical_concat = np.concatenate((newImg, img), axis=1)
cv2.imshow('binary', img)
cv2.waitKey(0)
contours detection
Could someone help me figure out how to make my algorithm detect the circles, please?
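Not a complete answer, but one common way to check for circles is cv2.HoughCircles on the grayscale image. A minimal sketch using gray and newImg from the second script; all the parameter values below are illustrative guesses that would need tuning for your chips:
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1, minDist=20,
                           param1=50, param2=30, minRadius=3, maxRadius=40)
if circles is not None:
    for x, y, r in np.around(circles[0]).astype(int):
        cv2.circle(newImg, (int(x), int(y)), int(r), (0, 255, 0), 2)  # outline each detected circle
    print(len(circles[0]), "circle(s) found")
else:
    print("no circles found")
cv2.imshow('hough circles', newImg)
cv2.waitKey(0)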

How to improve image quality to extract text from an image using Tesseract

I'm trying to use Tesseract in the code below to extract the two lines of text in the image. I tried to improve the image quality, but even so it didn't work.
Can anyone help me?
from PIL import Image, ImageEnhance, ImageFilter
import pytesseract
img = Image.open(r'C:\ocr\test00.jpg')
new_size = tuple(4*x for x in img.size)
img = img.resize(new_size, Image.ANTIALIAS)
img.save(r'C:\\test02.jpg', 'JPEG')
print( pytesseract.image_to_string( img ) )
Given the comment by #barny, I don't know if this will work, but you can try the code below. I created a script that selects the display area and warps it into a straight image, then thresholds to a black-and-white mask of the characters and cleans the result up a bit.
Try whether it improves recognition. If it does, also look at the intermediate stages so you'll understand everything that happens.
Update: it seems Tesseract prefers black text on a white background, so I inverted and dilated the result.
Result:
Updated result:
Code:
import numpy as np
import cv2
# load image
image = cv2.imread('disp.jpg')
# create grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# perform threshold
retr, mask = cv2.threshold(gray_image, 190, 255, cv2.THRESH_BINARY)
# findcontours
ret, contours, hier = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# select the largest contour
largest_area = 0
for cnt in contours:
    if cv2.contourArea(cnt) > largest_area:
        cont = cnt
        largest_area = cv2.contourArea(cnt)
# find the rectangle (and the cornerpoints of that rectangle) that surrounds the contours / photo
rect = cv2.minAreaRect(cont)
box = cv2.boxPoints(rect)
box = np.int0(box)
#### Warp image to square
# assign cornerpoints of the region of interest
pts1 = np.float32([box[2],box[3],box[1],box[0]])
# provide new coordinates of cornerpoints
pts2 = np.float32([[0,0],[500,0],[0,110],[500,110]])
# determine and apply transformationmatrix
M = cv2.getPerspectiveTransform(pts1,pts2)
tmp = cv2.warpPerspective(image,M,(500,110))
# create grayscale
gray_image2 = cv2.cvtColor(tmp, cv2.COLOR_BGR2GRAY)
# perform threshold
retr, mask2 = cv2.threshold(gray_image2, 160, 255, cv2.THRESH_BINARY_INV)
# remove noise / close gaps
kernel = np.ones((5,5),np.uint8)
result = cv2.morphologyEx(mask2, cv2.MORPH_CLOSE, kernel)
#draw rectangle on original image
cv2.drawContours(image, [box], 0, (255,0,0), 2)
# dilate result to make characters more solid
kernel2 = np.ones((3,3),np.uint8)
result = cv2.dilate(result,kernel2,iterations = 1)
#invert to get black text on white background
result = cv2.bitwise_not(result)
#show image
cv2.imshow("Result", result)
cv2.imshow("Image", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
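If the cleanup helps, the result can be fed straight back into pytesseract; a sketch, assuming pytesseract is set up as in the question (it accepts NumPy arrays as well as PIL images):
import pytesseract
# run OCR on the cleaned-up, inverted image
text = pytesseract.image_to_string(result)
print(text)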

How to improve edge detection and remove background from an image?

I am using the code below to remove the background of the images and highlight only my region of interest (ROI); however, on some images the algorithm behaves incorrectly, discarding the stain (the ROI) and deleting it along with the background.
import numpy as np
import cv2
#Read the image and perform threshold
img = cv2.imread('photo.bmp')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.medianBlur(gray,5)
_,thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
#Search for contours and select the biggest one
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
cnt = max(contours, key=cv2.contourArea)
#Create a new mask for the result image
h, w = img.shape[:2]
mask = np.zeros((h, w), np.uint8)
#Draw the contour on the new mask and perform the bitwise operation
cv2.drawContours(mask, [cnt],-1, 255, -1)
res = cv2.bitwise_and(img, img, mask=mask)
#Display the result
cv2.imwrite('photo.png', res)
#cv2.imshow('img', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
I don't know if I understand correctly, because when I run your code I do not get the output you posted. If you would like to obtain only the mole, it can't be done by simply thresholding, because the mole is too near the border; plus, if you look at your image closely, you will see that it has some sort of frame. However, there is a simple way to do this for this image, though it may not work in other cases. You can draw a fake border over your image to separate the ROI from the other noisy areas, then set a size threshold for which contours you wish to display. Cheers!
Example:
#Import all necessery libraries
import numpy as np
import cv2
#Read the image and get its height and width
img = cv2.imread('moles.png')
h, w = img.shape[:2]
# Transform to gray colorspace and blur the image.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
# Make a fake rectangle around the image border that will separate the main contour.
cv2.rectangle(blur, (0,0), (w,h), (255,255,255), 10)
# Perform Otsu threshold.
_,thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Create a mask for bitwise operation
mask = np.zeros((h, w), np.uint8)
# Search for contours and iterate over contours. Make threshold for size to
# eliminate others.
_, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
for i in contours:
    cnt = cv2.contourArea(i)
    if 1000000 > cnt > 100000:
        cv2.drawContours(mask, [i], -1, 255, -1)
# Perform the bitwise operation.
res = cv2.bitwise_and(img, img, mask=mask)
# Display the result.
cv2.imwrite('mole_res.jpg', res)
cv2.imshow('img', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
