How do I recognize artifacts and holes in an image using OpenCV? - python

I need help with OpenCV.
I have a picture with a complex form lying on the ground. I need to extract this form from the picture and clean it of noise. But there is a logo which I need to remove and 4 holes to identify. (The original image and what I could do so far were attached as images.)
My code so far:
import cv2
import numpy as np
# Read the original image
img = cv2.imread('Amoebe_1.jpg')
# resize image
scale_down = 0.4
img = cv2.resize(img, None, fx= scale_down, fy= scale_down, interpolation= cv2.INTER_LINEAR)
# Display original image
cv2.imshow('Original', img)
cv2.waitKey(0)
# Denoising
dst = cv2.fastNlMeansDenoisingColored(img,None,20,10,10,21)
# Canny Edge Detection
edges = cv2.Canny(image=dst, threshold1=100, threshold2=200) # Canny Edge Detection
# Contour Detection
contours1, hierarchy1 = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# draw contours on the original image for `CHAIN_APPROX_SIMPLE`
image_copy1 = img.copy()
cv2.drawContours(image_copy1, contours1, -1, (0, 255, 0), 2, cv2.LINE_AA)
# see the results
cv2.imshow('Simple approximation', image_copy1)
# Display Canny Edge Detection Image
cv2.imshow('Canny Edge Detection', edges)
cv2.waitKey(0)
#Floodfill
h,w,chn = img.shape
seed = (w//2, h//2)  # integer coordinates (currently unused)
mask = np.zeros((h+2,w+2),np.uint8)
bucket = edges.copy()
cv2.floodFill(bucket, mask, (0,0), (255,255,255))
cv2.imshow('Mask', bucket)
cv2.waitKey(0)
cv2.destroyAllWindows()

Having a shot at this in ImageJ, extracting the red channel from the raw image gives something that is already close to a binary image. Running a small (3 px) median filter and thresholding then gives a clean binary.
Running cv.findContours() on that binary and analysing the contour areas should give you the little holes and the "eye". Use cv.drawContours() with the bigger objects to fill up the eye and logo area, and maybe dilate() to fill small discrepancies.
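For reference, a rough OpenCV translation of the steps described above could look like the sketch below. This is not the original ImageJ workflow: the filename is taken from the question, and the Otsu threshold and the area cutoff are assumptions that need tuning.
import cv2
import numpy as np

# read the image and take the red channel (index 2 in OpenCV's BGR order)
img = cv2.imread('Amoebe_1.jpg')   # filename assumed from the question
red = img[:, :, 2]

# small median filter to suppress noise, then threshold to a binary image
red = cv2.medianBlur(red, 3)
binary = cv2.threshold(red, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]

# find all contours and split them by area: small ones are candidate holes,
# large ones are the form / logo / "eye"
contours = cv2.findContours(binary, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
small = [c for c in contours if cv2.contourArea(c) < 500]    # area limit is a guess
large = [c for c in contours if cv2.contourArea(c) >= 500]

# fill the big objects to cover the eye/logo region, then dilate slightly
filled = binary.copy()
cv2.drawContours(filled, large, -1, 255, -1)
filled = cv2.dilate(filled, np.ones((3, 3), np.uint8))

print(f"found {len(small)} small contours (candidate holes)")
cv2.imshow('filled', filled)
cv2.waitKey(0)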

Related

Get number of pixels of each object in image

As shown in the images, I have a masked MRI in which certain muscles have been segmented out. I am trying to get the area of these muscles in pixels. Currently, I can get all the non-zero pixels in the image using np.count_nonzero(result); however, I am looking to get the individual areas of each left and right muscle, not just the total.
I know I can manually crop/select an ROI for each area, but I do not want to do this for hundreds of images. Also, not all muscles lie cleanly on one side, as seen in the bottom image (so I cannot just split the image in half). The images are represented as 2D arrays.
Thank you!
You can use the OpenCV library to get the number of pixels for each object in the image.
import cv2 # OpenCV library
# Read the image
image = cv2.imread(r'D:\image_sample.jpg')
# Convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Get the mask (predefined threshold)
mask = cv2.inRange(gray, 200, 255)
# Find all object contours
contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
for i in range(len(contours)):
    # first point of the contour, used as the label position
    m = tuple(int(v) for v in contours[i][0][0])
    area = cv2.contourArea(contours[i])
    print(f"Area{i+1} = {area} pixels")
    image = cv2.drawContours(image, contours, i, (0, 255, 255*i), -1)
    image = cv2.putText(image, str(area), m, cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 255), 1, cv2.LINE_AA, False)
# Show the result
cv2.imshow('image with contours', image)
cv2.waitKey()
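If the exact pixel count of each blob (rather than the contour area) is what matters, cv2.connectedComponentsWithStats gives it directly. A minimal sketch, assuming the same thresholded mask produced by cv2.inRange above:
import cv2

# assumes `mask` is the binary image produced by cv2.inRange above
num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)
# label 0 is the background; stats[i, cv2.CC_STAT_AREA] is the pixel count of object i
for i in range(1, num_labels):
    print(f"Object {i}: {stats[i, cv2.CC_STAT_AREA]} pixels at centroid {centroids[i]}")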

How to enclose the irregular figure contour and fill it with 5px dots in opencv using python?

This is my input image, and this is what I want to get.
But the problem is that I am not able to enclose the contour, and how should I add these dots?
Does OpenCV have any such function to handle this?
So basically:
The first problem is how to enclose this figure.
Second, how to add the dots.
Thank you
Here is one way to do that in Python/OpenCV. However, I cannot close your dotted outline without connecting separate regions. But it will give you some idea how to proceed with most of what you want to do.
If you manually add a few more dots to your input image where there are large gaps, then the morphology kernel can be made smaller so that it can connect the regions without merging separate parts that should remain isolated.
Read the input
Convert to grayscale
Threshold to binary
Apply morphology close to try to close the dotted outline. Unfortunately it connected separate regions.
Get the external contours
Draw white filled contours on a black background as a mask
Draw a single black circle on a white background
Tile out the circle image to the size of the input
Mask the tiled circle image with the filled contour image
Save results
Input:
import cv2
import numpy as np
import math
# read input image
img = cv2.imread('island.png')
hh, ww = img.shape[:2]
# convert img to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# threshold
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]
# use morphology to close figure
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (35,35))
morph = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# find contours and bounding boxes
mask = np.zeros_like(thresh)
contours = cv2.findContours(morph, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
for cntr in contours:
    cv2.drawContours(mask, [cntr], 0, 255, -1)
# create a single tile as black circle on white background
circle = np.full((11,11), 255, dtype=np.uint8)
circle = cv2.circle(circle, (7,7), 3, 0, -1)
# tile out the tile pattern to the size of the input
numht = math.ceil(hh / 11)
numwd = math.ceil(ww / 11)
tiled_circle = np.tile(circle, (numht,numwd))
tiled_circle = tiled_circle[0:hh, 0:ww]
# composite tiled_circle with mask
result = cv2.bitwise_and(tiled_circle, tiled_circle, mask=mask)
# save result
cv2.imwrite("island_morph.jpg", morph)
cv2.imwrite("island_mask.jpg", mask)
cv2.imwrite("tiled_circle.jpg", tiled_circle)
cv2.imwrite("island_result.jpg", result)
# show images
cv2.imshow("morph", morph)
cv2.imshow("mask", mask)
cv2.imshow("tiled_circle", tiled_circle)
cv2.imshow("result", result)
cv2.waitKey(0)
Morphology connected image:
Contour Mask image:
Tiled circles:
Result:

How to select the bubbles inside the droplet correctly

This is Wendy. This is an image processing related question.
I have to select the bubbles properly to analyze their size and calculate their velocity across the image sequences.
original image
image sequences
In the picture, the largest shape is a droplet, and inside the droplet there are lots of tiny bubbles.
So far, the questions are:
1. Because the outlines of the bubbles are broken, few contours are detected, while lots of edges are detected. How can I close the edges into contours? (Like with a coin, I just want the outline of each bubble and don't want the lines inside the bubbles.)
current progress
Code (detect contours):
import cv2
import numpy as np

img = cv2.imread("1000.tif")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3,3), 0)
edged = cv2.Canny(blurred, 5, 60)
cv2.imshow('edged', edged)
cv2.waitKey(0)
contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
img_contours = img.copy()
cv2.drawContours(img_contours, contours, -1, (0,255,0), 1)
cv2.imwrite("contours.tif", img_contours)
cv2.imshow("contours", img_contours)
cv2.waitKey(0)
Code (fill holes):
im_floodfill = edged.copy()
h, w = edged.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
cv2.floodFill(im_floodfill, mask, (0,0), 255)
im_floodfill_inv = cv2.bitwise_not(im_floodfill)
im_out = edged | im_floodfill_inv
cv2.imshow("Thresholded Image", edged)
cv2.imshow("Floodfilled Image", im_floodfill)
cv2.imshow("Inverted Floodfilled Image", im_floodfill_inv)
cv2.imshow("Foreground", im_out)
cv2.waitKey(0)
cv2.imwrite("fill.jpg", im_out)
The result will be:
The velocity of the bubbles is my objective. I've tried putting the binary image sequences into ImageJ and using "TrackMate" (a tool for automated and semi-automated particle tracking). It tracks with a fixed bubble size setting; however, the sizes of the bubbles are quite different in my case. Is there any method to track the moving bubbles?
If this isn't clear enough please let me know and I can try and be more specific. Thank you for your time.
Here is one way in Python/OpenCV.
Read the input
Convert to gray
Threshold
Fill the holes with morphology close
Get the contours and the largest contour (though presumably just one)
Draw the contour on a copy of the input
Save the results
Input:
import cv2
import numpy as np
# read image
img = cv2.imread('vapor.jpg')
# convert to grayscale
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# threshold
#thresh = cv2.threshold(gray,5,255,cv2.THRESH_BINARY)[1]
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
# apply close morphology to fill white circles
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (45,45))
thresh_cleaned = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# get largest contour
contours = cv2.findContours(thresh_cleaned, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
big_contour = max(contours, key=cv2.contourArea)
# draw contour
result = img.copy()
cv2.drawContours(result,[big_contour],0,(0,0,255),1)
# save image with points drawn
cv2.imwrite('vapor_thresh.jpg',thresh)
cv2.imwrite('vapor_cleaned_thresh.jpg',thresh_cleaned)
cv2.imwrite('vapor_contour.jpg',result)
cv2.imshow("thresh", thresh)
cv2.imshow("thresh_cleaned", thresh_cleaned)
cv2.imshow("contour", result)
cv2.waitKey(0)
cv2.destroyAllWindows()
Threshold image:
Morphology filled image:
Contour image:
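The code above only isolates the droplet outline. As a follow-up sketch (my own addition, not part of the original answer), the individual bubbles could then be looked for inside that outline by masking the raw threshold image with the filled droplet contour and filtering the remaining contours by area; the area limits below are assumptions to tune, and you may need to invert the threshold image first depending on whether the bubbles come out light or dark.
# sketch: find bubbles inside the droplet (assumes thresh, big_contour, img from above)
droplet_mask = np.zeros_like(thresh)
cv2.drawContours(droplet_mask, [big_contour], 0, 255, -1)

# keep only thresholded pixels that lie inside the droplet
# (use cv2.bitwise_not(thresh) instead if the bubbles are dark in thresh)
inside = cv2.bitwise_and(thresh, droplet_mask)

bubbles = cv2.findContours(inside, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
bubbles = bubbles[0] if len(bubbles) == 2 else bubbles[1]

# filter by area so neither single-pixel noise nor the droplet itself is kept
bubbles = [c for c in bubbles
           if 5 < cv2.contourArea(c) < 0.5 * cv2.contourArea(big_contour)]
print(f"found {len(bubbles)} candidate bubbles")

vis = img.copy()
cv2.drawContours(vis, bubbles, -1, (0, 255, 0), 1)
cv2.imshow("bubbles", vis)
cv2.waitKey(0)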

Extract building edges from map image using Python

I got a map image here.
I need to extract the edges of buildings for further processing; the result would be like step 2 in the post here.
Since I am not familiar with this field, can this be done with libraries such as OpenCV?
Seems you want to select individual buildings, so I used color separation. The walls are darker, which makes for good separation in the HSV colorspace. Note that the final result can be improved by zooming in more and/or by using an image type with less compression, such as PNG.
Select walls
First I determined good values for separation. For that I used this script. I found that the best result would be to separate the yellow and the gray separately and then combine the resulting masks. Not all walls closed perfectly, so I improved the result by closing the mask a bit. The result is a mask that displays all walls:
Left to right: Yellow mask, Gray mask, Combined and solidified mask
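The tuning script referred to above is not included here. As a rough sketch of how such tuning can be done (an assumption on my part, not the original script), a set of HSV trackbars lets you find the ranges interactively:
import cv2

def nothing(x):
    pass

img = cv2.imread("fLzI9.jpg")
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

cv2.namedWindow("tune")
for name, maxval, init in [("H low", 179, 0), ("H high", 179, 179),
                           ("S low", 255, 0), ("S high", 255, 255),
                           ("V low", 255, 0), ("V high", 255, 255)]:
    cv2.createTrackbar(name, "tune", init, maxval, nothing)

while True:
    low = (cv2.getTrackbarPos("H low", "tune"),
           cv2.getTrackbarPos("S low", "tune"),
           cv2.getTrackbarPos("V low", "tune"))
    high = (cv2.getTrackbarPos("H high", "tune"),
            cv2.getTrackbarPos("S high", "tune"),
            cv2.getTrackbarPos("V high", "tune"))
    # show the mask for the current slider positions
    mask = cv2.inRange(hsv, low, high)
    cv2.imshow("tune", mask)
    if cv2.waitKey(30) & 0xFF == 27:   # press Esc to quit
        break
cv2.destroyAllWindows()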
Find buildings
Next I used findContours to separate out buildings. Since the wall contours will probably not be very useful (as walls are interconnected), I used the hierarchy to find the 'lowest' contours (those that have no other contours inside of them). These are the buildings.
Result of findContours: the outline of all contours in green, the outline of individual buildings in red
Note that buildings on the edge of the image are not detected. This is because, using this technique, they are not a separate contour but part of the exterior of the image. This can be solved by drawing a gray rectangle on the border of the image. You may not want this in your final application, but I included it in case you do.
Code:
import cv2
import numpy as np
#load image and convert to hsv
img = cv2.imread("fLzI9.jpg")
# draw gray box around image to detect edge buildings
h,w = img.shape[:2]
cv2.rectangle(img,(0,0),(w-1,h-1), (50,50,50),1)
# convert image to HSV
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# define color ranges
low_yellow = (0,28,0)
high_yellow = (27,255,255)
low_gray = (0,0,0)
high_gray = (179,255,233)
# create masks
yellow_mask = cv2.inRange(hsv, low_yellow, high_yellow )
gray_mask = cv2.inRange(hsv, low_gray, high_gray)
# combine masks
combined_mask = cv2.bitwise_or(yellow_mask, gray_mask)
kernel = np.ones((3,3), dtype=np.uint8)
combined_mask = cv2.morphologyEx(combined_mask, cv2.MORPH_DILATE,kernel)
# findcontours
contours, hier = cv2.findContours(combined_mask,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# find and draw buildings
for x in range(len(contours)):
    # if a contour has no contours inside of it, draw the shape filled
    c = hier[0][x][2]
    if c == -1:
        cv2.drawContours(img, [contours[x]], 0, (0,0,255), -1)
# draw the outline of all contours
for cnt in contours:
    cv2.drawContours(img, [cnt], 0, (0,255,0), 2)
# display result
cv2.imshow("Result", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Result:
With buildings drawn solid red and all contours as green overlay
Here's a simple approach
Convert image to grayscale and Gaussian blur to smooth edges
Threshold image using cv2.threshold()
Perform Canny edge detection with cv2.Canny()
Find and draw contours using cv2.findContours() and cv2.drawContours()
import cv2
image = cv2.imread('1.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (3, 3), 0)
thresh = cv2.threshold(blurred, 240 ,255, cv2.THRESH_BINARY_INV)[1]
canny = cv2.Canny(thresh, 50, 255, 1)
cnts = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    cv2.drawContours(image, [c], 0, (36,255,12), 2)
cv2.imshow('thresh', thresh)
cv2.imshow('canny', canny)
cv2.imshow('image', image)
cv2.imwrite('thresh.png', thresh)
cv2.imwrite('canny.png', canny)
cv2.imwrite('image.png', image)
cv2.waitKey(0)

How to discard the edges of an image using opencv?

I'm pre-processing some images in order to remove the background from my area of interest. However, the images on my bench have rounded dark edges due to the focus of the camera. How do I discard these rounded edges so that I can extract only my object of interest from the image? With the code below I can remove the background of the image, but it does not work correctly because of those edges.
import numpy as np
import cv2
# Read the image and get its height and width
img = cv2.imread('IMD408.bmp')
h, w = img.shape[:2]
# Transform to gray colorspace and blur the image.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
# Make a fake rectangle around the image that will separate the main contour.
cv2.rectangle(blur, (0,0), (w,h), (255,255,255), 10)
# Perform Otsu threshold.
_,thresh = cv2.threshold(blur,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Create a mask for bitwise operation
mask = np.zeros((h, w), np.uint8)
# Search for contours and iterate over contours. Make threshold for size to
# eliminate others.
contours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
for i in contours:
    cnt = cv2.contourArea(i)
    if 1000000 > cnt > 100000:
        cv2.drawContours(mask, [i], -1, 255, -1)
# Perform the bitwise operation.
res = cv2.bitwise_and(img, img, mask=mask)
# Display the result.
cv2.imwrite('IMD408.png', res)
cv2.imshow('img', res)
cv2.waitKey(0)
cv2.destroyAllWindows()
Input image:
Output:
Error:
Since you mentioned that all the images have the same hue, this should work well for them. The steps are to do some white balancing, which will increase the contrast a bit:
Get the grayscale.
Threshold the grayscale image: values above 127 are set to 255 (white). This will give you a binary image, which will become a mask for the original image.
Apply the mask.
You might have to play around with the thresholding if you want better results; here is the link for that. But this should get you started. I'm using a different OpenCV version than you, so you might have to tweak the code a bit.
import cv2
def equaliseWhiteBalance(image):
    ''' Return equalised WB of an image '''
    wb = cv2.xphoto.createSimpleWB()        # create WB object (needs opencv-contrib)
    imgWB = wb.balanceWhite(image)          # balance white on the image
    b, g, r = cv2.split(imgWB)              # get individual b,g,r channels (OpenCV order is BGR)
    b_equ = cv2.equalizeHist(b)             # equalise BLUE channel
    g_equ = cv2.equalizeHist(g)             # equalise GREEN channel
    r_equ = cv2.equalizeHist(r)             # equalise RED channel
    img_equ_WB = cv2.merge([b_equ, g_equ, r_equ])  # merge equalised channels
    return img_equ_WB
#Read the image
img = cv2.imread('IMD408.bmp')
result = img.copy()
#Get whiteBalance of image
imgWB = equaliseWhiteBalance(img)
cv2.imshow('img', imgWB)
cv2.waitKey(0)
# Get gray image
gray = cv2.cvtColor(imgWB, cv2.COLOR_BGR2GRAY)
cv2.imshow('img', gray)
cv2.waitKey(0)
# Perform threshold
_, thresh = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
cv2.imshow('img', thresh)
cv2.waitKey(0)
# Apply mask
result[thresh!=0] = (255,255,255)
cv2.imshow('img', result)
cv2.waitKey(0)
If the dark corner vignettes have different sizes in each image, then I suggest looking for the centroids of contours in the binary (mask) image. Contours whose centroid lies within a short distance of any corner of the image will be the dark vignettes, so their pixels can be changed from black to white.
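A minimal sketch of that idea (my own illustration, not code from the answer; the mask filename and the distance cutoff are assumptions to tune):
import cv2
import numpy as np

mask = cv2.imread('mask.png', cv2.IMREAD_GRAYSCALE)   # hypothetical binary mask image
h, w = mask.shape[:2]
corners = [(0, 0), (w - 1, 0), (0, h - 1), (w - 1, h - 1)]
max_dist = 0.15 * min(h, w)                           # "short" distance, tune per image set

contours = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
contours = contours[0] if len(contours) == 2 else contours[1]
for c in contours:
    M = cv2.moments(c)
    if M['m00'] == 0:
        continue
    cx, cy = M['m10'] / M['m00'], M['m01'] / M['m00']  # contour centroid
    # if the centroid is close to any image corner, treat the contour as a vignette
    if any(np.hypot(cx - x, cy - y) < max_dist for x, y in corners):
        cv2.drawContours(mask, [c], -1, 255, -1)       # paint the vignette white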
