Seeds segmentation in OpenCV/Python

I know that this topic has been covered before, but I'm stuck.
I'm quite new to this and I'm trying to segment seeds using Python and OpenCV.
Here are my steps:
And now I'm stuck on how to separate the 3 seeds in the center of the image. I then use the distance transform, but the result is not satisfactory (why is it hardly visible?). I don't know how to proceed further. I would like to achieve something like this.
Distance transform
The distance transform works well on more rounded objects, but not in my case.
My code:
import cv2
from google.colab.patches import cv2_imshow
import numpy as np
img = cv2.imread('connected_seeds.jpg')
img = cv2.resize(img,(600,400))
# Gray scale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Blur image
blur_img = cv2.medianBlur(gray, 7)
# Thresholding
ret, thresh = cv2.threshold(blur_img, 0, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# Closing
kernel = np.ones((5,5), np.uint8)
closing = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
sure_bg = cv2.dilate(closing,kernel,iterations=2)
# Distance transform
dist_transform = cv2.distanceTransform(closing, cv2.DIST_L2, 5)
ret, sure_fg = cv2.threshold(dist_transform,0.5*dist_transform.max(),255,0)
Here is the article I'm following:
https://learning.rc.virginia.edu/notes/opencv/
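Note: cv2.distanceTransform returns a float32 image whose values are distances in pixels (usually well below 255), which is why it appears almost black when displayed directly. Below is a minimal, untested sketch of normalizing it for display and finishing with watershed, continuing from the variables in the code above; the label offset and colors are illustrative only.
# distance values are small floats; normalize to 0..255 just for viewing
dist_display = cv2.normalize(dist_transform, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
# sure foreground must be uint8 before connectedComponents
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
# label each seed, then let watershed grow the labels into the unknown region
ret, markers = cv2.connectedComponents(sure_fg)
markers = markers + 1
markers[unknown == 255] = 0
markers = cv2.watershed(img, markers)
img[markers == -1] = (0, 0, 255)  # watershed boundaries drawn in red
cv2_imshow(dist_display)
cv2_imshow(img)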

Related

OpenCV Contour Detection in color

I'm trying to detect the optic disc in an image of the back of the eye using OpenCV and findContours, then fitEllipse, but my program isn't detecting the optic disc at all. How do I fix this? Code and images are below.
import cv2
import numpy as np
from sklearn.linear_model import LinearRegression
import math
from decimal import Decimal
def find_elongation(image):
    img = cv2.imread(image, 0)
    ret, thresh = cv2.threshold(img, 127, 255, 0)
    contour, hierarchy = cv2.findContours(thresh, 1, 2)
    contours = []
    for i in range(len(contour)):
        if len(contour[i]) >= 5:
            contours.append(contour[i])
    cnt = contours[0]
    k = cv2.isContourConvex(cnt)
    ellipse = cv2.fitEllipse(cnt)
    im = cv2.ellipse(img, ellipse, (0, 255, 0), 2)
    (x, y), (ma, Ma), angle = cv2.fitEllipse(cnt)
    return Ma/ma
print(find_elongation('eye.png'))
print(find_elongation('eye2.png'))
print(find_elongation('eye3.png'))
Image (one of them):
I'm trying to get the brightly colored circle in the middle:
Thanks for the help!
I have developed a piece of code to implement what you have asked. It mainly uses the Value channel of the HSV color space, followed by some morphological operations.
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Read the image
img = cv2.imread('so.png')
# Transform the image to HSV color-space and keep only the value channel
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h,s,v = cv2.split(hsv)
# Threshold the image at 95% of the brightest pixel value
maxVal = np.max(v)
per = 0.95
_, th = cv2.threshold(v, maxVal*per, 255, cv2.THRESH_BINARY)
# Erode the image and find the connected components
th = cv2.erode(th, np.ones((2,2), np.uint8))
n, conComp, stats, centroids = cv2.connectedComponentsWithStats(th)
# Obtain the sizes of the connectedComponents skipping the background
sizes = stats[1:,-1]
# Obtain the number of the connectedComponent with biggest size
nComp = np.argmax(sizes) + 1
# Isolate the connectedComponent and apply closing
out = np.zeros((img.shape[0], img.shape[1]), np.uint8)
out[conComp==nComp] = 1
out = cv2.morphologyEx(out, cv2.MORPH_CLOSE, np.ones((10,10)))
# Apply gradient to mask to obtain the border of the ellipse
out = cv2.morphologyEx(out, cv2.MORPH_GRADIENT, np.ones((2,2)))
# Join the border of the ellipse with the image to display it
img[out==1] = (0,0,0)
plt.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))
plt.show()
I attach the output I have obtained with the picture you posted:
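If you also need the elongation ratio from your original function, one possible follow-up is to fit an ellipse to the isolated component. This is an untested sketch that assumes it runs on the filled mask right after the MORPH_CLOSE step above (before the gradient turns it into a border):
contours, hierarchy = cv2.findContours(out, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt = max(contours, key=cv2.contourArea)        # largest blob = optic disc candidate
(x, y), (ma, Ma), angle = cv2.fitEllipse(cnt)   # same unpacking as in your find_elongation
print('elongation:', Ma / ma)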

Detection and segmentation of region of interest from X-ray images

I am working on some X-ray images and I want to detect and segment the region of interest in the image.
Consider the input image.
I want to detect square-like shapes in the image, as highlighted in the image.
Output: the region of interest will look somewhat like this.
This is the code I have written so far:
import cv2
import numpy as np
import pandas as pd
import os
from PIL import Image
import matplotlib.pyplot as plt
from skimage.io import imread, imshow
img = cv2.imread('image.jpg')  # read as BGR so the BGR2GRAY conversion below is valid
imshow(img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imshow(gray)
equ = cv2.equalizeHist(gray)  # equalizeHist expects a single-channel image
imshow(equ)
img_resize = cv2.resize(img, (300, 300))
print(img_resize.shape)
figure_size = 9
new_image_gauss = cv2.GaussianBlur(img_resize, (figure_size, figure_size),0)
imshow(new_image_gauss)
img_edge = cv2.Canny(equ,100,200)
# show the image edges on the newly created image window
imshow(img_edge)
kernel = np.ones((5,5), np.uint8)
img_erosion = cv2.erode(img_edge, kernel, iterations=1)
img_dilation = cv2.dilate(img_edge, kernel, iterations=1)
imshow(img_erosion)
These are the results I have got:
Please guide me.
TIA
One thing that might help is to apply a morphology gradient to your image in Python/OpenCV to emphasize the edges before you do your Canny edge detection.
Input:
import cv2
import numpy as np
# read image
img = cv2.imread("xray2.jpg")
# convert img to grayscale
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# do morphology gradient
kernel = cv2.getStructuringElement(cv2.MORPH_RECT , (3,3))
morph = cv2.morphologyEx(gray, cv2.MORPH_GRADIENT, kernel)
# apply gain
morph = cv2.multiply(morph, 5)
# write results
cv2.imwrite("xray2_gradient_edges.jpg",morph)
# show lines
cv2.imshow("morph", morph)
cv2.waitKey(0)
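From there you could feed the gradient image into your existing Canny step; here is a rough sketch, reusing the thresholds from your code (they may need tuning for this X-ray):
# Canny on the gradient-enhanced image; thresholds taken from the question's code
edges = cv2.Canny(morph, 100, 200)
cv2.imwrite("xray2_gradient_canny.jpg", edges)
cv2.imshow("edges", edges)
cv2.waitKey(0)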

how to detect irregular circles in python with opencv

I want to create a vision system for the detection of defects in SMD capacitors. The defect is called a "pinhole": small holes in the surface of the chip that are created at the time of construction. My objective is to create an algorithm that is able to detect these holes and, with this, discard the chips that have this defect.
For the moment I have created two pieces of code:
The first one converts the original image to a binary image so that I can see the circles clearly; the code and the result are as follows. After obtaining the binary image, I don't know how to make my algorithm detect that there is a circle; from what I have investigated, Hough can apparently only be applied to grayscale images.
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread("C:/Users/Johanna Menendez/Documents/UNIR/TFM/chips/20190905_124734.jpg")  # read as BGR so the conversion below works
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
newImg = cv2.resize(img, (0,0), fx=0.50, fy=0.50)
(thresh, blackAndWhiteImage) = cv2.threshold(newImg, 127, 255, cv2.THRESH_BINARY)
numpy_vertical_concat = np.concatenate((newImg, blackAndWhiteImage), axis=1)
cv2.imshow('binary', numpy_vertical_concat)
cv2.waitKey(0)
binary
The second one uses contour detection. The problem with this is that it detects more than just circles, and likewise I don't know how to make my algorithm determine that a given contour is a circle.
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread("C:/Users/Johanna Menendez/Documents/UNIR/TFM/chips/20190905_124734.jpg")  # read as BGR so the conversions below work
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
newImg = cv2.resize(img, (0,0), fx=0.50, fy=0.50)
gray = cv2.cvtColor(newImg, cv2.COLOR_RGB2GRAY)
(thresh, blackAndWhiteImage) = cv2.threshold(gray, 130, 255, cv2.THRESH_BINARY)
(contours, hierarchy) = cv2.findContours(blackAndWhiteImage, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
img = cv2.drawContours(newImg, contours, -1, (0, 255, 0), 2)
#numpy_vertical_concat = np.concatenate((newImg, img), axis=1)
cv2.imshow('binary', img)
cv2.waitKey(0)
contours detection
Could you help me figure out how to make my algorithm detect the circles?
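Note: cv2.HoughCircles does expect a single-channel (grayscale) input, so it could be run on the gray image from the second snippet rather than on the binary one. A minimal, untested sketch, where every parameter value is only a placeholder that would need tuning for the chip images:
# Hough circle detection on the grayscale image from the second snippet
blur = cv2.medianBlur(gray, 5)                     # HoughCircles is sensitive to noise
circles = cv2.HoughCircles(blur, cv2.HOUGH_GRADIENT, dp=1, minDist=20,
                           param1=100, param2=30, minRadius=2, maxRadius=30)
if circles is not None:
    for x, y, r in np.round(circles[0]).astype(int):
        cv2.circle(newImg, (x, y), r, (0, 255, 0), 2)
cv2.imshow('circles', newImg)
cv2.waitKey(0)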

Best way to separate rock fragments in image

I'm trying to make a program to calculate the size distribution of rock fragments based on images from a conveyor belt. I can only achieve that by image segmentation and labeling. Unfortunately, my results are not satisfactory because the program has a problem with dark spots on the rock fragments. These spots are incorrectly recognized as edges of the fragments (e.g. in the top left corner the program found a lot of small fragments, where in reality there is just one big fragment). Did anyone deal with a problem like that?
What I tried until now:
Canny edge detector
thresholding with various parameters
findContours in opencv
sharpening / bilateral filtering
meijering, sato, frangi, hessian ridge detectors from skimage.filters - I'm not sure if I used them in a proper way - maybe someone has more experience with these and knows how to find parameters that would fit.
opening/closing (dilate, erode) with different kernels
current approach: opening -> finding background area -> finding foreground area -> subtracting background and foreground area -> watershed.
code for the current approach:
import numpy as np
import cv2
from skimage import data, util, filters, color, measure
from skimage.segmentation import watershed, clear_border
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
name = 'cut.png'
img = cv2.imread(name, 0)
img_color_org = cv2.imread(name)
ret, thresh = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
kernel2 = np.ones((2, 2), np.uint8)
kernel3 = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel2, iterations=1)
sure_bg = cv2.dilate(opening, kernel3, iterations=3)
dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 3)
ret2, sure_fg = cv2.threshold(dist_transform, 0.15*dist_transform.max(), 255, 0)
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
ret3, markers = cv2.connectedComponents(sure_fg)
markers = markers + 10
markers[unknown == 255] = 0
markers = cv2.watershed(img_color_org, markers)
img_color_org[markers == -1] = [0, 0, 255]
img2 = color.label2rgb(markers, bg_label=0)
cv2.imwrite('img.png', img_color_org)
cv2.waitKey(0)
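Since the end goal is a size distribution and skimage.measure is already imported above, one possible continuation is to measure the area of every labelled fragment. This is an untested sketch that reuses the markers array produced by watershed above (label 10 is the offset background, -1 the watershed boundaries):
# measure each watershed region; keep only labels above the +10 background offset
props = measure.regionprops(np.where(markers > 10, markers, 0))
areas = [p.area for p in props]
plt.hist(areas, bins=30)
plt.xlabel('fragment area [px]')
plt.ylabel('count')
plt.show()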
Original Image
Image after background subtraction, and before watershed operation
Watershed applied to the original image
hand-made segmentation with desired edges
Original image with better quality
Any help will be greatly appreciated!

Middle line between 2 contour lines in opencv python?

I have an image
After my code runs,
The new image is
I need to find the line between them like this
How do I do that?
My code
import numpy as np
import cv2
import cv2 as cv
ima = cv2.imread('track1.pNg')
imgray = cv2.cvtColor(ima,cv2.COLOR_BGR2GRAY)
im = cv2.cvtColor(ima,cv2.COLOR_BGR2GRAY)
imm = cv2.inRange(im,(0),(49))
kernel = np.ones((5,5),np.uint8)
gradient = cv2.morphologyEx(imm, cv2.MORPH_GRADIENT, kernel)
il = cv2.dilate(gradient, kernel, iterations=7)
ol = cv2.erode(il, kernel, iterations=7)
contours,hei = cv2.findContours(ol,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
img = cv2.drawContours(ima, contours, -1, (200,255,0), 3)
cv2.imshow('window',ima)
How can I achieve this?
This answer explains how to find a line that runs between two sides of a shape. The center can be found by iteratively eroding the image.
This is the result:
This is the code I used:
import cv2
import numpy as np
img = 255-cv2.imread('/home/stephen/Desktop/PITqe.png',0)
kernel = np.ones((20,20), np.uint8)
img = cv2.erode(img, kernel, iterations=2)
size = np.size(img)
skel = np.zeros(img.shape,np.uint8)
ret,img = cv2.threshold(img,127,255,0)
element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
done = False
while not done:
    eroded = cv2.erode(img, element)
    temp = cv2.dilate(eroded, element)
    temp = cv2.subtract(img, temp)
    skel = cv2.bitwise_or(skel, temp)
    img = eroded.copy()
    zeros = size - cv2.countNonZero(img)
    cv2.imshow('img', img)
    cv2.waitKey(100)
    if zeros == size:
        done = True
cv2.imshow("img",skel)
cv2.waitKey(0)
cv2.destroyAllWindows()
Here is another way to skeletonize in OpenCV (without explicitly iterating) by using distance transform and top hat morphology.
Input:
import cv2
import numpy as np
# read image and invert so blob is white on black background
img = 255-cv2.imread('tall_blob.png',0)
# do some eroding of img, but not too much
kernel = np.ones((20,20), np.uint8)
img = cv2.erode(img, kernel, iterations=2)
# threshold img
ret, thresh = cv2.threshold(img,127,255,0)
# do distance transform
dist = cv2.distanceTransform(thresh, distanceType=cv2.DIST_L2, maskSize=5)
# set up cross for tophat skeletonization
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
skeleton = cv2.morphologyEx(dist, cv2.MORPH_TOPHAT, kernel)
# threshold skeleton
ret, skeleton = cv2.threshold(skeleton,0,255,0)
# display skeleton
cv2.imshow("skeleton",skeleton)
cv2.waitKey(0)
cv2.destroyAllWindows()
# save results
cv2.imwrite('tall_blob_skeleton.png', skeleton)
