Python: how could this image be properly segmented?

I would like to segment (isolate) the rod-like structures shown in this image:
The best I've managed to do so far is this:
# Imports the libraries.
from skimage import io, filters
import matplotlib.pyplot as plt
import numpy as np
# Imports the image as a numpy array.
img = io.imread('C:/Users/lopez/Desktop/Test electron/test.tif')
# Thresholds the images using a local threshold.
thresh = filters.threshold_local(img,301,offset=0)
binary_local = img > thresh # Thresholds the image
binary_local = np.invert(binary_local) # inverts the thresholded image (True becomes False and vice versa).
# Shows the image.
plt.figure(figsize=(10,10))
plt.imshow(binary_local,cmap='Greys')
plt.axis('off')
plt.show()
Which produces this result:
However, as you can see from the segmented image, I haven't managed to isolate the rods. What should be a black background is filled with interconnected structures. Is there a way to neatly isolate the rod-like structures from all other elements in the image?
The original image can be downloaded from this website
https://dropoff.nbi.ac.uk/pickup.php
Claim ID: qMNrDHnfEn4nPwB8
Claim Passcode: UkwcYoYfXUfeDto8

Here is my attempt using a Meijering filter. The Meijering filter relies on symmetry when it looks for tubular structures and hence the regions where rods overlap (breaking the symmetry of the tubular shape) are not that well recovered, as can be seen in the overlay below.
Also, there is some random crap that I have trouble getting rid of digitally, but maybe you can clean your prep a bit more before imaging.
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage.transform import rescale
from skimage.restoration import denoise_nl_means
from skimage.filters import meijering
from skimage.measure import label
from skimage.color import label2rgb
def remove_small_objects(binary_mask, size_threshold):
    label_image = label(binary_mask)
    object_sizes = np.bincount(label_image.ravel())
    labels2keep, = np.where(object_sizes > size_threshold)
    labels2keep = labels2keep[1:]  # remove the first label, which corresponds to the background
    clean = np.in1d(label_image.ravel(), labels2keep).reshape(label_image.shape)
    return clean
if __name__ == '__main__':
    raw = imread('test.tif')
    # convert to float and normalise to the [0, 1] range
    raw = raw.astype(float)
    raw -= raw.min()
    raw /= raw.max()

    # running everything on the large image took too long for my patience;
    raw = rescale(raw, 0.25, anti_aliasing=True)

    # smooth image while preserving edges
    smoothed = denoise_nl_means(raw, h=0.05, fast_mode=True)

    # filter for tubular shapes
    sigmas = range(1, 5)
    filtered = meijering(smoothed, sigmas=sigmas, black_ridges=False)

    # The Meijering filter always evaluates to high values at the image frame;
    # we hence set the filtered image to zero at those locations.
    frame = np.ones_like(filtered, dtype=bool)
    d = 2 * np.max(sigmas) + 1  # this is the theoretical minimum ...
    d += 2                      # ... but doesn't seem to be enough, so we increase d
    frame[d:-d, d:-d] = False
    filtered[frame] = np.min(filtered)

    thresholded = filtered > np.percentile(filtered, 80)
    cleaned = remove_small_objects(thresholded, 200)

    overlay = raw.copy()
    overlay[np.invert(cleaned)] = overlay[np.invert(cleaned)] * 2/3

    fig, axes = plt.subplots(2, 3, sharex=True, sharey=True)
    axes = axes.ravel()
    axes[0].imshow(raw, cmap='gray')
    axes[1].imshow(smoothed, cmap='gray')
    axes[2].imshow(filtered, cmap='gray')
    axes[3].imshow(thresholded, cmap='gray')
    axes[4].imshow(cleaned, cmap='gray')
    axes[5].imshow(overlay, cmap='gray')
    for ax in axes:
        ax.axis('off')

    fig, ax = plt.subplots()
    ax.imshow(overlay, cmap='gray')
    ax.axis('off')
    plt.show()
If this code makes it into a paper, I want an acknowledgement and a copy of the paper. ;-)

Related

How to show the Gaussian mixture models for clustering an image?

I used the attached code to get the GMM for some images. I also want to show the GMM on the histogram of the image, which I have already done. However, I also want to show the distribution of each GMM cluster. I attached the output of the GMM on the histogram and another image of what I want to get.
Thanks.
# Code for GMM
import os
import matplotlib.pyplot as plt
import numpy as np
import cv2
img = cv2.imread("test.jpg")
#Convert MxNx3 image into Kx3 where K=MxN
img2 = img.reshape((-1,3)) #-1 reshape means, in this case MxN
from sklearn.mixture import GaussianMixture as GMM
#covariance choices, full, tied, diag, spherical
gmm_model = GMM(n_components=6, covariance_type='full').fit(img2) #tied works better than full
gmm_labels = gmm_model.predict(img2)
#Put numbers back to original shape so we can reconstruct segmented image
original_shape = img.shape
segmented = gmm_labels.reshape(original_shape[0], original_shape[1])
cv2.imwrite("test_segmented.jpg", segmented)
gmm_model.means_
gmm_model.covariances_
gmm_model.weights_
print(gmm_model.means_, gmm_model.covariances_, gmm_model.weights_)
data = img2.ravel()
data = data[data != 0]
data = data[data != 1] #Removes background pixels (intensities 0 and 1)
gmm = GMM(n_components = 6)
gmm = gmm.fit(X=np.expand_dims(data,1))
gmm_x = np.linspace(0,255,256)
gmm_y = np.exp(gmm.score_samples(gmm_x.reshape(-1,1)))
#Plot histograms and gaussian curves
fig, ax = plt.subplots()
ax.hist(img.ravel(),255,[2,256], density=True, stacked=True)
ax.plot(gmm_x, gmm_y, color="crimson", lw=2, label="GMM")
ax.set_ylabel("Frequency")
ax.set_xlabel("Pixel Intensity")
plt.legend()
plt.grid(False)
plt.show()
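A minimal sketch of how the individual cluster distributions could be overlaid on the histogram, reusing the 1-D gmm, data, gmm_x and gmm_y from the code above (each component curve is its Gaussian PDF scaled by the mixture weight; this is an illustrative sketch, not the asker's code):
from scipy.stats import norm

# Plot each fitted component separately: weight_k * N(x; mean_k, sigma_k).
fig, ax = plt.subplots()
ax.hist(img.ravel(), 255, [2, 256], density=True, alpha=0.5, label="histogram")
for k in range(gmm.n_components):
    mean_k = gmm.means_[k, 0]
    sigma_k = np.sqrt(gmm.covariances_[k, 0, 0])
    weight_k = gmm.weights_[k]
    ax.plot(gmm_x, weight_k * norm.pdf(gmm_x, mean_k, sigma_k), label=f"component {k}")
ax.plot(gmm_x, gmm_y, color="crimson", lw=2, label="GMM (sum)")
ax.set_xlabel("Pixel Intensity")
ax.set_ylabel("Density")
ax.legend()
plt.show()
The sum of the weighted component curves equals the crimson GMM curve, so the plot makes visible how each cluster contributes to the overall intensity distribution.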

How to take a grey scale numpy image and tint it red

I have a 2D grey scale image, loaded using imread.
I want to colourise it.
What's the best way to use numpy/skimage/Python to achieve this?
It will depend a bit on the exact format of your input. But the basic procedure should be as simple as:
>>> import numpy as np
>>> from skimage import data, io
>>>
# an example grey scale image
>>> grey = data.coins()
# a helper for convenient channel (RGB) picking
>>> RGB = np.array((*"RGB",))
# the actual coloring can be written as an outer product
>>> red = np.multiply.outer(grey, RGB=='R')
# save for posterity
>>> io.imsave('red.png', red)
If this is a single-channel image, you could convert it to a "redscale" image by doing something like this:
zero_channel = np.zeros_like(greyscale_array)
redscale = np.stack([greyscale_array, zero_channel, zero_channel], axis=2)
Without fully understanding the shape of your array it's difficult to answer, though.
import numpy as np
import matplotlib.pyplot as plt
from skimage import color
from skimage import img_as_float
from PIL import Image

# Load the image as a single-channel float array in [0, 1].
jpgfile = Image.open("pp.jpg")
grayscale_image = img_as_float(np.asarray(jpgfile.convert("L")))

# Replicate the grey channel into RGB, then zero out green and blue.
image = color.gray2rgb(grayscale_image)
red_multiplier = [1, 0, 0]

fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4),
                               sharex=True, sharey=True)
ax1.imshow(grayscale_image, cmap='gray')
ax2.imshow(red_multiplier * image)
plt.show()

How to compute the gradients of an image using Python

I wonder how to use Python to compute the gradients of an image, in both the x and y directions. I want to get an x-gradient map and a y-gradient map of the image. Can anyone tell me how to do this?
Thanks~
I think you mean this:
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
# Create a black image
img=np.zeros((640,480))
# ... and make a white rectangle in it
img[100:-100,80:-80]=1
# See how it looks
plt.imshow(img,cmap=plt.cm.gray)
plt.show()
# Rotate it for extra fun
img=ndimage.rotate(img,25,mode='constant')
# Have another look
plt.imshow(img,cmap=plt.cm.gray)
plt.show()
# Get gradient along axis 0 (rows) in "sx"
sx = ndimage.sobel(img,axis=0,mode='constant')
# Get gradient along axis 1 (columns) in "sy"
sy = ndimage.sobel(img,axis=1,mode='constant')
# Get square root of sum of squares
sobel=np.hypot(sx,sy)
# Hopefully see some edges
plt.imshow(sobel,cmap=plt.cm.gray)
plt.show()
Or you can define the x and y gradient convolution kernels yourself and call the convolve() function:
# Create a black image
img=np.zeros((640,480))
# ... and make a white rectangle in it
img[100:-100,80:-80]=1
# Define kernel for x differences
kx = np.array([[1,0,-1],[2,0,-2],[1,0,-1]])
# Define kernel for y differences
ky = np.array([[1,2,1] ,[0,0,0], [-1,-2,-1]])
# Perform x convolution
x=ndimage.convolve(img,kx)
# Perform y convolution
y=ndimage.convolve(img,ky)
sobel=np.hypot(x,y)
plt.imshow(sobel,cmap=plt.cm.gray)
plt.show()
You can use OpenCV to compute x and y gradients as below:
import numpy as np
import cv2
img = cv2.imread('Desert.jpg')
kernely = np.array([[1,1,1],[0,0,0],[-1,-1,-1]])
kernelx = np.array([[1,0,-1],[1,0,-1],[1,0,-1]])
edges_x = cv2.filter2D(img,cv2.CV_8U,kernelx)
edges_y = cv2.filter2D(img,cv2.CV_8U,kernely)
cv2.imshow('Gradients_X',edges_x)
cv2.imshow('Gradients_Y',edges_y)
cv2.waitKey(0)
We can do it with scikit-image filters module functions too, as shown below:
import matplotlib.pylab as plt
from skimage.io import imread
from skimage.color import rgb2gray
from skimage import filters
im = rgb2gray(imread('../images/cameraman.jpg')) # RGB image to gray scale
plt.gray()
plt.figure(figsize=(20,20))
plt.subplot(221)
plt.imshow(im)
plt.title('original', size=20)
plt.subplot(222)
edges_y = filters.sobel_h(im)  # horizontal edges (derivative along rows)
plt.imshow(edges_y)
plt.title('sobel_h', size=20)
plt.subplot(223)
edges_x = filters.sobel_v(im)  # vertical edges (derivative along columns)
plt.imshow(edges_x)
plt.title('sobel_v', size=20)
plt.subplot(224)
edges = filters.sobel(im)
plt.imshow(edges)
plt.title('sobel', size=20)
plt.show()
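If you just want plain finite-difference gradients rather than Sobel-filtered ones, NumPy's np.gradient returns the per-axis derivative maps directly. A minimal sketch (the file name is only a placeholder):
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage.color import rgb2gray

im = rgb2gray(imread('cameraman.jpg'))  # placeholder file name
# np.gradient returns one array per axis: derivative along rows, then along columns.
gy, gx = np.gradient(im)

fig, (ax1, ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)
ax1.imshow(gx, cmap='gray')
ax1.set_title('x gradient (columns)')
ax2.imshow(gy, cmap='gray')
ax2.set_title('y gradient (rows)')
plt.show()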

Rendering a CSV with pixel values to an image with PyPlot

I am trying to draw a black-and-white line picture from raw data stored in a CSV,
where 1 is black and 0 is white.
That way I can later use values between 0 and 1 to draw grey-level pictures.
The problem I've run into is that the output picture's resolution does not match the raw data.
The original raw data has 1080*2160 values between 0 and 1, and each value should correspond to exactly one pixel.
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv('python line.csv', encoding='big5')
fig, ax = plt.subplots()
heatmap = ax.pcolor(data, cmap=plt.cm.Greys)
ax.invert_yaxis()
ax.xaxis.tick_top()
I think that everything is fine with your code, but the size of the pyplot figure is what forces the irregular "bands" you are seeing in your image.
If you increase the figure size, this should help your image quality. To do that, call plt.figure(figsize=(10, 20)) for a 10 inch x 20 inch figure. The example I have below uses a smaller image, but you should be able to scale it by changing the values.
I suggest you use the Python Imaging Library (PIL). Here's how I would implement this with tools available in the Anaconda distribution:
from PIL import Image
import random
import math
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline

# Build a 100 x 200 frame of random values in [0, 1] and round-trip it through CSV.
df = pd.DataFrame()
for i in range(200):
    #df[i] = [1 if i % 2 == 0 else 0 for i in range(100)]
    df[i] = [random.uniform(0, 1) for i in range(100)]
df.to_csv('data.csv', index=False)
df1 = pd.read_csv('data.csv')

# One image pixel per CSV value: 200 columns wide, 100 rows tall.
im1 = Image.new('RGB', (200, 100), 'white')
pixels = []
for i, row in df1.iterrows():
    for j in range(df1.shape[1]):
        c = math.floor(float(row.iloc[j]) * 255)
        pixels.append((c, c, c))
im1.putdata(pixels)

plt.figure(figsize=(10, 5))
imgplot = plt.imshow(im1)
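If the goal is an output image whose pixel grid exactly matches the 1080*2160 CSV values, it may be simpler to skip the figure machinery and write the array out directly. A minimal sketch, assuming the CSV parses to a purely numeric frame (file name taken from the question):
import pandas as pd
import matplotlib.pyplot as plt

# Read the CSV and turn it into a float array with values in [0, 1].
data = pd.read_csv('python line.csv', encoding='big5').to_numpy(dtype=float)

# Map 1 -> black and 0 -> white, writing exactly one output pixel per array value.
plt.imsave('line.png', data, cmap='Greys', vmin=0.0, vmax=1.0)
Because imsave bypasses the figure and axes entirely, the saved PNG has the same shape as the data array, with no resampling bands.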

Crop out partial image using NumPy (or SciPy)

Using numpy or scipy (I am not using OpenCV) I am trying to crop a region out of an image.
For instance, I have this:
and I want to get this:
Is there something like cropPolygon(image, vertices=[(1,2),(3,4)...]) with numpy or SciPy?
Are you using matplotlib?
One approach I've taken previously is to use the .contains_points() method of a matplotlib.path.Path to construct a boolean mask, which can then be used to index into the image array.
For example:
import numpy as np
from matplotlib.path import Path
# scipy.misc.lena() has been removed from SciPy; use another bundled sample image
from scipy import datasets
img = datasets.ascent()
# vertices of the cropping polygon
xc = np.array([219.5, 284.8, 340.8, 363.5, 342.2, 308.8, 236.8, 214.2])
yc = np.array([284.8, 220.8, 203.5, 252.8, 328.8, 386.2, 382.2, 328.8])
xycrop = np.vstack((xc, yc)).T
# xy coordinates for each pixel in the image
nr, nc = img.shape
ygrid, xgrid = np.mgrid[:nr, :nc]
xypix = np.vstack((xgrid.ravel(), ygrid.ravel())).T
# construct a Path from the vertices
pth = Path(xycrop, closed=False)
# test which pixels fall within the path
mask = pth.contains_points(xypix)
# reshape to the same size as the image
mask = mask.reshape(img.shape)
# create a masked array
masked = np.ma.masked_array(img, ~mask)
# if you want to get rid of the blank space above and below the cropped
# region, use the min and max x, y values of the cropping polygon:
xmin, xmax = int(xc.min()), int(np.ceil(xc.max()))
ymin, ymax = int(yc.min()), int(np.ceil(yc.max()))
trimmed = masked[ymin:ymax, xmin:xmax]
Plotting:
from matplotlib import pyplot as plt
fig, ax = plt.subplots(2, 2)
ax[0,0].imshow(img, cmap=plt.cm.gray)
ax[0,0].set_title('original')
ax[0,1].imshow(mask, cmap=plt.cm.gray)
ax[0,1].set_title('mask')
ax[1,0].imshow(masked, cmap=plt.cm.gray)
ax[1,0].set_title('masked original')
ax[1,1].imshow(trimmed, cmap=plt.cm.gray)
ax[1,1].set_title('trimmed original')
plt.show()
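For what it's worth, newer versions of scikit-image ship essentially the cropPolygon-style helper asked about: skimage.draw.polygon2mask builds the boolean mask directly from the vertices. A minimal sketch reusing xc, yc and img from above:
import numpy as np
from skimage.draw import polygon2mask

# polygon2mask expects the output shape and (row, col) vertex coordinates.
polygon = np.column_stack((yc, xc))
mask = polygon2mask(img.shape, polygon)

# Same masked-array trick as above, without building the pixel grid by hand.
masked = np.ma.masked_array(img, ~mask)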
