Crop out partial image using NumPy (or SciPy) - python

Using NumPy or SciPy (I am not using OpenCV), I am trying to crop a region out of an image.
For instance, I have this:
and I want to get this:
Is there something like cropPolygon(image, vertices=[(1,2),(3,4)...]) in NumPy or SciPy?

Are you using matplotlib?
One approach I've taken previously is to use the .contains_points() method of a matplotlib.path.Path to construct a boolean mask, which can then be used to index into the image array.
For example:
import numpy as np
from matplotlib.path import Path
# scipy.misc.lena() has been removed from SciPy; ascent() is a comparable
# 512x512 grayscale sample image (via scipy.datasets, which needs the pooch package)
from scipy import datasets
img = datasets.ascent()
# vertices of the cropping polygon
xc = np.array([219.5, 284.8, 340.8, 363.5, 342.2, 308.8, 236.8, 214.2])
yc = np.array([284.8, 220.8, 203.5, 252.8, 328.8, 386.2, 382.2, 328.8])
xycrop = np.vstack((xc, yc)).T
# xy coordinates for each pixel in the image
nr, nc = img.shape
ygrid, xgrid = np.mgrid[:nr, :nc]
xypix = np.vstack((xgrid.ravel(), ygrid.ravel())).T
# construct a Path from the vertices
pth = Path(xycrop, closed=False)
# test which pixels fall within the path
mask = pth.contains_points(xypix)
# reshape to the same size as the image
mask = mask.reshape(img.shape)
# create a masked array
masked = np.ma.masked_array(img, ~mask)
# if you want to get rid of the blank space above and below the cropped
# region, use the min and max x, y values of the cropping polygon:
xmin, xmax = int(xc.min()), int(np.ceil(xc.max()))
ymin, ymax = int(yc.min()), int(np.ceil(yc.max()))
trimmed = masked[ymin:ymax, xmin:xmax]
Plotting:
from matplotlib import pyplot as plt
fig, ax = plt.subplots(2, 2)
ax[0,0].imshow(img, cmap=plt.cm.gray)
ax[0,0].set_title('original')
ax[0,1].imshow(mask, cmap=plt.cm.gray)
ax[0,1].set_title('mask')
ax[1,0].imshow(masked, cmap=plt.cm.gray)
ax[1,0].set_title('masked original')
ax[1,1].imshow(trimmed, cmap=plt.cm.gray)
ax[1,1].set_title('trimmed original')
plt.show()
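If you would rather not go through matplotlib for the mask itself, scikit-image's draw module offers polygon2mask, which rasterizes a polygon directly into a boolean array. A minimal sketch, assuming the same img, xc and yc arrays as above (note that polygon2mask expects vertices in (row, col), i.e. (y, x), order):
import numpy as np
from skimage.draw import polygon2mask
# build the boolean mask straight from the polygon vertices
mask = polygon2mask(img.shape, np.column_stack((yc, xc)))
masked = np.ma.masked_array(img, ~mask)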

Related

matplotlib.pyplot figure will not close and becomes unresponsive

I'm trying to draw an ROI to extract pixels of interest from a hyperspectral image. After I draw my ROI and extract the pixels of interest, the figure does not close and then becomes unresponsive. Can anyone help me fix this issue?
My code is below:
import matplotlib
# matplotlib.use('Qt5Agg')
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import numpy as np
import spectral.io.envi as envi
import matplotlib.path as mpath
import rasterio as rio
import rasterio.features
from shapely.geometry import Polygon
class get_roi_pixels():
    # Load an image using matplotlib's imread function
    # img = plt.imread('image.jpg')
    def __init__(self, img_path):
        self.img_path = img_path
        self.hdr_path = '.'.join([self.img_path.split('.')[0],'HDR'])
        self.out_file = '.'.join(['_'.join([self.img_path.split('.')[0],'CROP']),'HDR'])
        self.img_obj = envi.open(self.hdr_path)
        self.rio_transform = rio.open(self.img_path).transform
        self.img = self.img_obj.load()
        # clean the data
        self.img = np.where(self.img>1, np.nan, self.img)
        self.img = np.where(self.img<0, np.nan, self.img)
        self.img, self.nanCs = self.remove_nan_columns(self.img)
        self.img, self.nanRs = self.remove_nan_rows(self.img)

    def select_roi(self):
        # Display the image
        fig1 = plt.figure()
        ax = fig1.add_subplot(111)
        ax.imshow(self.img[:,:,126])
        # Define the ROI by selecting the vertices of the irregular polygon
        self.roi = plt.ginput(-1, timeout=30, mouse_stop=3, mouse_pop=2)
        # Create a binary mask to store the ROI
        self.mask = np.zeros(self.img.shape[:2], dtype=bool)
        # Draw the irregular polygon on the mask using the selected vertices
        plt.fill(
            [x for x, y in self.roi],
            [y for x, y in self.roi],
            color='white',
            closed=True
        )
        plt.draw()
        plt.pause(0.001)
        plt.close('all')
        self.roi_poly = Polygon(self.roi)
        # Create a binary mask using the polygon ROI
        self.mask = rio.features.geometry_mask(
            [self.roi_poly],
            out_shape=self.img.shape[:2],
            transform=self.rio_transform,
            all_touched=False,
            invert=False)
        # Extract the pixels in the ROI
        self.roi_img = np.zeros_like(self.img)
        self.roi_img[self.mask] = self.img[self.mask]
        # Extract the pixel values from the ROI
        self.roi_pixels = np.array(self.roi_img)
I don't get any errors. The function runs as expected, but the figure just stays open, and when I try to close it manually I get the spinning rainbow wheel of death. I'm on a Mac, using Spyder as my editor.
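For reference, a minimal, standalone sketch of the ginput-then-close pattern with a placeholder image, where the figure is closed by handle and the GUI event loop is given a moment to process the close event; whether this avoids the hang depends on the backend and on how the IDE runs the event loop:
import matplotlib
matplotlib.use('TkAgg')  # same backend as in the question
import matplotlib.pyplot as plt
import numpy as np
img = np.random.rand(100, 100)  # placeholder image
fig, ax = plt.subplots()
ax.imshow(img)
pts = plt.ginput(-1, timeout=30, mouse_stop=3, mouse_pop=2)
plt.close(fig)   # close the specific figure rather than 'all'
plt.pause(0.1)   # let the GUI event loop process the close event
print(pts)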

How to show the Gaussian mixture models for clustering an image?

I used the attached code to get the GMM for some images. I also want to show the GMM on the histogram of the image, which I have already done. However, I also want to show the distribution of the individual GMM clusters. I attached the output of the GMM on the histogram and another image of what I want to get. Thanks.
# Code for GMM
import os
import matplotlib.pyplot as plt
import numpy as np
import cv2
img = cv2.imread("test.jpg")
#Convert MxNx3 image into Kx3 where K=MxN
img2 = img.reshape((-1,3)) #-1 reshape means, in this case MxN
from sklearn.mixture import GaussianMixture as GMM
#covariance choices, full, tied, diag, spherical
gmm_model = GMM(n_components=6, covariance_type='full').fit(img2) #tied works better than full
gmm_labels = gmm_model.predict(img2)
#Put numbers back to original shape so we can reconstruct segmented image
original_shape = img.shape
segmented = gmm_labels.reshape(original_shape[0], original_shape[1])
cv2.imwrite("test_segmented.jpg", segmented)
# inspect the fitted parameters
print(gmm_model.means_, gmm_model.covariances_, gmm_model.weights_)
data = img2.ravel()
data = data[data != 0]
data = data[data != 1] #Removes background pixels (intensities 0 and 1)
gmm = GMM(n_components = 6)
gmm = gmm.fit(X=np.expand_dims(data,1))
gmm_x = np.linspace(0,255,256)
gmm_y = np.exp(gmm.score_samples(gmm_x.reshape(-1,1)))
#Plot histograms and gaussian curves
fig, ax = plt.subplots()
ax.hist(img.ravel(),255,[2,256], density=True, stacked=True)
ax.plot(gmm_x, gmm_y, color="crimson", lw=2, label="GMM")
ax.set_ylabel("Frequency")
ax.set_xlabel("Pixel Intensity")
plt.legend()
plt.grid(False)
plt.show()
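To show the individual cluster distributions as well (which is what the question asks for), one option is to evaluate each fitted component separately and overlay it as a weighted Gaussian, adding these lines before plt.show(). A sketch, assuming the 1-D gmm, gmm_x and ax objects created above:
from scipy.stats import norm
# each component contributes weight * N(mean, variance) to the mixture density
for mean, var, weight in zip(gmm.means_.ravel(),
                             gmm.covariances_.ravel(),
                             gmm.weights_):
    ax.plot(gmm_x, weight * norm.pdf(gmm_x, mean, np.sqrt(var)),
            ls='--', label=f"component at {mean:.1f}")
ax.legend()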

How to extract rgb values of this colorbar image in python?

I want to create a colormap matching the colorbar in the attached image. So far I have tried the code below, but I didn't get the result I was looking for.
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
img = plt.imread('Climat.png')
colors_from_img = img[:, 0, :]
my_cmap = LinearSegmentedColormap.from_list('my_cmap', colors_from_img, N=651)
y = np.random.random_sample((100, 100))
plt.imshow(y, cmap=my_cmap); plt.colorbar()
Looking for your suggestions. Thank you in advance.
bicarlsen has given you the correct direction. Restrict the points from which you extract the colors to the colored rectangles:
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
img = plt.imread('Climat.png')
colors_from_img = img[80::88, 30, :]
my_cmap = LinearSegmentedColormap.from_list('my_cmap', colors_from_img[::-1], N=len(colors_from_img))
y = np.random.random_sample((100, 100))
plt.imshow(y, cmap=my_cmap)
plt.colorbar()
plt.show()
Sample output:
P.S.: Initially, I thought a more general approach with
colors_from_img = np.unique(img[:, 30, :], axis=0)
was possible, but since the input image is rasterized, all kinds of mixed colors are present where the black lines separate the colored rectangles.
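As a quick sanity check of what was extracted, the sampled colors can be displayed as a horizontal strip before building the colormap. A small sketch, assuming the colors_from_img array from the code above:
# show the extracted colors as a one-row strip (reversed to match the colormap order)
plt.imshow(colors_from_img[::-1][np.newaxis], aspect='auto')
plt.axis('off')
plt.show()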

Python: how could this image be properly segmented?

I would like to segment (isolate) the rod-like structures shown in this image:
The best I've managed to do is this:
# Imports the libraries.
from skimage import io, filters
import matplotlib.pyplot as plt
import numpy as np
# Imports the image as a numpy array.
img = io.imread('C:/Users/lopez/Desktop/Test electron/test.tif')
# Thresholds the images using a local threshold.
thresh = filters.threshold_local(img,301,offset=0)
binary_local = img > thresh # Thresholds the image
binary_local = np.invert(binary_local) # inverts the thresholded image (True becomes False and vice versa).
# Shows the image.
plt.figure(figsize=(10,10))
plt.imshow(binary_local,cmap='Greys')
plt.axis('off')
plt.show()
Which produces this result
However, as you can see from the segmented image, I haven't managed to isolate the rods. What should be black background is filled with interconnected structures. Is there a way to neatly isolate the rod-like structures from all other elements in the image?
The original image can be downloaded from this website
https://dropoff.nbi.ac.uk/pickup.php
Claim ID: qMNrDHnfEn4nPwB8
Claim Passcode: UkwcYoYfXUfeDto8
Here is my attempt using a Meijering filter. The Meijering filter relies on symmetry when it looks for tubular structures and hence the regions where rods overlap (breaking the symmetry of the tubular shape) are not that well recovered, as can be seen in the overlay below.
Also, there is some random crap that I have trouble getting rid of digitally, but maybe you can clean your prep a bit more before imaging.
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage.transform import rescale
from skimage.restoration import denoise_nl_means
from skimage.filters import meijering
from skimage.measure import label
from skimage.color import label2rgb
def remove_small_objects(binary_mask, size_threshold):
    label_image = label(binary_mask)
    object_sizes = np.bincount(label_image.ravel())
    labels2keep, = np.where(object_sizes > size_threshold)
    labels2keep = labels2keep[1:] # remove the first label, which corresponds to the background
    clean = np.in1d(label_image.ravel(), labels2keep).reshape(label_image.shape)
    return clean
if __name__ == '__main__':

    raw = imread('test.tif')
    raw = raw.astype(float)  # ensure float so the in-place normalisation below works
    raw -= raw.min()
    raw /= raw.max()

    # running everything on the large image took too long for my patience;
    raw = rescale(raw, 0.25, anti_aliasing=True)

    # smooth image while preserving edges
    smoothed = denoise_nl_means(raw, h=0.05, fast_mode=True)

    # filter for tubular shapes
    sigmas = range(1, 5)
    filtered = meijering(smoothed, sigmas=sigmas, black_ridges=False)

    # Meijering filter always evaluates to high values at the image frame;
    # we hence set the filtered image to zero at those locations
    frame = np.ones_like(filtered, dtype=bool)
    d = 2 * np.max(sigmas) + 1 # this is the theoretical minimum ...
    d += 2 # ... but doesn't seem to be enough so we increase d
    frame[d:-d, d:-d] = False
    filtered[frame] = np.min(filtered)

    thresholded = filtered > np.percentile(filtered, 80)
    cleaned = remove_small_objects(thresholded, 200)

    overlay = raw.copy()
    overlay[np.invert(cleaned)] = overlay[np.invert(cleaned)] * 2/3

    fig, axes = plt.subplots(2, 3, sharex=True, sharey=True)
    axes = axes.ravel()
    axes[0].imshow(raw, cmap='gray')
    axes[1].imshow(smoothed, cmap='gray')
    axes[2].imshow(filtered, cmap='gray')
    axes[3].imshow(thresholded, cmap='gray')
    axes[4].imshow(cleaned, cmap='gray')
    axes[5].imshow(overlay, cmap='gray')
    for ax in axes:
        ax.axis('off')

    fig, ax = plt.subplots()
    ax.imshow(overlay, cmap='gray')
    ax.axis('off')
    plt.show()
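As an aside, scikit-image ships a ready-made equivalent of the remove_small_objects helper defined above, so the cleaning step could also be written with it. A sketch, assuming the same thresholded array:
# aliased to avoid shadowing the helper defined above
from skimage.morphology import remove_small_objects as sk_remove_small_objects
# keep only connected components larger than 200 pixels
cleaned = sk_remove_small_objects(thresholded, min_size=200)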
If this code makes it into a paper, I want an acknowledgement and a copy of the paper. ;-)

How to compute the gradients of image using Python

I am wondering how to use Python to compute the gradients of an image in both the x and y directions. I want to get an x-gradient map and a y-gradient map of the image. Can anyone tell me how to do this?
Thanks~
I think you mean this:
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
# Create a black image
img=np.zeros((640,480))
# ... and make a white rectangle in it
img[100:-100,80:-80]=1
# See how it looks
plt.imshow(img,cmap=plt.cm.gray)
plt.show()
# Rotate it for extra fun
img=ndimage.rotate(img,25,mode='constant')
# Have another look
plt.imshow(img,cmap=plt.cm.gray)
plt.show()
# Get the gradient along axis 0 (rows) in "sx"
sx = ndimage.sobel(img,axis=0,mode='constant')
# Get the gradient along axis 1 (columns) in "sy"
sy = ndimage.sobel(img,axis=1,mode='constant')
# Get square root of sum of squares
sobel=np.hypot(sx,sy)
# Hopefully see some edges
plt.imshow(sobel,cmap=plt.cm.gray)
plt.show()
Or you can define the x and y gradient convolution kernels yourself and call the convolve() function:
# Create a black image
img=np.zeros((640,480))
# ... and make a white rectangle in it
img[100:-100,80:-80]=1
# Define kernel for x differences
kx = np.array([[1,0,-1],[2,0,-2],[1,0,-1]])
# Define kernel for y differences
ky = np.array([[1,2,1] ,[0,0,0], [-1,-2,-1]])
# Perform x convolution
x=ndimage.convolve(img,kx)
# Perform y convolution
y=ndimage.convolve(img,ky)
sobel=np.hypot(x,y)
plt.imshow(sobel,cmap=plt.cm.gray)
plt.show()
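If plain finite differences are enough for your purpose, NumPy can also compute both gradient maps directly, without a convolution kernel. A small sketch, assuming the same img as above (np.gradient returns the derivative along axis 0 first, then axis 1):
# central-difference gradients along rows (gy) and columns (gx)
gy, gx = np.gradient(img)
magnitude = np.hypot(gx, gy)
plt.imshow(magnitude, cmap=plt.cm.gray)
plt.show()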
You can use OpenCV to compute the x and y gradients as below:
import numpy as np
import cv2
img = cv2.imread('Desert.jpg')
kernely = np.array([[1,1,1],[0,0,0],[-1,-1,-1]])
kernelx = np.array([[1,0,-1],[1,0,-1],[1,0,-1]])
edges_x = cv2.filter2D(img,cv2.CV_8U,kernelx)
edges_y = cv2.filter2D(img,cv2.CV_8U,kernely)
cv2.imshow('Gradients_X',edges_x)
cv2.imshow('Gradients_Y',edges_y)
cv2.waitKey(0)
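One caveat with the snippet above: with ddepth=cv2.CV_8U, negative gradient values are clipped to zero, so half of each edge disappears. A sketch that keeps the sign by computing in a float depth with OpenCV's built-in Sobel and rescaling only for display (the same 'Desert.jpg' file is assumed):
import cv2
img = cv2.imread('Desert.jpg', cv2.IMREAD_GRAYSCALE)
gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)  # derivative in x
gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)  # derivative in y
cv2.imshow('Gradients_X', cv2.convertScaleAbs(gx))
cv2.imshow('Gradients_Y', cv2.convertScaleAbs(gy))
cv2.waitKey(0)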
We can do it with scikit-image filters module functions too, as shown below:
import matplotlib.pylab as plt
from skimage.io import imread
from skimage.color import rgb2gray
from skimage import filters
im = rgb2gray(imread('../images/cameraman.jpg')) # RGB image to gray scale
plt.gray()
plt.figure(figsize=(20,20))
plt.subplot(221)
plt.imshow(im)
plt.title('original', size=20)
plt.subplot(222)
edges_y = filters.sobel_h(im)   # horizontal edges (gradient along rows)
plt.imshow(edges_y)
plt.title('sobel_h (horizontal edges)', size=20)
plt.subplot(223)
edges_x = filters.sobel_v(im)   # vertical edges (gradient along columns)
plt.imshow(edges_x)
plt.title('sobel_v (vertical edges)', size=20)
plt.subplot(224)
edges = filters.sobel(im)
plt.imshow(edges)
plt.title('sobel', size=20)
plt.show()
