Calculating NDVI from a NIR Image of a Plant - python

I'm trying to get the NDVI of a plant image. This is my first time doing this, so I'm not sure whether the steps I've taken are correct. In the second block, I added a colormap to the NIR image and then split the channels so I can use the NDVI formula with the NIR channel and the red channel. In the last block I applied the formula and plotted the image. The resulting values in the second block look a bit off to me, but I'm not sure how to check whether this is correct.
import os
from PIL import Image
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from IPython.display import display
import cv2
import cmapy
img_path = os.getcwd() + "/plant.png"
img = Image.open(img_path)
plt.figure(figsize = (10,10))
test_img = img
plt.imshow(test_img)
NIR Image:
test_img2 = cv2.imread(img_path)
img_c = cv2.applyColorMap(test_img2, cmapy.cmap('Reds')).astype(int)  # np.int was removed from recent NumPy; plain int works
_, R, NIR = cv2.split(img_c)
plt.figure(figsize = (10,10))
plt.imshow(img_c)
plt.show()
Adding colormap:
ndvi = (NIR-R)/(NIR+R+0.01)
print(ndvi.max())   # 0.9999029220464032
print(ndvi.mean())  # 0.10046810466714343
print(ndvi.min())   # 0.01999960000799984
plt.figure(figsize = (10,10))
plt.imshow(ndvi)
plt.show()
After calculating NDVI:
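As a quick sanity check (a minimal sketch, not part of the original post, and assuming NIR and R really hold near-infrared and red intensities): cast both channels to float before applying the formula and confirm the result stays within [-1, 1], which is the valid NDVI range.
# Sketch only: recompute NDVI in float and check the value range.
# NIR and R are assumed to be the channel arrays from the split above.
nir = NIR.astype(float)
red = R.astype(float)
ndvi_check = (nir - red) / (nir + red + 1e-6)  # small epsilon avoids division by zero
assert -1.0 <= ndvi_check.min() and ndvi_check.max() <= 1.0
print(ndvi_check.min(), ndvi_check.mean(), ndvi_check.max())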

Related

Convert imshow spectrogram to image

I would like to save just the wavelet image (no ticks or labels) shown here to a PNG file.
I tried to follow the solution posted here for saving a spectrogram plot, but this approach is not working for me.
This is what I get:
This is the code that I have used:
import librosa
import librosa.display
import os
import pywt
import matplotlib.pyplot as plt
import soundfile as sf
import skimage.io
from tftb.generators import anasing
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
from ssqueezepy import cwt
from ssqueezepy.visuals import plot, imshow
# Set the path to the Binary Class dataset
fulldatasetpath = 'G:/AudioFile'
input_file = (r'G:/Audiofile.wav')
[data1, sample_rate1] = sf.read(input_file)
#[sample_rate1, data1] = wav.read(input_file);
duration = len(data1)/sample_rate1
time = np.arange(0, duration, 1/sample_rate1) #time vector
#%%############## Take CWT & plot ##################################################
Wx, scales = cwt(data1, 'morlet')
imshow(Wx, abs=1)
plt.show()
Wx = abs(Wx)
#%%############## SAVE TO IMAGE ###########################################
def scale_minmax(X, min=0.0, max=1.0):
    X_std = (X - X.min()) / (X.max() - X.min())
    X_scaled = X_std * (max - min) + min
    return X_scaled
wave1 = np.log(Wx + 1e-9) # add small number to avoid log(0)
# min-max scale to fit inside 8-bit range
img = scale_minmax(Wx, 0, 255).astype(np.uint8)
img = np.flip(img, axis=0) # put low frequencies at the bottom in image
img = 255-img # invert. make black==more energy
out = 'out.png'
# save as PNG
skimage.io.imsave(out, img)
You can set the position of the axis to cover the entire figure, and you can also play with figsize. For example:
import matplotlib.pyplot as plt
import numpy as np
from ssqueezepy import imshow
# test image
img = np.zeros((500, 40000, 3), dtype=int)
for i in range(img.shape[1]):
    img[:, i, 0] = int(abs(1 - 2 * i / img.shape[1]) * 255)
# create a figure and set the size
f = plt.figure(figsize=(8, 4))
# add a new axis into which ssqueezepy is going to plot
ax = f.add_subplot()
imshow(img)
# turn off tick labels
ax.axis(False)
# make the axis to cover the entire figure
ax.set_position([0, 0, 1, 1])
f.savefig("result.png")

Python: how could this image be properly segmented?

I would like to segment (isolate) the rod-like structures shown in this image:
The best I've managed to do is this
# Imports the libraries.
from skimage import io, filters
import matplotlib.pyplot as plt
import numpy as np
# Imports the image as a numpy array.
img = io.imread('C:/Users/lopez/Desktop/Test electron/test.tif')
# Thresholds the images using a local threshold.
thresh = filters.threshold_local(img,301,offset=0)
binary_local = img > thresh # Thresholds the image
binary_local = np.invert(binary_local) # inverts the thresholded image (True becomes False and vice versa).
# Shows the image.
plt.figure(figsize=(10,10))
plt.imshow(binary_local,cmap='Greys')
plt.axis('off')
plt.show()
Which produces this result
However, as you can see from the segmented image, I haven't managed to isolate the rods. What should be a black background is filled with interconnected structures. Is there a way to neatly isolate the rod-like structures from all other elements in the image?
The original image can be downloaded from this website
https://dropoff.nbi.ac.uk/pickup.php
Claim ID: qMNrDHnfEn4nPwB8
Claim Passcode: UkwcYoYfXUfeDto8
Here is my attempt using a Meijering filter. The Meijering filter relies on symmetry when it looks for tubular structures and hence the regions where rods overlap (breaking the symmetry of the tubular shape) are not that well recovered, as can be seen in the overlay below.
Also, there is some random crap that I have trouble getting rid of digitally, but maybe you can clean your prep a bit more before imaging.
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage.transform import rescale
from skimage.restoration import denoise_nl_means
from skimage.filters import meijering
from skimage.measure import label
from skimage.color import label2rgb
def remove_small_objects(binary_mask, size_threshold):
    label_image = label(binary_mask)
    object_sizes = np.bincount(label_image.ravel())
    labels2keep, = np.where(object_sizes > size_threshold)
    labels2keep = labels2keep[1:]  # remove the first label, which corresponds to the background
    clean = np.in1d(label_image.ravel(), labels2keep).reshape(label_image.shape)
    return clean
if __name__ == '__main__':
    raw = imread('test.tif')
    raw = raw.astype(float)  # work in float so the in-place normalisation below is valid
    raw -= raw.min()
    raw /= raw.max()
    # running everything on the large image took too long for my patience;
    raw = rescale(raw, 0.25, anti_aliasing=True)
    # smooth image while preserving edges
    smoothed = denoise_nl_means(raw, h=0.05, fast_mode=True)
    # filter for tubular shapes
    sigmas = range(1, 5)
    filtered = meijering(smoothed, sigmas=sigmas, black_ridges=False)
    # Meijering filter always evaluates to high values at the image frame;
    # we hence set the filtered image to zero at those locations
    frame = np.ones_like(filtered, dtype=bool)  # np.bool was removed from recent NumPy
    d = 2 * np.max(sigmas) + 1  # this is the theoretical minimum ...
    d += 2                      # ... but doesn't seem to be enough so we increase d
    frame[d:-d, d:-d] = False
    filtered[frame] = np.min(filtered)
    thresholded = filtered > np.percentile(filtered, 80)
    cleaned = remove_small_objects(thresholded, 200)
    overlay = raw.copy()
    overlay[np.invert(cleaned)] = overlay[np.invert(cleaned)] * 2/3
    fig, axes = plt.subplots(2, 3, sharex=True, sharey=True)
    axes = axes.ravel()
    axes[0].imshow(raw, cmap='gray')
    axes[1].imshow(smoothed, cmap='gray')
    axes[2].imshow(filtered, cmap='gray')
    axes[3].imshow(thresholded, cmap='gray')
    axes[4].imshow(cleaned, cmap='gray')
    axes[5].imshow(overlay, cmap='gray')
    for ax in axes:
        ax.axis('off')
    fig, ax = plt.subplots()
    ax.imshow(overlay, cmap='gray')
    ax.axis('off')
    plt.show()
If this code makes it into a paper, I want an acknowledgement and a copy of the paper. ;-)
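As a side note (not part of the answer above), scikit-image ships its own small-object filter, which could likely replace the helper function; a minimal sketch, assuming thresholded is the boolean mask from the code above:
from skimage.morphology import remove_small_objects as ski_remove_small_objects
# Sketch: drop connected components smaller than 200 pixels from a boolean mask.
cleaned = ski_remove_small_objects(thresholded, min_size=200)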

Matplotlib Colormap showing Incorrect Color

I need to make a colormap with 256 colors from red to white and display the red channel in Python, but it looks like something is done wrong and I don't understand why. This is my code:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# How to create an array filled with zeros
img = np.zeros([256,256])
colormap1 = np.zeros([256,1])
#image:
for i in range(256):
    img[:,i] = i  # on all columns I have the same value
#to go from red to white we'll have: [1,0,0],...,[1,0.5,0.5],...,[1,1,1]
for i in range(128):
    colormap1[i,1] = i/127
#display the thing:
colormap1 = mpl.colors.ListedColormap(colormap1)
plt.figure(), plt.imshow(img, cmap = colormap1)
You can do it like this:
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# How to create an array filled with zeros
img = np.zeros([256,256])
colormap = np.zeros([256,3])
#image:
for i in range(256):
    img[:,i] = i  # on all columns I have the same value
#color map:
for i in range(256):
    colormap[i,0] = 1
    colormap[i,1] = (i+1)/256
    colormap[i,2] = (i+1)/256
#display the thing:
colormap = mpl.colors.ListedColormap(colormap)
plt.figure(), plt.imshow(img, cmap = colormap)
This is almost like what you did in "Colormap it's not composed of correct color". You just need to write the second part of your colormap (from red to white) and do it in 256 steps instead of 128.
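The same red-to-white ramp can also be built without an explicit loop; here is a short sketch of that variant (same idea as the code above, just vectorised with np.linspace):
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# Sketch: red channel fixed at 1, green and blue ramp from 0 to 1 over 256 steps.
ramp = np.linspace(0, 1, 256)
colors = np.column_stack([np.ones(256), ramp, ramp])
cmap = mpl.colors.ListedColormap(colors)
img = np.tile(np.arange(256), (256, 1))  # same test gradient as above
plt.imshow(img, cmap=cmap)
plt.show()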

How to compute the gradients of image using Python

I wonder how to use Python to compute the gradients of an image, in both the x and y directions. I want to get an x-gradient map and a y-gradient map of the image. Can anyone tell me how to do this?
Thanks~
I think you mean this:
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
# Create a black image
img=np.zeros((640,480))
# ... and make a white rectangle in it
img[100:-100,80:-80]=1
# See how it looks
plt.imshow(img,cmap=plt.cm.gray)
plt.show()
# Rotate it for extra fun
img=ndimage.rotate(img,25,mode='constant')
# Have another look
plt.imshow(img,cmap=plt.cm.gray)
plt.show()
# Get x-gradient in "sx"
sx = ndimage.sobel(img,axis=0,mode='constant')
# Get y-gradient in "sy"
sy = ndimage.sobel(img,axis=1,mode='constant')
# Get square root of sum of squares
sobel=np.hypot(sx,sy)
# Hopefully see some edges
plt.imshow(sobel,cmap=plt.cm.gray)
plt.show()
Or you can define the x and y gradient convolution kernels yourself and call the convolve() function:
# Create a black image
img=np.zeros((640,480))
# ... and make a white rectangle in it
img[100:-100,80:-80]=1
# Define kernel for x differences
kx = np.array([[1,0,-1],[2,0,-2],[1,0,-1]])
# Define kernel for y differences
ky = np.array([[1,2,1] ,[0,0,0], [-1,-2,-1]])
# Perform x convolution
x=ndimage.convolve(img,kx)
# Perform y convolution
y=ndimage.convolve(img,ky)
sobel=np.hypot(x,y)
plt.imshow(sobel,cmap=plt.cm.gray)
plt.show()
You can use OpenCV to compute the x and y gradients as below:
import numpy as np
import cv2
img = cv2.imread('Desert.jpg')
kernely = np.array([[1,1,1],[0,0,0],[-1,-1,-1]])
kernelx = np.array([[1,0,-1],[1,0,-1],[1,0,-1]])
edges_x = cv2.filter2D(img,cv2.CV_8U,kernelx)
edges_y = cv2.filter2D(img,cv2.CV_8U,kernely)
cv2.imshow('Gradients_X',edges_x)
cv2.imshow('Gradients_Y',edges_y)
cv2.waitKey(0)
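OpenCV also has a built-in Sobel operator, which avoids writing the kernels by hand; a minimal sketch (the file name is the same placeholder as above):
import cv2
# Sketch: x and y Sobel gradients of a grayscale image.
gray = cv2.imread('Desert.jpg', cv2.IMREAD_GRAYSCALE)
grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)  # derivative in x
grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)  # derivative in y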
We can do it with scikit-image filters module functions too, as shown below:
import matplotlib.pylab as plt
from skimage.io import imread
from skimage.color import rgb2gray
from skimage import filters
im = rgb2gray(imread('../images/cameraman.jpg')) # RGB image to gray scale
plt.gray()
plt.figure(figsize=(20,20))
plt.subplot(221)
plt.imshow(im)
plt.title('original', size=20)
plt.subplot(222)
edges_y = filters.sobel_h(im)
plt.imshow(edges_y)
plt.title('sobel_x', size=20)
plt.subplot(223)
edges_x = filters.sobel_v(im)
plt.imshow(edges_x)
plt.title('sobel_y', size=20)
plt.subplot(224)
edges = filters.sobel(im)
plt.imshow(edges)
plt.title('sobel', size=20)
plt.show()
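For completeness (not one of the original answers), NumPy itself can compute per-axis gradients with central differences, which is often the most direct way to get separate x and y gradient maps; a minimal sketch:
import numpy as np
# Sketch: np.gradient returns one array per axis (rows first, then columns).
img = np.zeros((640, 480))
img[100:-100, 80:-80] = 1
gy, gx = np.gradient(img)  # gy: gradient along rows (y), gx: along columns (x)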

Rendering a CSV with pixel values to an image with PyPlot

I am trying to draw a black-and-white line picture from raw data in a CSV file.
1 is black and 0 is white.
That way I can later use values between 1 and 0 to draw gray-level pictures.
I have run into a problem: the resolution of the output picture does not match the raw data.
The original raw data is 1080*2160 values between 1 and 0, where each value should map to one pixel.
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_csv('python line.csv', encoding='big5')
fig, ax = plt.subplots()
heatmap = ax.pcolor(data, cmap=plt.cm.Greys)
ax.invert_yaxis()
ax.xaxis.tick_top()
I think that everything is fine with your code, but the size of the pyplot figure forces the irregular "bands" you are seeing in your image.
If you increase the figure size, this might help your image quality. To do that, call plt.figure(figsize=(10, 20)) for a 10-inch by 20-inch figure. The example I have below uses a smaller image, but you should be able to scale it by changing the values.
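A minimal sketch along those lines (not the answer's original example, which is not reproduced here; random data stands in for the CSV contents, and the shape and figure size are just placeholders):
import numpy as np
import matplotlib.pyplot as plt
# Sketch: render a 0-1 array in greys, one value per cell, with a larger figure.
data = np.random.rand(100, 200)           # stand-in for the CSV values
fig, ax = plt.subplots(figsize=(10, 20))  # a bigger figure reduces the banding
ax.pcolor(data, cmap=plt.cm.Greys)
ax.invert_yaxis()
ax.xaxis.tick_top()
plt.show()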
I suggest you use the Python Imaging Library (PIL). Here's how I would implement this with tools available in the Anaconda distribution:
from PIL import Image
import random
import pandas as pd
import matplotlib.pyplot as plt
import math
import random
%matplotlib inline
df = pd.DataFrame()
for i in range(200):
    #df[i] = [1 if i % 2 == 0 else 0 for i in range(100)]
    df[i] = [random.uniform(0, 1) for i in range(100)]
df.to_csv('data.csv', index=False)
df1 = pd.read_csv('data.csv')
im1 = Image.new('RGBA', (200,100),'white')
pixels = []
white = (255,255,255)
black = (0,0,0)
for i, row in df1.iterrows():
    for j in range(df1.shape[1]):
        c = math.floor(float(row[j])*255)
        pixels.append((c,c,c))
im1.putdata(pixels)
plt.figure(figsize=(10,5))
imgplot = plt.imshow(im1)
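If the goal is a pixel-exact file rather than a matplotlib preview, the PIL image can also be written straight to disk (a one-line addition; the output file name is just an example):
# Sketch: write the image at its native 200x100 resolution, no axes involved.
im1.save('output.png')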
