I am trying to find the phase spectrum of an image after applying the DFT in Python. Here is the code I have used:
import numpy as np
import cv2
from matplotlib import pyplot as plt

img = cv2.imread('/content/drive/My Drive/IP assg2/im1.jpg')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # was cvtColor(sm1, ...); sm1 was undefined
dft = cv2.dft(np.float32(img), flags=cv2.DFT_COMPLEX_OUTPUT)
I am not sure how to move on from here, as all the tutorials I have come across are MATLAB-based.
You can try this:

import numpy as np
import cv2
from matplotlib import pyplot as plt

# Read the image and collapse it to a single grey channel.
img = cv2.imread('input.png')
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# 2-D DFT, then shift the zero-frequency component to the centre.
dft = np.fft.fft2(img)
dft_shift = np.fft.fftshift(dft)

# The phase spectrum is the angle of each complex coefficient.
phase_spectrum = np.angle(dft_shift)

ax1 = plt.subplot(1, 2, 1)
ax1.imshow(img, cmap='gray')
ax2 = plt.subplot(1, 2, 2)
ax2.imshow(phase_spectrum, cmap='gray')
plt.show()
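If you would rather continue from the cv2.dft output in your question, the same idea works on its two-channel result. A sketch, reusing dft from your snippet:

# cv2.dft with DFT_COMPLEX_OUTPUT returns shape (H, W, 2): real and
# imaginary parts stacked in the last axis. Shift only the spatial axes.
dft_shift = np.fft.fftshift(dft, axes=(0, 1))
phase_spectrum = np.arctan2(dft_shift[:, :, 1], dft_shift[:, :, 0])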
I want to use PIL.Image to save a figure, and I want to use matplotlib colormaps to map the data to a color. I have tried the following:
import matplotlib
matplotlib.use('TkAgg')
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from PIL import Image
M, N = 255, 255
data = np.arange(M*N).reshape((M, N))
cmap_name = 'autumn_r'
cmap = plt.get_cmap(cmap_name)
norm = mpl.colors.Normalize()
scalarMap = cm.ScalarMappable(norm=norm, cmap=cmap)
plt.imshow(data, cmap=cmap)
plt.show()
colors = scalarMap.to_rgba(data)
image = Image.fromarray((colors[:, :, :3]*256).astype(np.uint8))
image.show()
Which plots this in matplotlib:
However, PIL.Image shows this:
How can I get PIL.Image to show the same colors as matplotlib?
If it's possible to also add the alpha channel, that would be useful.
You need to give PIL the same normalisation and cmap that you give matplotlib, so it can do the same mapping: 2D array -> normalised -> mapped to cmap.
I rewrote your sample code to be a bit simpler:
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from PIL import Image
M, N = 255, 255
data = np.arange(M*N).reshape((M, N))
cmap = cm.autumn_r
plt.imshow(data, cmap=cmap)
norm = mpl.colors.Normalize()  # with no arguments, autoscales to the data range when first called
Then your answer is:
Image.fromarray(np.uint8(cmap(norm(data))*255)).show()
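Since cmap(norm(data)) returns RGBA values in [0, 1], the conversion above already carries the alpha channel you asked about; save to a format with transparency to keep it:

Image.fromarray(np.uint8(cmap(norm(data))*255)).save('out.png')  # PNG keeps the alpha channel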
(Found the solution here, might be a dupe.)
I have been set this assignment:
I don't know what is wrong with my code below:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
img = mpimg.imread('Ca.PNG')
imgplot = plt.imshow(img)
img = mpimg.imread('Ca.PNG')
print(img)
lum_img = img[:, :, 0]
plt.imshow(lum_img)
plt.show()
print(lum_img)
Your code seems fine, just a bit disorganized. Try sprinkling in a few comments to help you keep track of what you're doing and think it through. For example...
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Load the image.
img = mpimg.imread('Ca.PNG')
# Take the red channel.
lum_img = img[:, :, 0]
# Plot the single-channel array.
plt.imshow(lum_img, cmap='gray')
plt.show()
This should produce a grayscale plot.
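If the assignment actually wants luminance rather than just the red channel (an assumption on my part), a common sketch is the Rec. 601 weighted sum of the channels:

# Weighted channel sum; the weights are the Rec. 601 luma coefficients.
lum_img = 0.299 * img[:, :, 0] + 0.587 * img[:, :, 1] + 0.114 * img[:, :, 2]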
I am using scipy.spatial to calculate the Voronoi cells in my image and to stack the result on the image by using:
from scipy.spatial import Voronoi, voronoi_plot_2d
import numpy as np
import cv2
import matplotlib.pyplot as plt
im = cv2.imread('img.tif', 0)
pks = np.loadtxt('pks.csv', delimiter=',', skiprows = 1)
vor = Voronoi(pks)
plt.figure()
plt.imshow(im, cmap='gray')
ax = plt.gca()
voronoi_plot_2d(vor, show_vertices=False, line_colors='y',
line_width=2, line_alpha=0.6, point_size=2, ax=ax)
plt.axis('off')
plt.show()
How can I extract each cell into a separate mask, as shown below?
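A possible starting point, as a sketch rather than a complete answer: rasterise each finite Voronoi region into a boolean mask using matplotlib's Path, reusing im and vor from the code above. Regions that extend to infinity are skipped here; closing them properly needs extra work, such as clipping to the image bounds.

from matplotlib.path import Path

h, w = im.shape
ys, xs = np.mgrid[0:h, 0:w]
pixels = np.column_stack([xs.ravel(), ys.ravel()])

masks = []
for region_idx in vor.point_region:
    region = vor.regions[region_idx]
    if len(region) == 0 or -1 in region:
        continue  # this cell extends to infinity
    polygon = vor.vertices[region]  # (x, y) vertices of the cell
    mask = Path(polygon).contains_points(pixels).reshape(h, w)
    masks.append(mask)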
I have a figure of a flame of the form shown below:
I am trying to detect the outer edge of the camera's view and centre the figure so that the circular view of the flame is exactly at the centre of the plot. The position of the circle might change with the image capture date: sometimes it might be in the upper half, sometimes the lower half, etc.
Are there any modules in Python that can detect the view and centre it?
Reproducible code
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
img = mpimg.imread('flame.png')
lum_img = img[:, :, 0]
img_plot = plt.imshow(lum_img)
img_plot.set_cmap('jet')
plt.axis('off')
plt.show()
Adapted from this answer, do an edge detection and robustly fit a circle to the outline using RANSAC:
from skimage import io, feature, color, measure, draw, img_as_float
import numpy as np

image = img_as_float(color.rgb2gray(io.imread('flame.png')))
edges = feature.canny(image)

# Fit a circle to the edge pixels with RANSAC, which tolerates outliers.
coords = np.column_stack(np.nonzero(edges))
model, inliers = measure.ransac(coords, measure.CircleModel,
                                min_samples=3, residual_threshold=1,
                                max_trials=1000)
print(model.params)  # (row of centre, column of centre, radius)

# Draw the fitted circle onto the image.
rr, cc = draw.circle_perimeter(int(model.params[0]),
                               int(model.params[1]),
                               int(model.params[2]),
                               shape=image.shape)
image[rr, cc] = 1

import matplotlib.pyplot as plt
plt.imshow(image, cmap='gray')
plt.scatter(model.params[1], model.params[0], s=50, c='red')
plt.axis('off')
plt.savefig('/tmp/flame_center.png', bbox_inches='tight')
plt.show()
This yields the image with the fitted circle drawn on it and the centre marked in red:
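The question also asked to centre the view. A minimal follow-up sketch, assuming wrap-around at the image borders is acceptable: shift the array so the fitted centre lands in the middle.

cy, cx = int(model.params[0]), int(model.params[1])
shift_y = image.shape[0] // 2 - cy
shift_x = image.shape[1] // 2 - cx
centred = np.roll(image, (shift_y, shift_x), axis=(0, 1))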
I think you have plenty of options. Two easy approaches come to mind. The first is to threshold your input image on a low intensity value, which will give you a white circle, and then run the Hough transform for circles on it to find the centre; a sketch of this follows below.
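A sketch of that first option (the threshold value and the Hough parameters here are guesses that you would need to tune for your images):

import cv2
import numpy as np

gray = cv2.imread('flame.png', cv2.IMREAD_GRAYSCALE)
_, binary = cv2.threshold(gray, 10, 255, cv2.THRESH_BINARY)

# One strong circle is expected, so minDist is set to the full image height.
circles = cv2.HoughCircles(binary, cv2.HOUGH_GRADIENT, dp=2,
                           minDist=gray.shape[0], param1=100, param2=30)
if circles is not None:
    x, y, r = circles[0, 0]
    print('centre: (%.0f, %.0f), radius: %.0f' % (x, y, r))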
Or you can take the distance transform of the thresholded white pixels and use its maximum:
# code derived from watershed example of scikit-image
# http://scikit-image.org/docs/dev/auto_examples/plot_watershed.html
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from skimage.color import rgb2gray
from skimage.io import imread

img = imread('flame.png')
image = rgb2gray(img) > 0.01  # threshold: keep anything brighter than near-black

# Distance of each foreground pixel to the background; the global maximum
# is the point deepest inside the white circle, i.e. its centre.
distance = ndi.distance_transform_edt(image)

# get the global maximum as described in http://stackoverflow.com/a/3584260/2156909
max_loc = np.unravel_index(distance.argmax(), distance.shape)
fig, axes = plt.subplots(ncols=4, figsize=(10, 2.7))
ax0, ax1, ax2, ax3 = axes
ax0.imshow(img,interpolation='nearest')
ax0.set_title('Image')
ax1.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax1.set_title('Thresholded')
ax2.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
ax2.set_title('Distances')
ax3.imshow(rgb2gray(img), cmap=plt.cm.gray, interpolation='nearest')
ax3.set_title('Detected centre')
ax3.scatter(max_loc[1], max_loc[0], color='red')
for ax in axes:
ax.axis('off')
fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
Just to give you an idea of how robust this method is: if I pick a very bad threshold (image = rgb2gray(img) > 0.001 -- far too low to get a nice circle), the result is almost the same:
I want to plot a 3D histogram of my RGB image.
Below is my code:
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from imageio import imread  # scipy.misc.imread was removed in SciPy 1.2; imageio is a drop-in here

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

img1 = imread('image.jpg')

# Flatten the image to one (R, G, B) row per pixel, then histogram in 3-D.
img_reshaped = img1.reshape(img1.shape[0] * img1.shape[1], img1.shape[2])
hist, edges = np.histogramdd(img_reshaped, bins=(100, 100, 100))
Please tell me how to plot the hist array that I have obtained.
Have you taken a look at the 3D histogram example from the matplotlib gallery? See: http://matplotlib.org/examples/mplot3d/hist3d_demo.html
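That demo plots a histogram of 2-D data, while your hist is a (100, 100, 100) array, so it does not transfer directly. One option is a 3-D scatter of the occupied bins; a minimal sketch, assuming hist and edges come from your np.histogramdd call above:

import numpy as np
import matplotlib.pyplot as plt

# Indices of the occupied bins and the counts they hold.
r, g, b = np.nonzero(hist)
counts = hist[r, g, b]

# Bin centres along each axis, from the edges returned by histogramdd.
centers = [(e[:-1] + e[1:]) / 2 for e in edges]
x, y, z = centers[0][r], centers[1][g], centers[2][b]

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Size each point by its count and colour it by its position in RGB space.
rgb = np.clip(np.column_stack([x, y, z]) / 255.0, 0, 1)
ax.scatter(x, y, z, s=50 * counts / counts.max(), c=rgb)
ax.set_xlabel('R'); ax.set_ylabel('G'); ax.set_zlabel('B')
plt.show()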