Transform numpy array to RGB image array - python

Consider the following code:
import numpy as np
rand_matrix = np.random.rand(10,10)
which generates a 10x10 random matrix.
The following code displays it as a colour map:
import matplotlib.pyplot as plt
plt.imshow(rand_matrix)
plt.show()
I would like to get the RGB numpy array (without axes) from the object returned by plt.imshow.
In other words, if I were to save the image shown by plt.show, I would like to get the 3D RGB numpy array that I would obtain from:
import matplotlib.image as mpimg
img=mpimg.imread('rand_matrix.png')
But without the need to save and load the image, which is computationally very expensive.
Thank you.
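
If only the colour-mapped values of the data are needed (no axes, ticks or padding), one option is to skip rendering altogether and apply the colormap directly; a minimal sketch using the to_rgba method that the AxesImage returned by plt.imshow inherits from ScalarMappable:
import numpy as np
import matplotlib.pyplot as plt

rand_matrix = np.random.rand(10, 10)

im = plt.imshow(rand_matrix)                 # AxesImage, which is also a ScalarMappable
rgba = im.to_rgba(rand_matrix, bytes=True)   # uint8 RGBA array, shape (10, 10, 4)
rgb = rgba[..., :3]                          # drop the alpha channel if only RGB is needed
Note that this gives one RGBA value per data cell, not the interpolated pixels of a rendered figure, so it is not identical to what mpimg.imread returns for a saved PNG.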

You can save time by saving to a io.BytesIO instead of to a file:
import io
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
def ax_to_array(ax, **kwargs):
    """Render the contents of ax (without its frame) and return them as an RGBA array."""
    fig = ax.figure
    frameon = ax.get_frame_on()
    ax.set_frame_on(False)
    with io.BytesIO() as memf:
        # bounding box of the axes in inches, used to crop the saved figure
        extent = ax.get_window_extent()
        extent = extent.transformed(fig.dpi_scale_trans.inverted())
        plt.axis('off')
        fig.savefig(memf, format='PNG', bbox_inches=extent, **kwargs)
        memf.seek(0)
        arr = mpimg.imread(memf)[::-1, ...]   # flip vertically
    ax.set_frame_on(frameon)
    return arr.copy()
rand_matrix = np.random.rand(10,10)
fig, ax = plt.subplots()
ax.imshow(rand_matrix)
result = ax_to_array(ax)
# view using matplotlib
plt.show()
# view using PIL
result = (result * 255).astype('uint8')
img = Image.fromarray(result)
img.show()
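A follow-up note on the helper above: matplotlib.image.imread returns PNG data as float32 with values in [0, 1], so ax_to_array hands back a float RGBA array; that is why the script scales by 255 and casts to uint8 before building a PIL image. A quick check:
arr = ax_to_array(ax)
print(arr.shape, arr.dtype, arr.min(), arr.max())   # (H, W, 4) float32, values in [0, 1]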

Related

How to retrieve the raw figure data from matplotlib?

I am using matplotlib to generate matrices I can train on. I need to get to the raw figure data.
Saving and re-reading the .png works, but it makes my code run about 10x longer. Another Stack Overflow question asked something similar and the suggested solution was to grab the canvas, but that logic raised a NumPy error for me. Here is my MWE.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.transforms import IdentityTransform
px = 1/plt.rcParams['figure.dpi'] # pixel in inches
fig, ax = plt.subplots(figsize=(384*px, 128*px))
i = 756
plt.text(70, 95, "value {:04d}".format(i), color="black", fontsize=30, transform=IdentityTransform())
plt.axis('off')
plt.savefig("xrtv.png") # I dont want to do this ...
rtv = plt.imread("xrtv.png") # or this, but I want access to what imread returns.
gray = lambda rgb: np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
gray = gray(rtv)
Disabling rendering was a good hint. Consider writing to an in-memory buffer rather than juggling strings. Here is a full example:
import numpy as np
import io
import matplotlib.pyplot as plt
from PIL import Image
# disable rendering and dump to buffer
plt.ioff()
fig,ax = plt.subplots()
ax.plot(np.sin(np.arange(100)))
buf = io.BytesIO()
fig.savefig(buf, format='rgba')   # raw RGBA bytes, no PNG encoding
# rebuild the image from the buffer; shape is (height, width, channels)
shape = (int(fig.bbox.bounds[-1]), int(fig.bbox.bounds[-2]), -1)
img_array = np.frombuffer(buf.getvalue(), dtype=np.uint8).reshape(shape)
Image.fromarray(img_array)
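With an Agg-based backend and a reasonably recent Matplotlib, the buffer round-trip can be avoided entirely by reading the canvas directly; a minimal sketch (an alternative, not taken from the answer above):
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(np.sin(np.arange(100)))

fig.canvas.draw()                                   # force a render
img_array = np.asarray(fig.canvas.buffer_rgba())    # (height, width, 4) uint8
gray = np.dot(img_array[..., :3], [0.299, 0.587, 0.114])   # same grayscale weights as in the question
np.asarray wraps the canvas buffer without copying, so call .copy() if the array needs to outlive the figure.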

How to fix BytesIO numpy image array returning blank?

I'm trying to save a Matplotlib plot to an array using BytesIO as suggested here: Matplotlib save plot to NumPy array. Here is my code
import lightkurve
import matplotlib.pyplot as plt
import numpy as np
import io
def download(search):
    lc = search.download()  # downloads lightcurve as lightcurve object
    if lc is not None:
        fig, ax = plt.subplots()
        ax.scatter(lc.time.value.tolist(), lc.flux.value.tolist(), color='k')
        ax.autoscale()
        ax.set_xlabel('Time (BTJD)')
        ax.set_ylabel('Flux')
        fig.show()
        io_buf = io.BytesIO()
        fig.savefig(io_buf, format="raw")
        io_buf.seek(0)
        img_arr = np.frombuffer(io_buf.getvalue(), dtype=np.uint8)
        io_buf.close()
        return img_arr
For some reason, the returned image array only contains the repeated value 255, like so: [255 255 255 ... 255 255 255], suggesting a blank image. I've tried using plt instead of fig, autoscaling the axes in case they weren't showing, and plotting with the Lightkurve built-in plotting function lc.plot(ax=ax), but nothing has changed. Does anyone know how to fix this?
I couldn't reproduce your bug. In fact, I ran your code (with some modifications) and the reconstructed image was exactly like the original. Did you thoroughly check whether your img_arr really contains only 255s? (e.g. np.unique(img_arr); in my case, len(np.unique(imgarr)) == 231)
import lightkurve
import matplotlib.pyplot as plt
import numpy as np
import io
def download(search):
    lc = search.download()  # downloads lightcurve as lightcurve object
    if lc is not None:
        fig, ax = plt.subplots()
        ax.scatter(lc.time.value.tolist(), lc.flux.value.tolist(), color='k')
        ax.autoscale()
        ax.set_xlabel('Time (BTJD)')
        ax.set_ylabel('Flux')
        fig.show()
        io_buf = io.BytesIO()
        fig.savefig(io_buf, format="raw")
        fig.savefig('test.png')  # So I could see the dimensions of the array
        io_buf.seek(0)
        img_arr = np.frombuffer(io_buf.getvalue(), dtype=np.uint8)
        io_buf.close()
        return img_arr
# I put something random -- Next time, provide this step so others can more easily debug your code. Never touched lightkurve before
search = lightkurve.search_lightcurve('KIC 757076', author="Kepler", quarter=3)
imgarr = download(search)
fig, ax = plt.subplots()
ax.imshow(imgarr.reshape(288, -1), aspect=4, cmap='gray') # Visualizing the image from the array. Got '288' from the dimensions of the png.
Original plot and reconstructed plot: (images omitted; per the above, the two matched)
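As a side note, the raw buffer from format="raw" can be reshaped into a proper RGBA image without reading the pixel height off a saved PNG, by taking the dimensions from the figure itself. A minimal sketch, assuming the default savefig dpi (i.e. the figure's own dpi):
import io
import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 0], color='k')

buf = io.BytesIO()
fig.savefig(buf, format='raw')   # raw RGBA bytes
buf.seek(0)

h, w = int(fig.bbox.height), int(fig.bbox.width)            # pixel size of the figure
img = np.frombuffer(buf.getvalue(), dtype=np.uint8).reshape(h, w, 4)

plt.figure()
plt.imshow(img)                  # the reconstructed figure as an RGBA image
plt.show()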

Why does plt.imshow(im, cmap='gray') not show a grayscale image?

I am attempting to run a DCT transform on an image. I have tried to make my image a grayscale image with the following code:
import numpy as np
import matplotlib.pyplot as plt
import scipy
from numpy import pi
from numpy import sin
from numpy import zeros
from numpy import r_
from scipy import signal
from scipy import misc
import matplotlib.pylab as pylab
# %matplotlib inline
pylab.rcParams['figure.figsize'] = (20.0, 7.0)
im = misc.imread("indoorPictureResize.jpg")
#show the image
f = plt.figure()
plt.imshow(im,cmap='gray')
plt.show()
However, the image is displayed but its color channels have not changed. Have I done something wrong, or is there something I should change?
The array im is probably a 3-D array, with shape (m, n, 3) or (m, n, 4). Check im.shape.
From the imshow docstring: "cmap is ignored if X is 3-D".
To use a colormap, you'll have to pass a 2-D array to imshow. You could, for example, plot one of the color channels such as im[:,:,0], or plot the average over the three channels, im.mean(axis=2). (But if im has shape (m, n, 4), you probably don't want to include the alpha channel in the mean.)
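A minimal sketch of both options (one channel vs. the channel mean), using a random array as a stand-in for the image loaded in the question:
import numpy as np
import matplotlib.pyplot as plt

im = np.random.rand(120, 160, 3)            # stand-in for the RGB image from misc.imread

print(im.shape)                             # confirm the array really is 3-D

single_channel = im[:, :, 0]                # one color channel, 2-D
mean_channels = im[:, :, :3].mean(axis=2)   # average of R, G, B (skipping any alpha channel)

plt.imshow(mean_channels, cmap='gray')      # cmap is honoured because the array is 2-D
plt.show()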
Pass the mode argument to scipy.misc.imread, like this:
import numpy as np
import matplotlib.pyplot as plt
import scipy
from numpy import pi
from numpy import sin
from numpy import zeros
from numpy import r_
from scipy import signal
from scipy import misc
import matplotlib.pylab as pylab
# %matplotlib inline
pylab.rcParams['figure.figsize'] = (20.0, 7.0)
im = misc.imread("indoorPictureResize.jpg", mode="L")
#show the image
f = plt.figure()
plt.imshow(im,cmap='gray')
plt.show()
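Note that scipy.misc.imread (and its mode argument) was deprecated and later removed from SciPy, so on newer installations an equivalent approach is to load and convert the image with Pillow instead; a minimal sketch (the filename is taken from the question):
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

im = np.array(Image.open("indoorPictureResize.jpg").convert("L"))  # 2-D uint8 grayscale

plt.imshow(im, cmap='gray')
plt.show()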

Imaging simulation slice and adding a shape

I have been making slice images of a simulation, and now I need to add a shape to the image. The slice has a colour map, and I add a circle on top of it. I need help making the circle's colour adjustable by a value and having it share the same colormap as the slice. The code I use is:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import csv
def readslice(ndim):
    shape = (ndim, ndim, ndim)
    fd = open("delta_T_v3_no_halos_z013.40_nf0.898994_useTs0_zetaX-1.0e+00_alphaX-1.0_TvirminX-1.0e+00_aveTb027.03_Pop-1_300_200Mpc", 'rb')
    data = np.fromfile(file=fd, dtype=np.dtype('f4')).reshape(shape)
    fd.close()
    print(data)
    return data
ff = readslice(300)
circle1=plt.Circle((150.0,150.0),50.0)
fig = plt.gcf()
fig.gca().add_artist(circle1)
plt.imshow(ff[0,:,:],cmap = cm.jet)
plt.colorbar()
plt.savefig('picwithcircle.png')
plt.show()
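One way to make the circle's colour come from the same colormap as the slice (not an answer from the original thread, just a sketch assuming the circle should encode some scalar value on the data's own scale):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors

data = np.random.rand(300, 300)          # stand-in for ff[0, :, :]
value = 0.75                             # hypothetical scalar the circle should represent

norm = colors.Normalize(vmin=data.min(), vmax=data.max())
cmap = cm.jet                            # same colormap as the slice

fig, ax = plt.subplots()
img = ax.imshow(data, cmap=cmap, norm=norm)
circle = plt.Circle((150.0, 150.0), 50.0, color=cmap(norm(value)))  # circle colour from the shared colormap
ax.add_artist(circle)
fig.colorbar(img)
plt.show()
Passing the same Normalize instance to both imshow and the circle colour keeps the two mappings consistent.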

Resizing image in Python

I'm using the code from this question to convert some raw images into png.
import matplotlib.pyplot as plt
import numpy as np
# Parameters.
input_filename = "JPCLN001.IMG"
shape = (2048, 2048) # matrix size
dtype = np.dtype('>u2') # big-endian unsigned integer (16bit)
output_filename = "JPCLN001.PNG"
# Reading.
fid = open(input_filename, 'rb')
data = np.fromfile(fid, dtype)
image = data.reshape(shape)
# Display.
plt.imshow(image, cmap = "gray")
plt.savefig(output_filename)
plt.show()
The thing is, I'm expecting a 2048x2048 PNG, but all I'm getting is images under 500x500. Any advice on how to fix this?
If you just want to save the array as a .png without plotting it, you can use matplotlib.image.imsave:
import numpy as np
from matplotlib import pyplot as plt
# some random data
img = np.random.randint(256, size=(2048, 2048))
# creates a 2048 x 2048 .png image
plt.imsave('img.png', img, cmap='gray')
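The original approach produces a small image because plt.savefig renders the entire figure (axes, ticks, padding) at the figure size times its dpi, not at the array's native resolution, whereas plt.imsave writes the array pixel for pixel. If the figure machinery is needed anyway, matching figsize and dpi to the array is a common workaround; a rough sketch, assuming a 100 dpi figure:
import numpy as np
import matplotlib.pyplot as plt

image = np.random.randint(256, size=(2048, 2048))   # stand-in for the raw data

dpi = 100
fig = plt.figure(figsize=(image.shape[1] / dpi, image.shape[0] / dpi), dpi=dpi)
ax = fig.add_axes([0, 0, 1, 1])          # axes filling the whole figure, no padding
ax.axis('off')
ax.imshow(image, cmap='gray')
fig.savefig('img_fullsize.png', dpi=dpi) # 2048 x 2048 pixels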
