NumPy uint8 arrays to vtkImageData - Python

I am attempting to take 2D images of either one or three channels and display them in VTK using vtkImageActor. As I understand it, the current frame to be displayed can be updated by invoking SetInputData on vtkImageActor and providing an instance of vtkImageData.
I have set up my visualiser as shown below. However, I am unsure how to build the vtkImageData object from the numpy arrays (this would go in the updateFrames method). The dtype of my numpy arrays is np.uint8.
I am using VTK 8.0, Python 3.6 and NumPy 1.13.1.
class VTKStreamVisualiser:
    def __init__(self, displayRGB):
        self.__displayRGB = displayRGB
        self.__started = False
        #Setup window.
        self.__renderWindow = vtk.vtkRenderWindow()
        self.__renderWindowInteractor = vtk.vtkRenderWindowInteractor()
        self.__renderWindowInteractor.SetRenderWindow(self.__renderWindow)
        #To store renderers and actors.
        self.__renderers = []
        self.__actors = []
        #Initialise to None to check if ready when invoking start().
        self.__depthImageData = None
        self.__rgbImageData = None
        #Determine viewport ranges for depth and set up the renderer.
        xMinDepth = 0.0
        xMaxDepth = 0.5 if displayRGB else 1.0
        yMin = 0.0
        yMax = 1.0
        self.__setupRenderer(xMinDepth, yMin, xMaxDepth, yMax)
        #Determine viewport ranges for RGB and set up the renderer.
        if self.__displayRGB:
            xMinRGB = xMaxDepth
            xMaxRGB = 2.0 * xMinRGB
            self.__setupRenderer(xMinRGB, yMin, xMaxRGB, yMax)

    def __setupRenderer(self, xMin, yMin, xMax, yMax):
        #Setup renderer.
        self.__renderers.append(vtk.vtkRenderer())
        idx = len(self.__renderers) - 1
        self.__renderWindow.AddRenderer(self.__renderers[idx])
        self.__renderers[idx].SetViewport(xMin, yMin, xMax, yMax)
        self.__actors.append(vtk.vtkImageActor())
        self.__renderers[idx].AddActor(self.__actors[idx])
        self.__renderers[idx].ResetCamera()

    def start(self):
        if self.__depthImageData is None or (self.__rgbImageData is None and self.__displayRGB):
            return None
        if self.__started:
            return
        self.__renderWindowInteractor.Initialize()
        self.__renderWindow.Render()
        #Mark as started before entering the (blocking) event loop.
        self.__started = True
        self.__renderWindowInteractor.Start()

    def stop(self):
        if not self.__started:
            return
        self.__renderWindowInteractor.Stop()
        self.__renderWindow.Finalize()
        self.__renderWindowInteractor.TerminateApp()
        self.__started = False

    def updateFrames(self, depthFrame, rgbFrame=None):
        #Build vtkImageData here from the given numpy uint8 arrays.
        pass
EDIT: I realise that I can manually copy the data over as demonstrated here, which wouldn't be too bad with Cython (assuming I am able to work with vtkImageData in Cython); however, it would be preferable to use the numpy arrays directly.

A slightly more complete answer (generalizing to 1-3 channels and different data types).
import vtk
import numpy as np
from vtk.util import numpy_support

def numpy_array_as_vtk_image_data(source_numpy_array):
    """
    :param source_numpy_array: source array with 2-3 dimensions. If used, the third dimension represents the channel count.
    Note: Channels are flipped, i.e. the source is assumed to be BGR instead of RGB (which works if you're using cv2.imread to read three-channel images).
    Note: Assumes the array value at [0, 0] represents the upper-left pixel.
    :type source_numpy_array: np.ndarray
    :return: vtk-compatible image, if conversion is successful. Raises an exception otherwise.
    :rtype vtk.vtkImageData
    """
    if len(source_numpy_array.shape) > 2:
        channel_count = source_numpy_array.shape[2]
    else:
        channel_count = 1

    output_vtk_image = vtk.vtkImageData()
    output_vtk_image.SetDimensions(source_numpy_array.shape[1], source_numpy_array.shape[0], channel_count)

    # VTK_SIZEOF_LONG is measured in bytes, so a 64-bit long compares against 8, not 64.
    vtk_type_by_numpy_type = {
        np.uint8: vtk.VTK_UNSIGNED_CHAR,
        np.uint16: vtk.VTK_UNSIGNED_SHORT,
        np.uint32: vtk.VTK_UNSIGNED_INT,
        np.uint64: vtk.VTK_UNSIGNED_LONG if vtk.VTK_SIZEOF_LONG == 8 else vtk.VTK_UNSIGNED_LONG_LONG,
        np.int8: vtk.VTK_CHAR,
        np.int16: vtk.VTK_SHORT,
        np.int32: vtk.VTK_INT,
        np.int64: vtk.VTK_LONG if vtk.VTK_SIZEOF_LONG == 8 else vtk.VTK_LONG_LONG,
        np.float32: vtk.VTK_FLOAT,
        np.float64: vtk.VTK_DOUBLE
    }
    vtk_datatype = vtk_type_by_numpy_type[source_numpy_array.dtype.type]

    # Flip vertically so that [0, 0] ends up as the upper-left pixel in VTK.
    source_numpy_array = np.flipud(source_numpy_array)

    # Note: don't flip the channels (take out the next two lines) if the input is already RGB.
    # Likewise, BGRA->RGBA would require a different reordering here.
    if channel_count > 1:
        source_numpy_array = np.flip(source_numpy_array, 2)

    depth_array = numpy_support.numpy_to_vtk(source_numpy_array.ravel(), deep=True, array_type=vtk_datatype)
    depth_array.SetNumberOfComponents(channel_count)

    output_vtk_image.SetSpacing([1, 1, 1])
    output_vtk_image.SetOrigin([-1, -1, -1])
    output_vtk_image.GetPointData().SetScalars(depth_array)

    output_vtk_image.Modified()
    return output_vtk_image
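For context, a minimal usage sketch (hypothetical frame; assumes BGR channel order, as the docstring notes, and that a renderer pipeline already exists):

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # hypothetical 640x480 BGR frame
frame[:, :, 2] = 255  # red in BGR order

vtk_image = numpy_array_as_vtk_image_data(frame)
actor = vtk.vtkImageActor()
actor.SetInputData(vtk_image)  # vtkImageActor consumes vtkImageData directly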

Using the numpy_support module, one can convert numpy arrays into VTK data arrays:
from vtk.util import numpy_support

def updateFrames(self, depthFrame, rgbFrame=None):
    #Build vtkImageData here from the given numpy uint8 arrays.
    self.__depthImageData = vtk.vtkImageData()
    depthArray = numpy_support.numpy_to_vtk(depthFrame.ravel(), deep=True, array_type=vtk.VTK_UNSIGNED_CHAR)
    # .transpose(2, 0, 1) may be required depending on the numpy array order, see
    # https://github.com/quentan/Test_ImageData/blob/master/TestImageData.py
    # Note: SetDimensions expects (x, y, z), i.e. (width, height, depth).
    self.__depthImageData.SetDimensions(depthFrame.shape[1], depthFrame.shape[0], 1)
    #Assume (0, 0) origin and unit spacing.
    self.__depthImageData.SetSpacing([1, 1, 1])
    self.__depthImageData.SetOrigin([0, 0, 0])
    self.__depthImageData.GetPointData().SetScalars(depthArray)
This should provide a working example of how to build the vtkImageData for the depth frame as a starting point.
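To tie this back to the question's visualiser, here is one hedged way to finish updateFrames, assuming the conversion helper from the answer above and that actor index 0 shows depth and index 1 (if present) shows RGB:

def updateFrames(self, depthFrame, rgbFrame=None):
    # Convert the frames and hand them to the actors created in __setupRenderer.
    self.__depthImageData = numpy_array_as_vtk_image_data(depthFrame)
    self.__actors[0].SetInputData(self.__depthImageData)
    if rgbFrame is not None and self.__displayRGB:
        self.__rgbImageData = numpy_array_as_vtk_image_data(rgbFrame)
        self.__actors[1].SetInputData(self.__rgbImageData)
    self.__renderWindow.Render()  # trigger a redraw with the new frames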

In case you came looking for code compatible with VTK version 9 and up, here is the answer above modified:
import vtk
import numpy as np
from vtkmodules.vtkCommonDataModel import vtkImageData
from vtkmodules.util import numpy_support

def numpy_array_as_vtk_image_data(source_numpy_array):
    """
    :param source_numpy_array: source array with 2-3 dimensions. If used, the third dimension represents the channel count.
    Note: Channels are flipped, i.e. the source is assumed to be BGR instead of RGB (which works if you're using cv2.imread to read three-channel images).
    Note: Assumes the array value at [0, 0] represents the upper-left pixel.
    :type source_numpy_array: np.ndarray
    :return: vtk-compatible image, if conversion is successful. Raises an exception otherwise.
    :rtype vtk.vtkImageData
    """
    if len(source_numpy_array.shape) > 2:
        channel_count = source_numpy_array.shape[2]
    else:
        channel_count = 1

    output_vtk_image = vtkImageData()
    output_vtk_image.SetDimensions(source_numpy_array.shape[1], source_numpy_array.shape[0], channel_count)

    # VTK_SIZEOF_LONG is measured in bytes, so a 64-bit long compares against 8, not 64.
    vtk_type_by_numpy_type = {
        np.uint8: vtk.VTK_UNSIGNED_CHAR,
        np.uint16: vtk.VTK_UNSIGNED_SHORT,
        np.uint32: vtk.VTK_UNSIGNED_INT,
        np.uint64: vtk.VTK_UNSIGNED_LONG if vtk.VTK_SIZEOF_LONG == 8 else vtk.VTK_UNSIGNED_LONG_LONG,
        np.int8: vtk.VTK_CHAR,
        np.int16: vtk.VTK_SHORT,
        np.int32: vtk.VTK_INT,
        np.int64: vtk.VTK_LONG if vtk.VTK_SIZEOF_LONG == 8 else vtk.VTK_LONG_LONG,
        np.float32: vtk.VTK_FLOAT,
        np.float64: vtk.VTK_DOUBLE
    }
    vtk_datatype = vtk_type_by_numpy_type[source_numpy_array.dtype.type]

    # Flip vertically so that [0, 0] ends up as the upper-left pixel in VTK.
    source_numpy_array = np.flipud(source_numpy_array)

    # Note: don't flip the channels (take out the next two lines) if the input is already RGB.
    # Likewise, BGRA->RGBA would require a different reordering here.
    if channel_count > 1:
        source_numpy_array = np.flip(source_numpy_array, 2)

    depth_array = numpy_support.numpy_to_vtk(source_numpy_array.ravel(), deep=True, array_type=vtk_datatype)
    depth_array.SetNumberOfComponents(channel_count)

    output_vtk_image.SetSpacing([1, 1, 1])
    output_vtk_image.SetOrigin([-1, -1, -1])
    output_vtk_image.GetPointData().SetScalars(depth_array)

    output_vtk_image.Modified()
    return output_vtk_image
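Usage is unchanged from the VTK 8 answer; only the imports differ. A quick hedged sanity check (hypothetical array):

import numpy as np

frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # hypothetical BGR frame
image = numpy_array_as_vtk_image_data(frame)
print(image.GetDimensions())  # (640, 480, 3) given this function's layout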

Related

Overlapping chunks in xarray dataset for kernel operations

I am trying to run a 9x9 pixel kernel across a large satellite image with a custom filter. One satellite scene is ~40 GB, so to fit it into my RAM I'm using xarray's option to chunk my dataset with dask.
My filter includes a check that the kernel is complete (i.e. no missing data at the edge of the image). In that case a NaN is returned to prevent a potential bias (and I don't really care about the edges). I now realized that this introduces NaNs not only at the edges of the image (expected behaviour), but also along the edges of each chunk, because the chunks don't overlap. dask provides options to create chunks with an overlap, but are there any comparable capabilities in xarray? I found this issue, but it doesn't seem like there has been any progress in this regard.
Some sample code (shortened version of my original code):
import numpy as np
import numba
import math
import xarray as xr

@numba.jit("f4[:,:](f4[:,:],i4)", nopython=True)
def water_anomaly_filter(input_arr, window_size=9):
    # check if window size is odd
    if window_size % 2 == 0:
        raise ValueError("Window size must be odd!")
    # prepare an output array with NaNs and the same dtype as the input
    output_arr = np.zeros_like(input_arr)
    output_arr[:] = np.nan
    # calculate how many pixels in x and y direction around the center pixel
    # are in the kernel
    pix_dist = math.floor(window_size / 2 - 0.5)
    # create a dummy weight matrix
    weights = np.ones((window_size, window_size))
    # get the shape of the input array
    xn, yn = input_arr.shape
    # iterate over the x axis
    for x in range(xn):
        # determine limits of the kernel in x direction
        xmin = max(0, x - pix_dist)
        xmax = min(xn, x + pix_dist + 1)
        # iterate over the y axis
        for y in range(yn):
            # determine limits of the kernel in y direction
            ymin = max(0, y - pix_dist)
            ymax = min(yn, y + pix_dist + 1)
            # extract data values inside the kernel
            kernel = input_arr[xmin:xmax, ymin:ymax]
            # if the kernel is complete (i.e. not at an image edge...) and it
            # is not all NaN
            if kernel.shape == weights.shape and not np.isnan(kernel).all():
                # apply the filter. In this example simply keep the original
                # value
                output_arr[x, y] = input_arr[x, y]
    return output_arr

def run_water_anomaly_filter_xr(xds, var_prefix="band", window_size=9):
    variables = [x for x in list(xds.variables) if x.startswith(var_prefix)]
    for var in variables[:2]:
        xds[var].values = water_anomaly_filter(xds[var].values,
                                               window_size=window_size)
    return xds

def create_test_nc():
    data = np.random.randn(1000, 1000).astype(np.float32)
    rows = np.arange(54, 55, 0.001)
    cols = np.arange(10, 11, 0.001)
    ds = xr.Dataset(
        data_vars=dict(
            band_1=(["x", "y"], data)
        ),
        coords=dict(
            lon=(["x"], rows),
            lat=(["y"], cols),
        ),
        attrs=dict(description="Testdata"),
    )
    ds.to_netcdf("test.nc")

if __name__ == "__main__":
    # if required, create test data
    create_test_nc()
    # import data
    with xr.open_dataset("test.nc",
                         chunks={"x": 50,
                                 "y": 50},
                         ) as xds:
        xds_2 = xr.map_blocks(run_water_anomaly_filter_xr,
                              xds,
                              template=xds).compute()
        xds_2["band_1"][:200, :200].plot()
This yields a plot in which you can clearly see the rows and columns of NaNs along the edges of each chunk.
I'm happy for any suggestions. I would love to get the overlapping chunks (or any other solution) within xarray, but I'm also open for other solutions.
You can use Dask's map_overlap as follows:
import dask.array

arr = dask.array.map_overlap(
    water_anomaly_filter, xds.band_1.data, dtype='f4', depth=4, window_size=9
).compute()
da = xr.DataArray(arr, dims=xds.band_1.dims, coords=xds.band_1.coords)
da = xr.DataArray(arr, dims=xds.band_1.dims, coords=xds.band_1.coords)
Note that you will likely want to tune depth and window_size for your specific application; depth=4 matches the 4-pixel halo a 9x9 kernel needs (window_size // 2).
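For completeness, a hedged end-to-end sketch using the question's water_anomaly_filter and test.nc; boundary='none' is an assumption that leaves the outer image edge unpadded, matching the filter's own edge handling:

import dask.array
import xarray as xr

with xr.open_dataset("test.nc", chunks={"x": 50, "y": 50}) as xds:
    arr = dask.array.map_overlap(
        water_anomaly_filter,
        xds.band_1.data,
        dtype="f4",
        depth=4,          # halo of window_size // 2 pixels around each chunk
        boundary="none",  # do not pad at the outer image boundary
        window_size=9,
    ).compute()
    da = xr.DataArray(arr, dims=xds.band_1.dims, coords=xds.band_1.coords)
    da[:200, :200].plot()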

What is the best script in Python that replicates MATLAB's imresize3?

I am translating code from MATLAB to Python but cannot perfectly replicate the results of MATLAB's imresize3. My input is a 101x101x101 array. The first four values ([0,0:3,0] in Python, or (1,1:4,1) in MATLAB) are: 0.3819 0.4033 0.4336 0.2767. The data input for both languages is identical.
sampleQDNormSmall = imresize3(sampleQDNorm,0.5);
This results in a 51x51x51 array where the first four values (1,1:4,1) for example are: 0.3443 0.2646 0.2700 0.2835
Now I've tried two different pieces of code in Python to replicate these results:
from skimage.transform import resize
from skimage.transform import rescale

sampleQDNormSmall = resize(sampleQDNorm, (0.5*sampleQDNorm.shape[0], 0.5*sampleQDNorm.shape[1], 0.5*sampleQDNorm.shape[2]), order=3, anti_aliasing=True)
sampleQDNormSmall1 = rescale(sampleQDNorm, 0.5, order=3, anti_aliasing=True)
The first one gives a 51x51x51 array whose first four values [0,0:3,0] are: 0.3452 0.2669 0.2774 0.3099. That is very close but not exactly the same numerical output.
The second one gives a 50x50x50 array whose first four values [0,0:3,0] are: 0.3422 0.2623 0.2810 0.3006. This is a different output array size and it also doesn't reproduce the same numerical output as the MATLAB code or the other Python function.
I don't know enough about the optional arguments to know what might get me a better result. I know for this type of array MATLAB's default is cubic interpolation, which is why I am using order 3 in Python. The default for anti-aliasing in both is true. I have two bigger arrays that I am having the same issues with: a (873x873x873) array and a bool (873x873x873) array.
The MATLAB code I'm using is considered the "correct answer" for the work I am doing so I am trying to replicate the results as accurately as possible into python. Please let me know what I can try in python to reproduce the correct data.
sampleQDNorm contains roughly random decimals between 0 and 1 for [0:100,0:100,0:100] and is padded with zeros on the faces [:,:,101], [:,101,:], [101,:,:].
Getting the exact same result as MATLAB imresize3 is challenging.
One reason is that MATLAB enables an antialiasing filter by default, and I can't seem to find an equivalent Python implementation.
The closest existing Python alternatives are described in this post.
scipy.ndimage.zoom supports 3D resizing.
It could be that skimage.transform.resize gives a closer result, but none are identical to the MATLAB result.
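For reference, a minimal hedged sketch of the scipy.ndimage.zoom route (random stand-in data; zoom uses cubic spline interpolation rather than MATLAB's cubic convolution kernel, so values will differ):

import numpy as np
from scipy.ndimage import zoom

# Hypothetical stand-in for sampleQDNorm from the question.
sample = np.random.rand(101, 101, 101)

# order=3 selects cubic spline interpolation.
small = zoom(sample, 0.5, order=3)
print(small.shape)  # output size is round(101 * 0.5) per axis, which may not match MATLAB's rounding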
Reimplementing imresize3:
Looking at the MATLAB implementation of imresize3 (MATLAB source code), it is apparent that the MATLAB implementation "simply" resizes along each axis:
Resize (by half) along the vertical axis.
Resize the above result (by half) along the horizontal axis.
Resize the above result (by half) along the depth axis.
Here is a MATLAB code sample that demonstrates the implementation (using cubic interpolation):
I1 = imread('peppers.png');
I2 = imresize(imread('autumn.tif'), [size(I1, 1), size(I1, 2)]);
I3 = imresize(imread('football.jpg'), [size(I1, 1), size(I1, 2)]);
I4 = imresize(imread('cameraman.tif'), [size(I1, 1), size(I1, 2)]);

I = cat(3, I1, I2, I3, I4);
J = imresize3(I, 0.5, 'cubic', 'Antialiasing', false);

imwrite(I1, '/Tmp/I1.png');
imwrite(I2, '/Tmp/I2.png');
imwrite(I3, '/Tmp/I3.png');
imwrite(I4, '/Tmp/I4.png');
imwrite(J(:,:,1), '/Tmp/J1.png');
imwrite(J(:,:,2), '/Tmp/J2.png');
imwrite(J(:,:,3), '/Tmp/J3.png');
imwrite(J(:,:,4), '/Tmp/J4.png');
imwrite(J(:,:,5), '/Tmp/J5.png');

K = cubicResize3(I, 0.5);

max_abs_diff = max(abs(double(J(:)) - double(K(:))));
disp(['max_abs_diff = ', num2str(max_abs_diff)])

function B = cubicResize3(A, scale)
    order = [1 2 3];
    B = A;
    for k = 1:numel(order)
        dim = order(k);
        B = cubicResizeAlongDim(B, dim, scale);
    end
end

function out = cubicResizeAlongDim(in, dim, scale)
    % If dim is 3, permute the input matrix so that the third dimension
    % becomes the first dimension. This way, we can resize along the
    % third dimension as though we were resizing along the first dimension.
    isThirdDimResize = (dim == 3);
    if isThirdDimResize
        in = permute(in, [3 2 1]);
        dim = 1;
    end

    if dim == 1
        out_rows = round(size(in, 1)*scale);
        out_cols = size(in, 2);
    else % dim == 2
        out_rows = size(in, 1);
        out_cols = round(size(in, 2)*scale);
    end

    out = zeros(out_rows, out_cols, size(in, 3), class(in)); % Allocate array for storing the output.

    for i = 1:size(in, 3)
        % Resize each color plane separately
        out(:, :, i) = imresize(in(:, :, i), [out_rows, out_cols], 'bicubic', 'Antialiasing', false);
    end

    % Permute back so that the original dimensions are restored if we were
    % resizing along the third dimension.
    if isThirdDimResize
        out = permute(out, [3 2 1]);
    end
end
The result is max_abs_diff = 0, meaning that cubicResize3 and imresize3 gave the same output.
Note: the above implementation stores the images in the /Tmp folder, to be used as input for testing the Python implementation.
Here is a Python implementation using OpenCV:
import numpy as np
import cv2
#from scipy.ndimage import zoom

def cubic_resize_along_dim(inp, dim, scale):
    """ Implementation is based on MATLAB source code of resizeAlongDim function """
    # If dim is 2, swap axes so that the third dimension becomes the first.
    # This way, we can resize along the third dimension as though we were
    # resizing along the first dimension.
    is_third_dim_resize = (dim == 2)
    if is_third_dim_resize:
        inp = np.swapaxes(inp, 2, 0).copy()  # in = permute(in, [3 2 1])
        dim = 0

    if dim == 0:
        out_rows = int(np.round(inp.shape[0]*scale))  # out_rows = round(size(in, 1)*scale);
        out_cols = inp.shape[1]                       # out_cols = size(in, 2);
    else:  # dim == 1
        out_rows = inp.shape[0]                       # out_rows = size(in, 1);
        out_cols = int(np.round(inp.shape[1]*scale))  # out_cols = round(size(in,2)*scale);

    out = np.zeros((out_rows, out_cols, inp.shape[2]), inp.dtype)  # Allocate array for storing the output.

    for i in range(inp.shape[2]):
        # Resize each color plane separately
        out[:, :, i] = cv2.resize(inp[:, :, i], (out_cols, out_rows), interpolation=cv2.INTER_CUBIC)

    # Swap axes back so that the original dimensions are restored if we were
    # resizing along the third dimension.
    if is_third_dim_resize:
        out = np.swapaxes(out, 2, 0)  # out = permute(out, [3 2 1]);

    return out

def cubic_resize3(a, scale):
    b = a.copy()
    for k in range(3):
        b = cubic_resize_along_dim(b, k, scale)
    return b

# Build 3D input image (10 channels with resolution 512x384).
i1 = cv2.cvtColor(cv2.imread('/Tmp/I1.png', cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
i2 = cv2.cvtColor(cv2.imread('/Tmp/I2.png', cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
i3 = cv2.cvtColor(cv2.imread('/Tmp/I3.png', cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
i4 = cv2.imread('/Tmp/I4.png', cv2.IMREAD_UNCHANGED)

im = np.dstack((i1, i2, i3, i4))  # Stack arrays along the third axis

# Read and adjust MATLAB output (out_mat is used as reference for testing).
# out_mat is the result of J = imresize3(I, 0.5, 'cubic', 'Antialiasing', false);
j1 = cv2.imread('/Tmp/J1.png', cv2.IMREAD_UNCHANGED)
j2 = cv2.imread('/Tmp/J2.png', cv2.IMREAD_UNCHANGED)
j3 = cv2.imread('/Tmp/J3.png', cv2.IMREAD_UNCHANGED)
j4 = cv2.imread('/Tmp/J4.png', cv2.IMREAD_UNCHANGED)
j5 = cv2.imread('/Tmp/J5.png', cv2.IMREAD_UNCHANGED)

out_mat = np.dstack((j1, j2, j3, j4, j5))  # Stack arrays along the third axis

#out_py = zoom(im, 0.5, order=3, mode='reflect')

# Execute 3D resize in Python
out_py = cubic_resize3(im, 0.5)

abs_diff = np.absolute(out_mat.astype(np.int16) - out_py.astype(np.int16))
print(f'max_abs_diff = {abs_diff.max()}')
The Python implementation reads the input files stored by MATLAB (converting from BGR to RGB where required).
The implementation compares the result of cubic_resize3 with the MATLAB output of imresize3.
The maximum difference is 12 (not zero).
Apparently cv2.resize and MATLAB imresize give slightly different results.
Update:
Replacing:
out[:, :, i] = cv2.resize(inp[:, :, i], (out_cols, out_rows), interpolation=cv2.INTER_CUBIC)
with (after from skimage import transform):
out[:, :, i] = transform.resize(inp[:, :, i], (out_rows, out_cols), order=3, mode='edge', anti_aliasing=False, preserve_range=True)
reduces the maximum difference to 4.

Sort image as NP array

I'm trying to sort an image by luminosity using NumPy, which I'm new to. I've managed to create a random image and sort it.
def create_image(output, width, height, arr):
    array = np.zeros([height, width, 3], dtype=np.uint8)
    numOfSwatches = len(arr)
    swatchWidth = int(width / numOfSwatches)
    for i in range(0, numOfSwatches):
        m = i * swatchWidth
        r = (i + 1) * swatchWidth
        array[:, m:r] = arr[i]
    img = Image.fromarray(array)
    img.save(output)
    return img  # return the image so callers (like the answer below) can reuse it
Which creates this image:
So far so good. Now I want to switch from creating random images to loading them and then sorting them.
#!/usr/bin/python3
import math
import numpy as np
from PIL import Image

# --------------------------------------------------------------
def load_image(infilename):
    img = Image.open(infilename)
    img.load()
    data = np.asarray(img, dtype="int32")
    return data

# --------------------------------------------------------------
def lum(r, g, b):
    return math.sqrt(.241 * r + .691 * g + .068 * b)

myImageFile = "random_colours.png"
imageNP = load_image(myImageFile)
imageNP.sort(key=lambda rgb: lum(*rgb))  # this line raises the TypeError below
The image should look like this:
The error I get is TypeError: 'key' is an invalid keyword argument for this function. I may have created the NP array incorrectly, as it worked when it was a random NP array.
I have not used PIL before, but the following approach hopefully works (I'm not sure, as I can't reproduce your exact examples), and of course there might be more efficient ways to do it.
I'm using your functions, having changed math.sqrt to np.sqrt in the lum function, as it is better for vector calculations. By the way, I believe this won't work with an int32-typed array (as in your load_image function).
The key part is NumPy's argsort function (last line), which gives the indices that would sort the given array; this is applied to one row of the luminosity array (exploiting symmetry) and then used as an indexer of img_array.
# Create random image
np.random.seed(4)
img = create_image('test.png', 75, 75, np.random.random((25, 3)) * 255)

# Convert to Numpy array and calculate luminosity
img_array = np.array(img, dtype=np.uint8)
luminosity = lum(img_array[..., 0], img_array[..., 1], img_array[..., 2])

# Sort by luminosity and convert to image again
img_sorted = Image.fromarray(img_array[:, luminosity[0].argsort()])
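For reference, a tiny standalone sketch of the argsort-as-indexer idea (hypothetical values):

import numpy as np

vals = np.array([0.9, 0.1, 0.5])  # hypothetical luminosities of one row
order = vals.argsort()            # indices that would sort vals -> [1, 2, 0]
print(vals[order])                # [0.1 0.5 0.9]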
The original picture:
And the luminosity-sorted one:

GEE sampleRectangle() returning 1x1 array

I'm facing an issue when trying to use the sampleRectangle() function in GEE: it is returning 1x1 arrays and I can't seem to find a workaround. Please see below a Python code in which I'm using an approach posted by Justin Braaten. I suspect there's something wrong with the geometry object I'm passing to the function, but at the same time I've tried several ways to check how this argument is behaving and couldn't spot any major issue.
Can anyone give me a hand trying to understand what is happening?
Thanks!
import json
import ee
import numpy as np
import matplotlib.pyplot as plt

ee.Initialize()

point = ee.Geometry.Point([-55.8571, -9.7864])
box_l8sr = ee.Geometry(point.buffer(50).bounds())
box_l8sr2 = ee.Geometry.Polygon(box_l8sr.coordinates())
# print(box_l8sr2)

# Define an image.
# l8sr_y = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_038029_20180810')
oli_sr_coll = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')

## Function to mask out clouds and cloud-shadows present in Landsat images
def maskL8sr(image):
    ## Bits 3 and 5 are cloud shadow and cloud, respectively.
    cloudShadowBitMask = (1 << 3)
    cloudsBitMask = (1 << 5)
    ## Get the pixel QA band.
    qa = image.select('pixel_qa')
    ## Both flags should be set to zero, indicating clear conditions
    ## (combine the two tests so the first is not overwritten).
    mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0) \
             .And(qa.bitwiseAnd(cloudsBitMask).eq(0))
    return image.updateMask(mask)

l8sr_y = oli_sr_coll.filterDate('2019-01-01', '2019-12-31').map(maskL8sr).mean()
l8sr_bands = l8sr_y.select(['B2', 'B3', 'B4']).sampleRectangle(box_l8sr2)
print(type(l8sr_bands))

# Get individual band arrays.
band_arr_b4 = l8sr_bands.get('B4')
band_arr_b3 = l8sr_bands.get('B3')
band_arr_b2 = l8sr_bands.get('B2')

# Transfer the arrays from server to client and cast as np array.
np_arr_b4 = np.array(band_arr_b4.getInfo())
np_arr_b3 = np.array(band_arr_b3.getInfo())
np_arr_b2 = np.array(band_arr_b2.getInfo())
print(np_arr_b4.shape)
print(np_arr_b3.shape)
print(np_arr_b2.shape)

# Expand the dimensions of the images so they can be concatenated into 3-D.
np_arr_b4 = np.expand_dims(np_arr_b4, 2)
np_arr_b3 = np.expand_dims(np_arr_b3, 2)
np_arr_b2 = np.expand_dims(np_arr_b2, 2)
# # print(np_arr_b4.shape)
# # print(np_arr_b5.shape)
# # print(np_arr_b6.shape)

# # Stack the individual bands to make a 3-D array.
rgb_img = np.concatenate((np_arr_b2, np_arr_b3, np_arr_b4), 2)
# print(rgb_img.shape)

# # Scale the data to [0, 255] to show as an RGB image.
rgb_img_test = (255 * ((rgb_img - 100) / 3500)).astype('uint8')
# plt.imshow(rgb_img)
plt.show()

# # # create L8OLI plot
# fig, ax = plt.subplots()
# ax.set(title="Satellite Image")
# ax.set_axis_off()
# plt.plot(42, 42, 'ko')
# img = ax.imshow(rgb_img_test, interpolation='nearest')
I have the same issue. It seems to have something to do with .mean(), or any reduction of image collections for that matter.
One solution is to reproject after the reduction. For example, you could try adding "reproject" at the end:
l8sr_y = oli_sr_coll.filterDate('2019-01-01', '2019-12-31').map(maskL8sr).mean().reproject(crs=ee.Projection('EPSG:4326'), scale=30)
It should work.
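A minimal hedged check, reusing the question's variables after adding the reproject:

l8sr_bands = l8sr_y.select(['B2', 'B3', 'B4']).sampleRectangle(box_l8sr2)
band_arr_b4 = l8sr_bands.get('B4')
np_arr_b4 = np.array(band_arr_b4.getInfo())
print(np_arr_b4.shape)  # should now be larger than (1, 1) for the ~100 m box at 30 m scale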

Plot a gamut in CIE 1931 colour space (Python 2.7)

Gamut I want to plot in CIE1931 space: https://www.google.co.uk/search?biw=1337&bih=1257&tbm=isch&sa=1&ei=9x3kW7rqBo3ygQb-8aWYBw&q=viewpixx+gamut&oq=viewpixx+gamut&gs_l=img.3...2319.2828.0.3036.5.5.0.0.0.0.76.270.5.5.0....0...1c.1.64.img..0.0.0....0.KT8w80tcZik#imgrc=77Ufw31S6UVlYM
I want to create a triangle plot of the CIE xyY colours within these coordinates: (.119,.113), (.162,.723), (.695,.304), as in the image, with a set luminance Y of 30.0.
I have created a 3d array of xy values between 0-1.
I then created a matrix with 1s inside the triangle and 0s outside the triangle.
I multiplied the triangle matrix by the xyY ndarray.
Then I looped through the xyY ndarray and converted xyY values to rgb, and displayed them.
The result is somewhat close but not correct. I think the error is in the last section when I convert to rgb, but I'm not sure why. This is the current image: https://imgur.com/a/7cWY0FI. Any recommendations would be really appreciated.
from __future__ import division
import numpy as np
from colormath.color_objects import sRGBColor, xyYColor
from colormath.color_conversions import convert_color
import matplotlib.pyplot as plt

def frange(x, y, jump):
    while x < y:
        yield x
        x += jump

def onSameSide(p1, p2, A, B):
    cp1 = np.cross(B - A, p1 - A)
    cp2 = np.cross(B - A, p2 - A)
    if np.dot(cp1, cp2) >= 0:
        return True
    else:
        return False

def isPointInTriangle(p, A, B, C):
    if onSameSide(p, A, B, C) and onSameSide(p, B, A, C) and onSameSide(p, C, A, B):
        return True
    else:
        return False

xlen = 400
ylen = 400

#CIExyY colour space
#Make an array (3, xlen, ylen) with each plane representing how x, y, Y vary in the coordinate space
ciexyY = np.zeros((3, xlen, ylen))
ciexyY[2, :, :] = 30.0

for x in frange(0, 1, 1 / xlen):
    ciexyY[0, :, int(xlen * x)] = x
for y in frange(0, 1, 1 / xlen):
    ciexyY[1, int(ylen * y), :] = y

#coordinates from Viewpixx gamut, scaled up to 100
blue = np.array((.119, .113, 30.0))
green = np.array((.162, .723, 30.0))
red = np.array((.695, .304, 30.0))

#scale up to size of image
blue = np.multiply(blue, xlen)
green = np.multiply(green, xlen)
red = np.multiply(red, xlen)

#make an array of zeros and ones to plot the shape of the Viewpixx triangle
triangleZeros = np.zeros((xlen, ylen))

for x in range(0, xlen):
    for y in range(0, ylen):
        if isPointInTriangle((x, y, 0), blue, green, red):
            triangleZeros[x, y] = 1
        else:
            triangleZeros[x, y] = 0

#cieTriangle
cieTriangle = np.multiply(ciexyY, triangleZeros)

#convert cieTriangle xyY to rgb
rgbTriangle = np.zeros((3, xlen, ylen))

for x in range(0, xlen):
    for y in range(0, ylen):
        xyYcolour = xyYColor(cieTriangle[0, x, y], cieTriangle[1, x, y], cieTriangle[2, x, y])
        rgbColour = convert_color(xyYcolour, sRGBColor)
        rgbTriangle[0, x, y] = rgbColour.rgb_r
        rgbTriangle[1, x, y] = rgbColour.rgb_g
        rgbTriangle[2, x, y] = rgbColour.rgb_b

rgbTriangle = np.transpose(rgbTriangle)

plt.imshow(rgbTriangle)
plt.show()
We have all the common Chromaticity Diagrams in Colour; I would recommend it over python-colormath because Colour is vectorised and thus much faster.
Do you have a render of your current image to share though?
from colour.plotting import plot_chromaticity_diagram_CIE1931
plot_chromaticity_diagram_CIE1931()
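As a hedged follow-up sketch, the question's gamut triangle can be overlaid on that diagram with standard pyplot calls (assuming the standalone keyword accepted by Colour's plotting functions at the time, which defers rendering to the caller):

import matplotlib.pyplot as plt
from colour.plotting import plot_chromaticity_diagram_CIE1931

# Draw the diagram into the current axes without showing it yet.
plot_chromaticity_diagram_CIE1931(standalone=False)

# Primaries from the question, closed back to the first point.
x = [0.119, 0.162, 0.695, 0.119]
y = [0.113, 0.723, 0.304, 0.113]
plt.plot(x, y, 'k-')
plt.show()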
