NameError: name 'IMG_H' is not defined - python

I am new to programming. I am using the PIL and Matplotlib libraries for contrast stretching. When I run the histogram equalization I get the error name 'IMG_H' is not defined. I convert my image to a numpy array, calculate the histogram and cumulative sum, build a mapping, and then apply the mapping to create a new image.
You can see my code below -
# HISTOGRAM EQUALIZATION
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np

def make_histogram(img):
    """ Take an image and create a histogram from its luma values """
    y_vals = img[:,:,0].flatten()
    histogram = np.zeros(256, dtype=int)
    for y_index in range(y_vals.size):
        histogram[y_vals[y_index]] += 1
    return histogram

def make_cumsum(histogram):
    """ Create an array that represents the cumulative sum of the histogram """
    cumsum = np.zeros(256, dtype=int)
    cumsum[0] = histogram[0]
    for i in range(1, histogram.size):
        cumsum[i] = cumsum[i-1] + histogram[i]
    return cumsum

def make_mapping(histogram, cumsum):
    mapping = np.zeros(256, dtype=int)
    luma_levels = 256
    for i in range(histogram.size):
        mapping[i] = max(0, round((luma_levels*cumsum[i])/(IMG_H*IMG_W))-1)
    return mapping

def apply_mapping(img, mapping):
    """ Apply the mapping to our image """
    new_image = img.copy()
    new_image[:,:,0] = list(map(lambda a: mapping[a], img[:,:,0]))
    return new_image

# Load image
pillow_img = Image.open('pout.jpg')

# Convert our image to numpy array, calculate the histogram, cumulative sum,
# mapping and then apply the mapping to create a new image
img = np.array(pillow_img)
histogram = make_histogram(img)
cumsum = make_cumsum(histogram)
mapping = make_mapping(histogram, cumsum)
new_image = apply_mapping(img, mapping)
output_image = Image.fromarray(np.uint8(new_image))
plt.imshow(output_image, cmap='gray')

# Display the old (black) and new (red) histograms next to each other
x_axis = np.arange(256)
fig = plt.figure()
fig.add_subplot(1,2,1)
plt.bar(x_axis, histogram, color="black")
fig.add_subplot(1,2,2)
plt.bar(x_axis, make_histogram(new_image), color="red")
plt.show()

You use these variables here:
mapping[i] = max(0, round((luma_levels*cumsum[i])/(IMG_H*IMG_W))-1)
But you never defined (or imported) them before this point, which is why you get the NameError.

In this line:

for i in range(histogram.size):
    mapping[i] = max(0, round((luma_levels*cumsum[i])/(IMG_H*IMG_W))-1)

you multiply two variables, IMG_H and IMG_W, but they are never defined or imported anywhere in the code, hence the NameError. Judging by their names, they are meant to hold the image height and width, so you can define them at the top of the code:

IMG_W = 120 # any integer value for the image width
IMG_H = 124 # any integer value for the image height
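A more robust alternative (not part of the original answer, just a common numpy pattern) is to derive the dimensions from the image array itself instead of hard-coding them, since img already exists before make_mapping is called:

IMG_H, IMG_W = img.shape[:2] # numpy image arrays are indexed (rows, cols) = (height, width)

This way the mapping stays correct for any input image size.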

Related

Extract N number of patches from an image

I have an image of dimension 155 x 240, like the following:
I want to extract patches of a certain shape (25 x 25), but I don't want to take patches from the whole image. I want to extract N patches from the non-zero (non-background) area of the image. How can I do that? Any idea, suggestion, or implementation would be appreciated, in either Matlab or Python.
Note:
I have generated an example image so that you can try the patching on it; the image_process variable in this code is that image.
import numpy as np
from scipy.ndimage.filters import convolve
import matplotlib.pyplot as plt
background = np.ones((155,240))
background[78,120] = 2
n_d = 50
y,x = np.ogrid[-n_d: n_d+1, -n_d: n_d+1]
mask = x**2+y**2 <= n_d**2
mask = 254*mask.astype(float)
image_process = convolve(background, mask)-sum(sum(mask))+1
image_process[image_process==1] = 0
image_process[image_process==255] = 1
plt.imshow(image_process)
Let's assume that the pixel value you want to omit is 0. In that case, you can first find the indices of the non-zero values, then slice the image between the min/max positions to get only the desired area, and finally apply extract_patches_2d with the desired window size and number of patches.
For example, given the dummy image you supplied:
import numpy as np
from scipy.ndimage.filters import convolve
import matplotlib.pyplot as plt
background = np.ones((155,240))
background[78,120] = 2
n_d = 50
y,x = np.ogrid[-n_d: n_d+1, -n_d: n_d+1]
mask = x**2+y**2 <= n_d**2
mask = 254*mask.astype(float)
image_process = convolve(background, mask)-sum(sum(mask))+1
image_process[image_process==1] = 0
image_process[image_process==255] = 1
plt.figure()
plt.imshow(image_process)
plt.show()
from sklearn.feature_extraction.image import extract_patches_2d
x, y = np.nonzero(image_process)
xl,xr = x.min(),x.max()
yl,yr = y.min(),y.max()
only_desired_area = image_process[xl:xr+1, yl:yr+1]
window_shape = (25, 25)
B = extract_patches_2d(only_desired_area, window_shape, max_patches=100) # B shape will be (100, 25, 25)
If you plot only_desired_area you will get the following image:
This is the main logic; if you want an even tighter bound, adjust the slicing accordingly.
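As a quick sanity check (a short sketch, not part of the original answer), you can verify the patch stack and look at a few of the extracted patches:

print(B.shape) # (100, 25, 25): 100 patches of 25 x 25 pixels
fig, axes = plt.subplots(ncols=4)
for ax, patch in zip(axes, B[:4]):
    ax.imshow(patch)
plt.show()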

gee 'sampleRectangle()' returning 1x1 array

I'm facing an issue when trying to use the sampleRectangle() function in GEE: it returns 1x1 arrays and I can't seem to find a workaround. Please see below the Python code, in which I'm using an approach posted by Justin Braaten. I suspect there's something wrong with the geometry object I'm passing to the function, but I've tried several ways to check how this argument behaves and couldn't spot any major issue.
Can anyone give me a hand trying to understand what is happening?
Thanks!
import json
import ee
import numpy as np
import matplotlib.pyplot as plt
ee.Initialize()
point = ee.Geometry.Point([-55.8571, -9.7864])
box_l8sr = ee.Geometry(point.buffer(50).bounds())
box_l8sr2 = ee.Geometry.Polygon(box_l8sr.coordinates())
# print(box_l8sr2)
# Define an image.
# l8sr_y = ee.Image('LANDSAT/LC08/C01/T1_SR/LC08_038029_20180810')
oli_sr_coll = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
## Function to mask out clouds and cloud-shadows present in Landsat images
def maskL8sr(image):
    ## Bits 3 and 5 are cloud shadow and cloud, respectively.
    cloudShadowBitMask = (1 << 3)
    cloudsBitMask = (1 << 5)
    ## Get the pixel QA band.
    qa = image.select('pixel_qa')
    ## Both flags should be set to zero, indicating clear conditions.
    ## (Combine the two tests; assigning them separately would overwrite the first mask.)
    mask = qa.bitwiseAnd(cloudShadowBitMask).eq(0) \
             .And(qa.bitwiseAnd(cloudsBitMask).eq(0))
    return image.updateMask(mask)
l8sr_y = oli_sr_coll.filterDate('2019-01-01', '2019-12-31').map(maskL8sr).mean()
l8sr_bands = l8sr_y.select(['B2', 'B3', 'B4']).sampleRectangle(box_l8sr2)
print(type(l8sr_bands))
# Get individual band arrays.
band_arr_b4 = l8sr_bands.get('B4')
band_arr_b3 = l8sr_bands.get('B3')
band_arr_b2 = l8sr_bands.get('B2')
# Transfer the arrays from server to client and cast as np array.
np_arr_b4 = np.array(band_arr_b4.getInfo())
np_arr_b3 = np.array(band_arr_b3.getInfo())
np_arr_b2 = np.array(band_arr_b2.getInfo())
print(np_arr_b4.shape)
print(np_arr_b3.shape)
print(np_arr_b2.shape)
# Expand the dimensions of the images so they can be concatenated into 3-D.
np_arr_b4 = np.expand_dims(np_arr_b4, 2)
np_arr_b3 = np.expand_dims(np_arr_b3, 2)
np_arr_b2 = np.expand_dims(np_arr_b2, 2)
# # print(np_arr_b4.shape)
# # print(np_arr_b3.shape)
# # print(np_arr_b2.shape)
# # Stack the individual bands to make a 3-D array.
rgb_img = np.concatenate((np_arr_b2, np_arr_b3, np_arr_b4), 2)
# print(rgb_img.shape)
# # Scale the data to [0, 255] to show as an RGB image.
rgb_img_test = (255*((rgb_img - 100)/3500)).astype('uint8')
# plt.imshow(rgb_img)
plt.show()
# # # create L8OLI plot
# fig, ax = plt.subplots()
# ax.set(title = "Satellite Image")
# ax.set_axis_off()
# plt.plot(42, 42, 'ko')
# img = ax.imshow(rgb_img_test, interpolation='nearest')
I have the same issue. It seems to have something to do with .mean(), or any reduction of image collections for that matter.
One solution is to reproject after the reduction. For example, you could try adding "reproject" at the end:
l8sr_y = oli_sr_coll.filterDate('2019-01-01', '2019-12-31').map(maskL8sr).mean().reproject(crs = ee.Projection('EPSG:4326'), scale=30)
It should work.
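As a quick check (reusing the question's own code, not part of the original answer), re-run sampleRectangle after the reprojection and inspect the returned array shape:

l8sr_bands = l8sr_y.select(['B2', 'B3', 'B4']).sampleRectangle(box_l8sr2)
np_arr_b4 = np.array(l8sr_bands.get('B4').getInfo())
print(np_arr_b4.shape) # should now be larger than (1, 1); roughly 4x4 for a ~100 m box at 30 m scale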

Plot a gamut in cie1931 colour space Python 2.7

Gamut I want to plot in CIE1931 space: https://www.google.co.uk/search?biw=1337&bih=1257&tbm=isch&sa=1&ei=9x3kW7rqBo3ygQb-8aWYBw&q=viewpixx+gamut&oq=viewpixx+gamut&gs_l=img.3...2319.2828.0.3036.5.5.0.0.0.0.76.270.5.5.0....0...1c.1.64.img..0.0.0....0.KT8w80tcZik#imgrc=77Ufw31S6UVlYM
I want to create a triangle plot of the CIE xyY colours within these coordinates: (.119,.113), (.162,.723), (.695,.304), as in the image, with the luminance Y fixed at 30.0.
I have created a 3d array of xy values between 0-1.
I then created a matrix with 1s inside the triangle and 0s outside the triangle.
I multiplied the triangle matrix by the xyY ndarray.
Then I looped through the xyY ndarray and converted xyY values to rgb, and displayed them.
The result is somewhat close but not correct. I think the error is in the last section when I convert to rgb, but I'm not sure why. This is the current image: https://imgur.com/a/7cWY0FI. Any recommendations would be really appreciated.
from __future__ import division
import numpy as np
from colormath.color_objects import sRGBColor, xyYColor
from colormath.color_conversions import convert_color
import matplotlib.pyplot as plt

def frange(x, y, jump):
    while x < y:
        yield x
        x += jump

def onSameSide(p1, p2, A, B):
    cp1 = np.cross(B-A, p1-A)
    cp2 = np.cross(B-A, p2-A)
    if np.dot(cp1, cp2) >= 0:
        return True
    else:
        return False

def isPointInTriangle(p, A, B, C):
    if onSameSide(p,A,B,C) and onSameSide(p,B,A,C) and onSameSide(p,C,A,B):
        return True
    else:
        return False

xlen = 400
ylen = 400

#CIExyY colour space
#Make an array (3, xlen, ylen) with each plane representing how x,y,Y vary over the coordinate space
ciexyY = np.zeros((3,xlen,ylen))
ciexyY[2,:,:] = 30.0
for x in frange(0, 1, 1/xlen):
    ciexyY[0,:,int(xlen*x)] = x
for y in frange(0, 1, 1/ylen):
    ciexyY[1,int(ylen*y),:] = y

#coordinates from Viewpixx gamut
blue = np.array((.119,.113,30.0))
green = np.array((.162,.723,30.0))
red = np.array((.695,.304,30.0))

#scale up to the size of the image
blue = np.multiply(blue, xlen)
green = np.multiply(green, xlen)
red = np.multiply(red, xlen)

#make an array of zeros and ones tracing the shape of the Viewpixx triangle
triangleZeros = np.zeros((xlen,ylen))
for x in range(0, xlen):
    for y in range(0, ylen):
        if isPointInTriangle((x,y,0), blue, green, red):
            triangleZeros[x,y] = 1
        else:
            triangleZeros[x,y] = 0

#cieTriangle
cieTriangle = np.multiply(ciexyY, triangleZeros)

#convert cieTriangle xyY to rgb
rgbTriangle = np.zeros((3,xlen,ylen))
for x in range(0, xlen):
    for y in range(0, ylen):
        xyYcolour = xyYColor(cieTriangle[0,x,y], cieTriangle[1,x,y], cieTriangle[2,x,y])
        rgbColour = convert_color(xyYcolour, sRGBColor)
        rgbTriangle[0,x,y] = rgbColour.rgb_r
        rgbTriangle[1,x,y] = rgbColour.rgb_g
        rgbTriangle[2,x,y] = rgbColour.rgb_b
rgbTriangle = np.transpose(rgbTriangle)
plt.imshow(rgbTriangle)
plt.show()
We have all the common chromaticity diagrams in Colour; I would recommend it over python-colormath because Colour is vectorised and thus much faster.
Do you have a render of your current image to share, though?
from colour.plotting import plot_chromaticity_diagram_CIE1931
plot_chromaticity_diagram_CIE1931()
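Building on that (a sketch, not from the original answer), you can overlay the gamut triangle from the question on the diagram. This assumes the plotting function accepts standalone=False and returns the figure and axes, as recent versions of Colour do:

import matplotlib.pyplot as plt
from colour.plotting import plot_chromaticity_diagram_CIE1931

# standalone=False is assumed to suppress the immediate display so the axes can be reused
figure, axes = plot_chromaticity_diagram_CIE1931(standalone=False)
# xy primaries from the question, repeated once to close the triangle
x = [.119, .162, .695, .119]
y = [.113, .723, .304, .113]
axes.plot(x, y, 'k-', linewidth=2)
plt.show()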

trouble displaying image HSI converted to RGB python

I've been working on an algorithm to convert RGB to HSI and vice versa in Python 3, which displays the resulting images and each channel using matplotlib.
The trouble is displaying the HSI-to-RGB result: each channel alone is displayed correctly, but when the three channels are shown together I get a weird image.
By the way, when I save the resulting image with OpenCV, it looks correct.
Resulting display
Things I tried, with no change in the result:
Rounding the values, and clamping any pixel value that exceeds 1 to 1
In the HSI-to-RGB conversion, defining the R, G and B arrays with ones instead of zeros
In the RGB-to-HSI conversion, changing the value ranges from [0,360], [0,1], [0,1] to [0,360], [0,255], [0,255], rounded or not
Using Google Colab or Spyder instead of a Jupyter notebook
Running the code from a terminal, but that gives me blank windows
Function to display images:
import math # used by the conversion functions below
import cv2 # used in the main function
import numpy as np
import matplotlib.pyplot as plt

def show_images(T, cols=1):
    N = len(T)
    fig = plt.figure()
    for i in range(N):
        a = fig.add_subplot(int(np.ceil(N/float(cols))), cols, i+1)
        try:
            img, title = T[i]
        except ValueError:
            img, title = T[i], "Image %d" % (i+1)
        if img.ndim == 2:
            plt.gray()
        plt.imshow(img)
        a.set_title(title)
        plt.xticks([0, img.shape[1]]), plt.yticks([0, img.shape[0]])
    fig.set_size_inches(np.array(fig.get_size_inches()) * N)
    plt.show()
Then the main function does this:
image = bgr_to_rgb(cv2.imread("rgb.png"))
img1 = rgb_to_hsi(image)
img2 = hsi_to_rgb(img1)
show_images([(image, "RGB"),
             (image[:,:,0], "Red"),
             (image[:,:,1], "Green"),
             (image[:,:,2], "Blue")], 4)
show_images([(img1, "RGB->HSI"),
             (img1[:,:,0], "Hue"),
             (img1[:,:,1], "Saturation"),
             (img1[:,:,2], "Intensity")], 4)
show_images([(img2, "HSI->RGB"),
             (img2[:,:,0], "Red"),
             (img2[:,:,1], "Green"),
             (img2[:,:,2], "Blue")], 4)
Conversion RGB to HSI:
def rgb_to_hsi(img):
    zmax = 255 # max value
    # values in [0,1]
    R = np.divide(img[:,:,0], zmax, dtype=float)
    G = np.divide(img[:,:,1], zmax, dtype=float)
    B = np.divide(img[:,:,2], zmax, dtype=float)
    # Hue; when R=G=B -> H=90
    a = (0.5)*np.add(np.subtract(R,G), np.subtract(R,B)) # (1/2)*[(R-G)+(R-B)]
    b = np.sqrt(np.add(np.power(np.subtract(R,G), 2), np.multiply(np.subtract(R,B), np.subtract(G,B))))
    theta = np.arccos(np.divide(a, b, out=np.zeros_like(a), where=b!=0)) # when b = 0, division returns 0, so theta = 90
    H = (180/math.pi)*theta # convert rad to degree
    H[B>G] = 360 - H[B>G]
    # saturation = 1 - 3*[min(R,G,B)]/(R+G+B); when R=G=B -> S=0
    a = 3*np.minimum(np.minimum(R,G), B) # 3*min(R,G,B)
    b = np.add(np.add(R,G), B) # (R+G+B)
    S = np.subtract(1, np.divide(a, b, out=np.ones_like(a), where=b!=0))
    # intensity = (1/3)*[R+G+B]
    I = (1/3)*np.add(np.add(R,G), B)
    return np.dstack((H, zmax*S, np.round(zmax*I))) # values in [0,360], [0,255] and [0,255]
Conversion HSI to RGB:
def f1(I, S): # I(1-S)
    return np.multiply(I, np.subtract(1,S))

def f2(I, S, H): # I[1+(ScosH/cos(60-H))]
    r = math.pi/180
    a = np.multiply(S, np.cos(r*H)) # ScosH
    b = np.cos(r*np.subtract(60,H)) # cos(60-H)
    return np.multiply(I, np.add(1, np.divide(a,b)))

def f3(I, C1, C2): # 3I-(C1+C2)
    return np.subtract(3*I, np.add(C1,C2))

def hsi_to_rgb(img):
    zmax = 255 # max value
    # values in [0,360], [0,1] and [0,1]
    H = img[:,:,0]
    S = np.divide(img[:,:,1], zmax, dtype=float)
    I = np.divide(img[:,:,2], zmax, dtype=float)
    R, G, B = np.ones(H.shape), np.ones(H.shape), np.ones(H.shape) # values will be in [0,1]
    # for 0 <= H < 120
    B[(0<=H)&(H<120)] = f1(I[(0<=H)&(H<120)], S[(0<=H)&(H<120)])
    R[(0<=H)&(H<120)] = f2(I[(0<=H)&(H<120)], S[(0<=H)&(H<120)], H[(0<=H)&(H<120)])
    G[(0<=H)&(H<120)] = f3(I[(0<=H)&(H<120)], R[(0<=H)&(H<120)], B[(0<=H)&(H<120)])
    # for 120 <= H < 240 (shift H down by 120 and reuse the same masks)
    H = np.subtract(H,120)
    R[(0<=H)&(H<120)] = f1(I[(0<=H)&(H<120)], S[(0<=H)&(H<120)])
    G[(0<=H)&(H<120)] = f2(I[(0<=H)&(H<120)], S[(0<=H)&(H<120)], H[(0<=H)&(H<120)])
    B[(0<=H)&(H<120)] = f3(I[(0<=H)&(H<120)], R[(0<=H)&(H<120)], G[(0<=H)&(H<120)])
    # for 240 <= H < 360
    H = np.subtract(H,120)
    G[(0<=H)&(H<120)] = f1(I[(0<=H)&(H<120)], S[(0<=H)&(H<120)])
    B[(0<=H)&(H<120)] = f2(I[(0<=H)&(H<120)], S[(0<=H)&(H<120)], H[(0<=H)&(H<120)])
    R[(0<=H)&(H<120)] = f3(I[(0<=H)&(H<120)], G[(0<=H)&(H<120)], B[(0<=H)&(H<120)])
    return np.dstack(((zmax*R), (zmax*G), (zmax*B))) # values in [0,255]
If you take a look at the imshow documentation of matplotlib, you will see the following lines:
X : array-like or PIL image
    The image data. Supported array shapes are:
    - (M, N): an image with scalar data. The data is visualized using a colormap.
    - (M, N, 3): an image with RGB values (float or uint8).
    - (M, N, 4): an image with RGBA values (float or uint8), i.e. including transparency.
    The first two dimensions (M, N) define the rows and columns of the image.
    The RGB(A) values should be in the range [0 .. 1] for floats or [0 .. 255] for integers. Out-of-range values will be clipped to these bounds.
This tells you the ranges the data should be in. In your case, the Hue channel of the HSI image goes up to 360, and any value above 255 will be clipped. That is one of the reasons why OpenCV uses a Hue range of 0-180: to make it fit inside an 8-bit range.
Your HSI->RGB conversion then returns the image as float, so it will be clipped at 1.0.
This happens not only for display; if you save the image it will most likely be clipped as well, unless you save it as a 16-bit image.
Possible solutions:
Normalize the values to [0, 1] or [0, 255] (this may change the min and max values) and then display them (don't forget to cast to np.uint8); see the sketch after this list.
Or map your channels into a range that always fits inside the displayable values.
This applies to both display and saving. If you use 0-360, save the image with at least 16 bits.
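For example, here is a minimal sketch of the first option (assuming the channel layout from the question, with ranges [0,360], [0,255], [0,255]):

def to_displayable(img):
    # Scale each channel independently to [0, 255] and cast to np.uint8,
    # so matplotlib clips nothing. Note this stretches the channels and is
    # only meant for visualization.
    out = np.zeros(img.shape, dtype=np.uint8)
    for c in range(img.shape[2]):
        chan = img[:,:,c].astype(float)
        lo, hi = chan.min(), chan.max()
        chan = (chan - lo) / (hi - lo) if hi > lo else np.zeros_like(chan)
        out[:,:,c] = np.round(255 * chan).astype(np.uint8)
    return out

plt.imshow(to_displayable(img1)) # e.g. the HSI image from the question
plt.show()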

Shape recognition with numpy/scipy (perhaps watershed)

My goal is to trace drawings that have a lot of separate shapes in them and to split these shapes into individual images. The drawings are black on white. I'm quite new to numpy, opencv & co, but here is my current thought:
scan for black pixels
black pixel found -> watershed
find watershed boundary (as polygon path)
continue searching, but ignore points within the already found boundaries
I'm not very good at this kind of thing; is there a better way?
First I tried to find the rectangular bounding box of the watershed results (this is more or less a collage of examples):
from numpy import *
import numpy as np
from scipy import ndimage

np.set_printoptions(threshold=np.inf)

a = np.zeros((512, 512)).astype(np.uint8) # unsigned integer type needed by watershed
y, x = np.ogrid[0:512, 0:512]
m1 = ((y-200)**2 + (x-100)**2 < 30**2)
m2 = ((y-350)**2 + (x-400)**2 < 20**2)
m3 = ((y-260)**2 + (x-200)**2 < 20**2)
a[m1+m2+m3] = 1

markers = np.zeros_like(a).astype(int16)
markers[0, 0] = 1
markers[200, 100] = 2
markers[350, 400] = 3
markers[260, 200] = 4

res = ndimage.watershed_ift(a.astype(uint8), markers)
unique(res)

B = argwhere(res.astype(uint8))
(ystart, xstart), (ystop, xstop) = B.min(0), B.max(0) + 1
tr = a[ystart:ystop, xstart:xstop]
print(tr)
Somehow, when I use the original array (a), argwhere seems to work, but after the watershed (res) it just outputs the complete array again.
The next step could be to find the polygon path around the shape, but the bounding box would be great for now!
Please help!
@Hooked has already answered most of your question, but I was in the middle of writing this up when he answered, so I'll post it in the hope that it's still useful...
You're trying to jump through a few too many hoops. You don't need watershed_ift.
Use scipy.ndimage.label to differentiate separate objects in a boolean array and scipy.ndimage.find_objects to find the bounding box of each object.
Let's break things down a bit.
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt

def draw_circle(grid, x0, y0, radius):
    ny, nx = grid.shape
    y, x = np.ogrid[:ny, :nx]
    dist = np.hypot(x - x0, y - y0)
    grid[dist < radius] = True
    return grid

# Generate 3 circles...
a = np.zeros((512, 512), dtype=bool)
draw_circle(a, 100, 200, 30)
draw_circle(a, 400, 350, 20)
draw_circle(a, 200, 260, 20)

# Label the objects in the array.
labels, numobjects = ndimage.label(a)

# Now find their bounding boxes (This will be a tuple of slice objects)
# You can use each one to directly index your data.
# E.g. a[slices[0]] gives you the original data within the bounding box of the
# first object.
slices = ndimage.find_objects(labels)

#-- Plotting... -------------------------------------
fig, ax = plt.subplots()
ax.imshow(a)
ax.set_title('Original Data')

fig, ax = plt.subplots()
ax.imshow(labels)
ax.set_title('Labeled objects')

fig, axes = plt.subplots(ncols=numobjects)
for ax, sli in zip(axes.flat, slices):
    ax.imshow(labels[sli], vmin=0, vmax=numobjects)
    tpl = 'BBox:\nymin:{0.start}, ymax:{0.stop}\nxmin:{1.start}, xmax:{1.stop}'
    ax.set_title(tpl.format(*sli))

fig.suptitle('Individual Objects')
plt.show()
Hopefully that makes it a bit clearer how to find the bounding boxes of the objects.
Use the ndimage library from scipy. The function label assigns a unique tag to each connected block of pixels above a threshold, which identifies the unique clusters (shapes). Starting with your definition of a:
from scipy import ndimage
image_threshold = .5
label_array, n_features = ndimage.label(a>image_threshold)
# Plot the resulting shapes
import pylab as plt
plt.subplot(121)
plt.imshow(a)
plt.subplot(122)
plt.imshow(label_array)
plt.show()
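Building on that (a short sketch, not part of the original answers), you can then split the labeled shapes into individual cropped images, which was the stated goal:

# Crop each labeled shape into its own image.
slices = ndimage.find_objects(label_array)
shapes = [a[sli] * (label_array[sli] == i + 1) for i, sli in enumerate(slices)]
# shapes[0], shapes[1], ... each contain exactly one shape on a zero background.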
