Implementing GLCM texture feature with scikit-image and Python

I am trying to compute a texture image as described in this tutorial using Python and skimage.
The task is to move a 7x7 window over a large raster and replace the center pixel of each window with the texture calculated from that window. I manage to do this with the code below, but I see no way other than looping over each individual pixel, which is very slow.
One software package does this in a few seconds, so there must be some other way ... is there?
Here is the code that works, but is very slow ...
import matplotlib.pyplot as plt
import gdal, gdalconst
import numpy as np
from skimage.feature import greycomatrix, greycoprops

filename = "//mnt//glaciology//RS2_20140101.jpg"
outfilename = "//home//max//Documents//GLCM_contrast.tif"
sarfile = gdal.Open(filename, gdalconst.GA_ReadOnly)
sarraster = sarfile.ReadAsArray()

#sarraster is satellite image, testraster will receive texture
testraster = np.copy(sarraster)
testraster[:] = 0

for i in range(testraster.shape[0]):
    print i,
    for j in range(testraster.shape[1]):
        #window needs to fit completely in image
        if i < 3 or j < 3:
            continue
        if i > (testraster.shape[0] - 4) or j > (testraster.shape[1] - 4):
            continue

        #Calculate GLCM on a 7x7 window
        glcm_window = sarraster[i-3: i+4, j-3: j+4]
        glcm = greycomatrix(glcm_window, [1], [0], symmetric=True, normed=True)

        #Calculate contrast and replace center pixel
        contrast = greycoprops(glcm, 'contrast')
        testraster[i, j] = contrast

sarplot = plt.imshow(testraster, cmap='gray')
Results:

I had the same problem, different data. Here is a script I wrote that uses parallel processing and a sliding window approach:
import gdal, osr
import numpy as np
from scipy.interpolate import RectBivariateSpline
from numpy.lib.stride_tricks import as_strided as ast
import dask.array as da
from joblib import Parallel, delayed, cpu_count
import os
from skimage.feature import greycomatrix, greycoprops
def im_resize(im, Nx, Ny):
    '''
    resize array by bivariate spline interpolation
    '''
    ny, nx = np.shape(im)
    xx = np.linspace(0, nx, Nx)
    yy = np.linspace(0, ny, Ny)

    try:
        im = da.from_array(im, chunks=1000)  #dask implementation
    except:
        pass

    newKernel = RectBivariateSpline(np.r_[:ny], np.r_[:nx], im)
    return newKernel(yy, xx)
def p_me(Z):
    '''
    calculate greycoprops for a single window
    '''
    try:
        glcm = greycomatrix(Z, [5], [0], 256, symmetric=True, normed=True)
        cont = greycoprops(glcm, 'contrast')
        diss = greycoprops(glcm, 'dissimilarity')
        homo = greycoprops(glcm, 'homogeneity')
        eng = greycoprops(glcm, 'energy')
        corr = greycoprops(glcm, 'correlation')
        ASM = greycoprops(glcm, 'ASM')
        return (cont, diss, homo, eng, corr, ASM)
    except:
        return (0, 0, 0, 0, 0, 0)
def read_raster(in_raster):
    ds = gdal.Open(in_raster)
    data = ds.GetRasterBand(1).ReadAsArray()
    data[data<=0] = np.nan
    gt = ds.GetGeoTransform()
    xres = gt[1]
    yres = gt[5]

    # get the edge coordinates and add half the resolution
    # to go to center coordinates
    xmin = gt[0] + xres * 0.5
    xmax = gt[0] + (xres * ds.RasterXSize) - xres * 0.5
    ymin = gt[3] + (yres * ds.RasterYSize) + yres * 0.5
    ymax = gt[3] - yres * 0.5
    del ds

    # create a grid of xy coordinates in the original projection
    xx, yy = np.mgrid[xmin:xmax+xres:xres, ymax+yres:ymin:yres]
    return data, xx, yy, gt
def norm_shape(shap):
    '''
    Normalize numpy array shapes so they're always expressed as a tuple,
    even for one-dimensional shapes.
    '''
    try:
        i = int(shap)
        return (i,)
    except TypeError:
        # shape was not a number
        pass

    try:
        t = tuple(shap)
        return t
    except TypeError:
        # shape was not iterable
        pass

    raise TypeError('shape must be an int, or a tuple of ints')
def sliding_window(a, ws, ss=None, flatten=True):
    '''
    Source: http://www.johnvinyard.com/blog/?p=268#more-268
    Parameters:
        a  - an n-dimensional numpy array
        ws - an int (a is 1D) or tuple (a is 2D or greater) representing the size
             of each dimension of the window
        ss - an int (a is 1D) or tuple (a is 2D or greater) representing the
             amount to slide the window in each dimension. If not specified, it
             defaults to ws.
        flatten - if True, all slices are flattened, otherwise, there is an
                  extra dimension for each dimension of the input.
    Returns
        an array containing each n-dimensional window from a
    '''
    if ss is None:
        # ss was not provided. the windows will not overlap in any direction.
        ss = ws

    ws = norm_shape(ws)
    ss = norm_shape(ss)

    # convert ws, ss, and a.shape to numpy arrays
    ws = np.array(ws)
    ss = np.array(ss)
    shap = np.array(a.shape)

    # ensure that ws, ss, and a.shape all have the same number of dimensions
    ls = [len(shap), len(ws), len(ss)]
    if 1 != len(set(ls)):
        raise ValueError(
            'a.shape, ws and ss must all have the same length. They were %s' % str(ls))

    # ensure that ws is smaller than a in every dimension
    if np.any(ws > shap):
        raise ValueError(
            'ws cannot be larger than a in any dimension. '
            'a.shape was %s and ws was %s' % (str(a.shape), str(ws)))

    # how many slices will there be in each dimension?
    newshape = norm_shape(((shap - ws) // ss) + 1)

    # the shape of the strided array will be the number of slices in each dimension
    # plus the shape of the window (tuple addition)
    newshape += norm_shape(ws)

    # the strides tuple will be the array's strides multiplied by step size, plus
    # the array's strides (tuple addition)
    newstrides = norm_shape(np.array(a.strides) * ss) + a.strides
    a = ast(a, shape=newshape, strides=newstrides)
    if not flatten:
        return a

    # Collapse strided so that it has one more dimension than the window. I.e.,
    # the new array is a flat list of slices.
    meat = len(ws) if ws.shape else 0
    firstdim = (np.product(newshape[:-meat]),) if ws.shape else ()
    dim = firstdim + (newshape[-meat:])

    # remove any dimensions with size 1
    dim = filter(lambda i: i != 1, dim)
    return a.reshape(dim), newshape
def CreateRaster(xx, yy, std, gt, proj, driverName, outFile):
    '''
    Exports data to GTiff Raster
    '''
    std = np.squeeze(std)
    std[np.isinf(std)] = -99
    driver = gdal.GetDriverByName(driverName)
    rows, cols = np.shape(std)
    ds = driver.Create(outFile, cols, rows, 1, gdal.GDT_Float32)
    if proj is not None:
        ds.SetProjection(proj.ExportToWkt())
    ds.SetGeoTransform(gt)
    ss_band = ds.GetRasterBand(1)
    ss_band.WriteArray(std)
    ss_band.SetNoDataValue(-99)
    ss_band.FlushCache()
    ss_band.ComputeStatistics(False)
    del ds
#Stuff to change
if __name__ == '__main__':

    win_sizes = [7]
    for win_size in win_sizes[:]:
        in_raster = #Path to input raster
        win = win_size
        meter = str(win/4)

        #Define output file names
        contFile =
        dissFile =
        homoFile =
        energyFile =
        corrFile =
        ASMFile =

        merge, xx, yy, gt = read_raster(in_raster)
        merge[np.isnan(merge)] = 0

        Z, ind = sliding_window(merge, (win, win), (win, win))

        Ny, Nx = np.shape(merge)

        w = Parallel(n_jobs=cpu_count(), verbose=0)(delayed(p_me)(Z[k]) for k in xrange(len(Z)))

        cont = [a[0] for a in w]
        diss = [a[1] for a in w]
        homo = [a[2] for a in w]
        eng = [a[3] for a in w]
        corr = [a[4] for a in w]
        ASM = [a[5] for a in w]

        #Reshape to match number of windows
        plt_cont = np.reshape(cont, (ind[0], ind[1]))
        plt_diss = np.reshape(diss, (ind[0], ind[1]))
        plt_homo = np.reshape(homo, (ind[0], ind[1]))
        plt_eng = np.reshape(eng, (ind[0], ind[1]))
        plt_corr = np.reshape(corr, (ind[0], ind[1]))
        plt_ASM = np.reshape(ASM, (ind[0], ind[1]))
        del cont, diss, homo, eng, corr, ASM

        #Resize images to receive texture and define filenames
        contrast = im_resize(plt_cont, Nx, Ny)
        contrast[merge==0] = np.nan
        dissimilarity = im_resize(plt_diss, Nx, Ny)
        dissimilarity[merge==0] = np.nan
        homogeneity = im_resize(plt_homo, Nx, Ny)
        homogeneity[merge==0] = np.nan
        energy = im_resize(plt_eng, Nx, Ny)
        energy[merge==0] = np.nan
        correlation = im_resize(plt_corr, Nx, Ny)
        correlation[merge==0] = np.nan
        ASM = im_resize(plt_ASM, Nx, Ny)
        ASM[merge==0] = np.nan
        del plt_cont, plt_diss, plt_homo, plt_eng, plt_corr, plt_ASM
        del w, Z, ind, Ny, Nx

        driverName = 'GTiff'
        epsg_code = 26949
        proj = osr.SpatialReference()
        proj.ImportFromEPSG(epsg_code)

        CreateRaster(xx, yy, contrast, gt, proj, driverName, contFile)
        CreateRaster(xx, yy, dissimilarity, gt, proj, driverName, dissFile)
        CreateRaster(xx, yy, homogeneity, gt, proj, driverName, homoFile)
        CreateRaster(xx, yy, energy, gt, proj, driverName, energyFile)
        CreateRaster(xx, yy, correlation, gt, proj, driverName, corrFile)
        CreateRaster(xx, yy, ASM, gt, proj, driverName, ASMFile)

        del contrast, merge, xx, yy, gt, meter, dissimilarity, homogeneity, energy, correlation, ASM
This script calculates GLCM properties for a defined window size, with no overlap between adjacent windows.
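The question's per-pixel (overlapping) case can also drop most of the index bookkeeping: skimage ships a striding helper that builds the window array directly. A minimal sketch, assuming a 2-D uint8 raster named sarraster as in the question (the per-window GLCM loop itself remains, and remains the bottleneck):
import numpy as np
from skimage.util import view_as_windows
from skimage.feature import greycomatrix, greycoprops

# One overlapping 7x7 window per interior pixel: shape (H-6, W-6, 7, 7)
windows = view_as_windows(sarraster, (7, 7))

contrast = np.zeros(windows.shape[:2])
for i, j in np.ndindex(*windows.shape[:2]):
    glcm = greycomatrix(windows[i, j], [1], [0], symmetric=True, normed=True)
    contrast[i, j] = greycoprops(glcm, 'contrast')
Because windows can be reshaped to a flat list of 7x7 blocks, it also feeds the joblib Parallel pattern above without any manual index arithmetic.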

Related

Convolution on Python

I am supposed to calculate the convolution between the Fourier-transformed image and the mask.
from scipy import fftpack
import numpy as np
import imageio
from PIL import Image, ImageDraw
import cv2
import matplotlib.pyplot as plt
import math
from scipy.ndimage.filters import convolve
input_image = Image.open('....image....')
input_image=np.array(input_image)
M,N = input_image.shape[0],input_image.shape[1]
FT_img = fftpack.fftshift(fftpack.fft2(input_image))
n = 2; # order value can change this value accordingly
D0 = 60; # cut-off frequency can change this value accordingly
# Designing filter
u = np.arange(0, M)
idx = u > M/2
u[idx] = u[idx] - M
v = np.arange(0, N)
idy = v > N/2
v[idy] = v[idy] - N
V,U = np.meshgrid(v,u)
# Calculating Euclidean Distance
D=np.linalg.norm(V-U)
# determining the filtering mask
H = 1/(1 + (D0/D)**(2*n));
# Convolution between the Fourier Transformed image and the mask
G = convolve(H, FT_img)
And I get "Runtime error:filter weights array has incorrect shape." error at the last line when I run this code snippet. What I understand is H is float and FT_img is array so I cannot perform convolution on these. But I don't know how to solve that.
How can I solve this problem?
Calculate the distance D and the filter H for each (u, v); this yields an array with the same size as the input image. Multiplying that array (H, the filter) with the image in the Fourier domain is equivalent to convolution in the spatial domain, and the results are as follows:
import numpy as np
import cv2
import matplotlib.pyplot as plt

# Read Image as Grayscale
img = cv2.imread('input.png', 0)

# Designing filter
#------------------------------------------------------
def butterworth_filter(shape, n=2, D0=60):
    '''
    n = 2   # order, can change this value accordingly
    D0 = 60 # cut-off frequency, can change this value accordingly
    '''
    M, N = shape
    # Initialize filter with zeros
    H = np.zeros((M, N))
    # Traverse through filter
    for u in range(0, M):
        for v in range(0, N):
            # Get euclidean distance from point D(u,v) to the center
            D_uv = np.sqrt((u - M / 2) ** 2 + (v - N / 2) ** 2)
            # determining the filtering mask
            H[u, v] = 1/(1 + (D0/D_uv)**(2*n))
    return H
#-----------------------------------------------------
#-----------------------------------------------------
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
phase_spectrumR = np.angle(fshift)
magnitude_spectrum = 20*np.log(np.abs(fshift))
# Generate Butterworth Filter
H = butterworth_filter(img.shape)
# Convolution between the Fourier Transformed image and the mask
G = H * fshift
# Obtain the Result
result = np.abs(np.fft.ifft2(np.fft.ifftshift((G))))
plt.subplot(222)
plt.imshow(img, cmap='gray')
plt.title('Original')
plt.axis('off')
plt.subplot(221)
plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('magnitude spectrum')
plt.axis('off')
plt.subplot(223)
plt.imshow(H, "gray")
plt.title("Butterworth Filter")
plt.axis('off')
plt.subplot(224)
plt.imshow(result, "gray")
plt.title("Result")
plt.axis('off')
plt.show()
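As an aside, the double loop in butterworth_filter can be vectorized with a coordinate grid; a minimal sketch of the same high-pass filter (same shape, n and D0 arguments as above):
import numpy as np

def butterworth_filter_vectorized(shape, n=2, D0=60):
    M, N = shape
    # Distance of every pixel from the spectrum center, computed on a grid
    u, v = np.meshgrid(np.arange(M) - M / 2, np.arange(N) - N / 2, indexing='ij')
    D = np.sqrt(u ** 2 + v ** 2)
    # At the exact center D == 0 and the high-pass response tends to 0
    with np.errstate(divide='ignore'):
        H = 1.0 / (1.0 + (D0 / D) ** (2 * n))
    H[D == 0] = 0.0
    return H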

Deprojecting depth onto original mesh

I am trying to have Blender render a depth map of an object and then move the resulting point cloud back to overlay the original object. Currently I have no issue with rendering the object and extracting the depth into its place.
However, I am stuck when trying to position the point cloud at its original position.
I'm trying to apply the inverse camera world matrix to the rendered point cloud (in blue). Unfortunately, when I apply said inverse, it appears nowhere near where I'd expect (in red).
I have attached the entirety of the code needed to replicate this behaviour. I would appreciate it if someone could point me to the right matrix to multiply the point cloud by.
from mathutils import Vector, Quaternion, Euler, Matrix
import numpy as np
import bpy

def main_script():
    clear_scene()
    prepare_views()

    tmp_path = "/tmp/tmp_render.exr"
    scene = get_scene("Scene")

    camera = create_camera("Camera")
    camera.rotation_euler = Euler([np.pi * 0.5, 0, np.pi * 0.5], "XYZ")
    camera.location = Vector([4.5, 0, 1])

    bpy.ops.mesh.primitive_monkey_add(
        location=(0, 0, 1), rotation=(0, 0, np.pi*0.5), size=1.0)

    _w, _h = 640, 480

    update_scene()
    init_rendering(scene, camera, width=640, height=480)
    update_scene()

    matrix_K = get_calibration_matrix_K_from_blender(scene, camera.data)
    _fy, _fx = matrix_K[0][0], matrix_K[1][1]
    _cy, _cx = matrix_K[0][2], matrix_K[1][2]

    scene.render.filepath = tmp_path
    bpy.ops.render.render(write_still=True)

    depth = read_exr(tmp_path, "R")["R"]
    depth = np.reshape(convert_to_numpy(depth), [_h, _w])

    exr_cloud = depth_to_cloud(_w, _h, _fx, _fy, _cx, _cy, depth)
    exr_cloud = np.reshape(exr_cloud, [-1, 3])
    exr_cloud = exr_cloud[(exr_cloud[..., 2] < 100) & (exr_cloud[..., 2] > 0)]

    matrix = np.reshape(camera.matrix_world, [4, 4])
    matrix = np.linalg.inv(matrix)  # why doesn't this place the depth properly?

    vertices = np.ones([exr_cloud.shape[0], 4], dtype=np.float32)
    vertices[:, 0:3] = exr_cloud
    vertices = np.array(
        [matrix @ vertex for vertex in vertices], dtype=np.float32)
    vertices = vertices[..., :3]

    create_mesh("Suzanne_EXR", exr_cloud, [])
    create_mesh("SuzanneT_EXR", vertices, [])

"""
utility methods required to run the script
"""

def clear_scene():
    for scene in bpy.data.scenes:
        for obj in scene.objects:
            bpy.context.collection.objects.unlink(obj)

def read_exr(path, channels):
    import OpenEXR as _OpenEXR
    import Imath as _Imath

    file = _OpenEXR.InputFile(path)
    FLOAT = _Imath.PixelType(_Imath.PixelType.FLOAT)

    results = {}
    for ch in channels:
        results[ch] = file.channel(ch, FLOAT)
    file.close()
    return results

def convert_to_numpy(data):
    import array as _array
    return np.array(_array.array("f", data).tolist())

def update_scene():
    dg = bpy.context.evaluated_depsgraph_get()
    dg.update()

def prepare_views():
    preferences = bpy.context.preferences
    preferences.view.show_tooltips_python = True
    preferences.view.show_developer_ui = True
    preferences.view.render_display_type = "NONE"

def init_rendering(scene, camera, width=None, height=None):
    def set_rendering_settings(camera, scene, width=640, height=480):
        image_settings = scene.render.image_settings
        image_settings.file_format = "OPEN_EXR"
        image_settings.use_zbuffer = True

        scene.render.resolution_x = width
        scene.render.resolution_y = height
        # scene.render.use_antialiasing = False

        scene.use_nodes = True
        scene.camera = camera

        node_tree = scene.node_tree
        nodes = node_tree.nodes

        node_render_layers = nodes["Render Layers"]
        node_composite = nodes["Composite"]

        node_tree.links.clear()
        node_tree.links.new(
            node_render_layers.outputs["Depth"], node_composite.inputs["Image"])

    set_rendering_settings(camera, scene)

def get_scene(name): return bpy.data.scenes[name]

def create_camera(name):
    camera = bpy.data.cameras.new(name)
    camera.lens = 50

    obj = bpy.data.objects.new(name, camera)
    bpy.context.collection.objects.link(obj)
    return obj

# ---------------------------------------------------------------
# 3x4 P matrix from Blender camera
# ---------------------------------------------------------------
# Build intrinsic camera parameters from Blender camera data
#
# See notes on this in
# blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model
def get_calibration_matrix_K_from_blender(scene, camera):
    from mathutils import Matrix as _Matrix

    f_in_mm = camera.lens
    resolution_x_in_px = scene.render.resolution_x
    resolution_y_in_px = scene.render.resolution_y
    scale = scene.render.resolution_percentage / 100
    sensor_width_in_mm = camera.sensor_width
    sensor_height_in_mm = camera.sensor_height
    pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y

    if (camera.sensor_fit == 'VERTICAL'):
        # the sensor height is fixed (sensor fit is vertical),
        # the sensor width is effectively changed with the pixel aspect ratio
        s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio
        s_v = resolution_y_in_px * scale / sensor_height_in_mm
    else:  # 'HORIZONTAL' and 'AUTO'
        # the sensor width is fixed (sensor fit is horizontal),
        # the sensor height is effectively changed with the pixel aspect ratio
        pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y
        s_u = resolution_x_in_px * scale / sensor_width_in_mm
        s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm

    # Parameters of intrinsic calibration matrix K
    alpha_u = f_in_mm * s_u
    alpha_v = f_in_mm * s_v
    u_0 = resolution_x_in_px * scale / 2
    v_0 = resolution_y_in_px * scale / 2
    skew = 0  # only use rectangular pixels

    K = _Matrix(
        ((alpha_u, skew,    u_0),
         (0,       alpha_v, v_0),
         (0,       0,       1)))
    return K

def create_mesh(name, vertices, faces):
    import bmesh as _bmesh

    mesh = bpy.data.meshes.new("Mesh_%s" % name)
    mesh.from_pydata(vertices, [], faces)
    mesh.update()

    obj = bpy.data.objects.new(name, mesh)
    bpy.context.collection.objects.link(obj)

    bm = _bmesh.new()
    bm.from_mesh(mesh)
    bm.to_mesh(mesh)
    bm.free()
    return obj

def depth_to_cloud(w, h, fx, fy, cx, cy, depth):
    from numpy import concatenate as _concat
    from numpy import indices as _indices
    from numpy import newaxis as _newaxis

    indices_y, indices_x = _indices(depth.shape)

    ys, xs, zs = \
        (indices_y - cy) * depth / fy, \
        (indices_x - cx) * depth / fx, \
        depth

    points = _concat([xs[..., _newaxis], ys[..., _newaxis],
                      zs[..., _newaxis]], axis=2)
    return points

if __name__ == "__main__":
    main_script()
The problem was compound. First, I needed to compute the transformed vertices not with the inverse camera world matrix but with a negatively scaled camera world matrix, like so:
matrix_cam = np.reshape(camera.matrix_world, [4, 4])
mat_scale = np.array(Matrix.Scale(-1, 4))
matrix = matrix_cam @ mat_scale

vertices = np.ones([exr_cloud.shape[0], 4], dtype=np.float32)
vertices[:, 0:3] = exr_cloud
vertices = np.array(
    [matrix @ vertex for vertex in vertices], dtype=np.float32)
vertices = vertices[..., :3]
Additionally, there was an issue with the depth decoding which deformed the point cloud, fixed like so:
ys, xs, zs = \
    (indices_y - cx) * depth / fx, \
    (indices_x - cy) * depth / fy, \
    depth
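As a side note, the per-vertex list comprehension can be collapsed into a single matrix product over the whole homogeneous point array; a minimal sketch, assuming vertices has shape (N, 4) as above:
# (4, 4) @ (4, N) -> (4, N); transpose back to (N, 4)
vertices = (matrix @ vertices.T).T
vertices = vertices[..., :3]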

python program to fade an image in radial direction

I am trying to write a program which fades an image in the radial direction, meaning that as we move away from the centre of the image, the pixels fade to black. For this, I have written five different functions:
center: returns coordinate pair (center_y, center_x) of the image center.
radial_distance: returns for image with width w and height h an array with shape (h,w), where the number at index (i,j) gives the euclidean distance from the point (i,j) to the center of the image.
scale: returns a copy of the array 'a' (or image) with its elements scaled to be in the given range.
radial_mask: takes an image as a parameter and returns an array with same height and width filled with values between 0.0 and 1.0.
radial_fade: returns the image multiplied by its radial mask.
The program is:
import numpy as np
import matplotlib.pyplot as plt

def center(a):
    y, x = a.shape[:2]
    return ((y-1)/2, (x-1)/2)  # note the order: (center_y, center_x)

def radial_distance(b):
    h, w = b.shape[:2]
    y, x = center(b)
    o = b[:h,:w,0]
    for i in range(h):
        for j in range(w):
            o[i,j] = np.sqrt((y-i)**2 + (x-j)**2)
    return o

def scale(c, tmin=0.0, tmax=1.0):
    """Returns a copy of the array with its values scaled to be in the range
    [tmin,tmax]."""
    mini, maxi = c.min(), c.max()
    if maxi == 0:
        return 0
    else:
        m = (tmax - tmin)/(maxi - mini)
        f = tmin - m*mini
        return c*m + f

def radial_mask(d):
    f = radial_distance(d)
    g = scale(f, tmin=0.0, tmax=1.0)
    # f = g[:,:,0]
    n = 1.0 - g
    return n

def radial_fade(l):
    f, g = l.shape[:2]
    q = l[:f,:g,0]
    return q * radial_mask(l)

image = plt.imread("src/painting.png")
fig, ax = plt.subplots(3)
masked = radial_mask(image)
faded = radial_fade(image)

ax[0].imshow(image)
ax[1].imshow(masked)
ax[2].imshow(faded)
plt.show()
There is something wrong somewhere in the code, as it does not do the expected job.
One problem is that in
o = b[:h,:w,0]
you're using the same precision as the image, which may be integer (e.g. uint8).
You should use, for example,
o = np.zeros((h, w), np.float32)
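The double loop in radial_distance can then also be replaced by a vectorized distance grid; a minimal sketch keeping the same function name:
import numpy as np

def radial_distance(b):
    h, w = b.shape[:2]
    cy, cx = (h - 1) / 2, (w - 1) / 2
    # index grids for every pixel, one vectorized distance formula
    yy, xx = np.indices((h, w))
    return np.sqrt((yy - cy) ** 2 + (xx - cx) ** 2)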

Make a total vector for three vectors in python

We have a class that has three methods (Bdisk, Bhalo, and BX).
All of these methods accept arrays (e.g. shape (1000,)), not matrices (e.g. shape (2,1000)).
I want to get the total of all these functions (total = Bdisk + Bhalo + BX); together they give the magnetic field in all three components (B_r, B_phi, B_z) for a thousand coordinate points (r, phi, z).
The code is here:
import numpy as np
import logging
import warnings
import gmf

signum = lambda x: (x < 0.) * -1. + (x >= 0) * 1.
pi = np.pi

#Class with analytical functions that describe the GMF according to the model of JF12
class GMF(object):
    def __init__(self):  # self is automatically set to reference the newly created object that needs to be initialized
        self.Rsun = -8.5  # position of the sun along the x axis in kpc
        ############################################################################
        # Disk Parameters
        ############################################################################
        self.bring, self.bring_unc = 0.1, 0.1    # floats, field strength in ring at 3 kpc < r < 5 kpc
        self.hdisk, self.hdisk_unc = 0.4, 0.03   # floats, disk/halo transition height
        self.wdisk, self.wdisk_unc = 0.27, 0.08  # floats, transition width
        self.b = np.array([0.1, 3., -0.9, -0.8, -2.0, -4.2, 0., 2.7])    # (8,)-dim np.array, field strength of spiral arms at 5 kpc
        self.b_unc = np.array([1.8, 0.6, 0.8, 0.3, 0.1, 0.5, 1.8, 1.8])  # uncertainty
        self.rx = np.array([5.1, 6.3, 7.1, 8.3, 9.8, 11.4, 12.7, 15.5])  # (8,)-dim np.array, dividing lines of spiral arms; coordinates of neg. x-axis that intersect with arm
        self.idisk = 11.5 * pi/180.  # float, spiral arms pitch angle
        #############################################################################
        # Halo Parameters
        #############################################################################
        self.Bn, self.Bn_unc = 1.4, 0.1         # floats, field strength northern halo
        self.Bs, self.Bs_unc = -1.1, 0.1        # floats, field strength southern halo
        self.rn, self.rn_unc = 9.22, 0.08       # floats, transition radius north, lower limit
        self.rs, self.rs_unc = 16.7, 0.         # transition radius south, lower limit
        self.whalo, self.whalo_unc = 0.2, 0.12  # floats, transition width
        self.z0, self.z0_unc = 5.3, 1.6         # floats, vertical scale height
        ##############################################################################
        # Out of plane or "X" component Parameters
        ##############################################################################
        self.BX0, self.BX_unc = 4.6, 0.3  # floats, field strength at origin
        self.ThetaX0, self.ThetaX0_unc = 49. * pi/180., pi/180.  # elev. angle at z = 0, r > rXc
        self.rXc, self.rXc_unc = 4.8, 0.2  # floats, radius where thetaX = thetaX0
        self.rX, self.rX_unc = 2.9, 0.1    # floats, exponential scale length
        # striated field
        self.gamma, self.gamma_unc = 2.92, 0.14  # striation and/or rel. elec. number dens. rescaling
        return

    ##################################################################################
    # Transition function given by logistic function eq.5
    ##################################################################################
    def L(self, z, h, w):
        if np.isscalar(z):
            z = np.array([z])  # scalar or numpy array with positions (height above disk, z; distance from center, r)
        ones = np.ones(z.shape[0])
        return 1./(ones + np.exp(-2. * (np.abs(z) - h)/w))

    ####################################################################################
    # return distance from center for angle phi of logarithmic spiral
    # r(phi) = rx * exp(b * phi) as np.array
    ####################################################################################
    def r_log_spiral(self, phi):
        if np.isscalar(phi):  # Returns True if the type of num is a scalar type.
            phi = np.array([phi])
        ones = np.ones(phi.shape[0])
        # self.rx.shape = 8
        # phi.shape = p
        # then result is given as (8,p)-dim array, each row stands for one rx
        # vstack: take a sequence of arrays and stack them vertically to make a single array
        # tensordot(a, b, axes=0): compute tensor dot product along specified axes
        result = np.tensordot(self.rx, np.exp((phi - 3.*pi*ones) / np.tan(pi/2. - self.idisk)), axes=0)
        result = np.vstack((result, np.tensordot(self.rx, np.exp((phi - pi*ones) / np.tan(pi/2. - self.idisk)), axes=0)))
        result = np.vstack((result, np.tensordot(self.rx, np.exp((phi + pi*ones) / np.tan(pi/2. - self.idisk)), axes=0)))
        return np.vstack((result, np.tensordot(self.rx, np.exp((phi + 3.*pi*ones) / np.tan(pi/2. - self.idisk)), axes=0)))

    #############################################################################################
    # Disk component in galactocentric cylindrical coordinates (r, phi, z)
    #############################################################################################
    def Bdisk(self, r, phi, z):
        # Bdisk is purely azimuthal (toroidal) with the field strength b_ring
        """
        r:   N-dim np.array, distance from origin in GC cylindrical coordinates, in kpc
        z:   N-dim np.array, height in kpc in GC cylindrical coordinates
        phi: N-dim np.array, polar angle in GC cylindrical coordinates, in radian
        Bdisk:   (3,N)-dim np.array with (r, phi, z) components of disk field for each coordinate tuple
        |Bdisk|: N-dim np.array, absolute value of Bdisk for each coordinate tuple
        """
        if (not r.shape[0] == phi.shape[0]) and (not z.shape[0] == phi.shape[0]):
            warnings.warn("Lists do not have equal shape! returning -1", RuntimeWarning)
            return -1
        # Return a new array of given shape and type, filled with zeros.
        Bdisk = np.zeros((3, r.shape[0]))  # Bdisk vector in r, phi, z
        ones = np.ones(r.shape[0])
        r_center = (r >= 3.) & (r < 5.1)
        r_disk = (r >= 5.1) & (r <= 20.)
        Bdisk[1, r_center] = self.bring
        # Determine in which arm we are;
        # this is done for each coordinate individually
        if np.sum(r_disk):
            rls = self.r_log_spiral(phi[r_disk])
            rls = np.abs(rls - r[r_disk])
            arms = np.argmin(rls, axis=0) % 8
            # The magnetic spiral is defined at r = 5 kpc and falls off as 1/r; the field direction is given by:
            Bdisk[0, r_disk] = np.sin(self.idisk) * self.b[arms] * (5. / r[r_disk])
            Bdisk[1, r_disk] = np.cos(self.idisk) * self.b[arms] * (5. / r[r_disk])
        Bdisk *= (ones - self.L(z, self.hdisk, self.wdisk))  # multiplied by L
        return Bdisk, np.sqrt(np.sum(Bdisk**2., axis=0))  # the Bdisk, the normalization
        # axis=0: sum over index 0 (rows)
        # axis=1: sum over index 1 (columns)

    ##############################################################################################
    # Halo component
    ###############################################################################################
    def Bhalo(self, r, z):
        # Bhalo is purely azimuthal (toroidal), i.e. has only a phi component
        if (not r.shape[0] == z.shape[0]):
            warnings.warn("Lists do not have equal shape! returning -1", RuntimeWarning)
            return -1
        Bhalo = np.zeros((3, r.shape[0]))  # Bhalo vector in r, phi, z; rows: r, phi and z component
        ones = np.ones(r.shape[0])
        m = (z != 0.)
        # SEE equation 6.
        Bhalo[1, m] = np.exp(-np.abs(z[m])/self.z0) * self.L(z[m], self.hdisk, self.wdisk) * \
            (self.Bn * (ones[m] - self.L(r[m], self.rn, self.whalo)) * (z[m] > 0.)
             + self.Bs * (ones[m] - self.L(r[m], self.rs, self.whalo)) * (z[m] < 0.))
        return Bhalo, np.sqrt(np.sum(Bhalo**2., axis=0))

    ##############################################################################################
    # BX component (OUT OF THE PLANE)
    ###############################################################################################
    def BX(self, r, z):
        # BX is purely ASS and poloidal, i.e. phi component = 0
        if (not r.shape[0] == z.shape[0]):
            warnings.warn("Lists do not have equal shape! returning -1", RuntimeWarning)
            return -1
        BX = np.zeros((3, r.shape[0]))  # BX vector in r, phi, z; rows: r, phi and z component
        m = np.sqrt(r**2. + z**2.) >= 1.

        bx = lambda r_p: self.BX0 * np.exp(-r_p / self.rX)         # eq.7
        thetaX = lambda r, z, r_p: np.arctan(np.abs(z)/(r - r_p))  # eq.10

        r_p = r[m] * self.rXc/(self.rXc + np.abs(z[m]) / np.tan(self.ThetaX0))  # eq.9

        m_r_b = r_p > self.rXc   # region with constant elevation angle
        m_r_l = r_p <= self.rXc  # region with varying elevation angle

        theta = np.zeros(z[m].shape[0])
        b = np.zeros(z[m].shape[0])

        r_p0 = (r[m])[m_r_b] - np.abs((z[m])[m_r_b]) / np.tan(self.ThetaX0)  # eq.8
        b[m_r_b] = bx(r_p0) * r_p0 / (r[m])[m_r_b]  # field strength in the constant elevation angle region (b_x(r_p) r_p / r)
        theta[m_r_b] = self.ThetaX0 * np.ones(theta.shape[0])[m_r_b]

        b[m_r_l] = bx(r_p[m_r_l]) * (r_p[m_r_l]/(r[m])[m_r_l])**2.  # field strength with varying elevation angle (b_x(r_p)(r_p/r)**2)
        theta[m_r_l] = thetaX((r[m])[m_r_l], (z[m])[m_r_l], r_p[m_r_l])

        mz = (z[m] == 0.)
        theta[mz] = np.pi/2.

        BX[0, m] = b * (np.cos(theta) * (z[m] >= 0) + np.cos(pi*np.ones(theta.shape[0]) - theta) * (z[m] < 0))
        BX[2, m] = b * (np.sin(theta) * (z[m] >= 0) + np.sin(pi*np.ones(theta.shape[0]) - theta) * (z[m] < 0))
        return BX, np.sqrt(np.sum(BX**2., axis=0))
Then I create three arrays, one for r, one for phi, and one for z. Each of these arrays has (e.g.) a thousand elements, like this:
import gmf

gmfm = gmf.GMF()

x = np.linspace(-20., 20., 100)
y = np.linspace(-20., 20., 100)
z = np.linspace(-1., 1., x.shape[0])
xx, yy = np.meshgrid(x, y)
rr = np.sqrt(xx**2. + yy**2.)
theta = np.arctan2(yy, xx)

for i, r in enumerate(rr[:]):
    Bdisk, Babs_d = gmfm.Bdisk(r, theta[i], z)
    Bhalo, Babs_h = gmfm.Bhalo(r, z)
    BX, Babs_x = gmfm.BX(r, z)
    Btotal = Bdisk + Bhalo + BX
But when I add the three functions together (Btotal = Bdisk + Bhalo + BX), I get a 2-D matrix with 3 rows and 100 columns.
My question is: how can I add these three functions together to get Btotal in shape (n,), e.g. shape (100,)?
Because, as I said in the beginning, the three functions accept arrays (e.g. shape (1000,)), so when we add the three functions together we should get the total in the same shape (n,).
I do not know how to do it; could you please tell me how?
Thank you for your cooperation.
You need to correct the indentation, for example in the Bdisk method.
More importantly, in
for i, r in enumerate(rr[:]):
    Bdisk, Babs_d = gmfm.Bdisk(r, theta[i], z)
    Bhalo, Babs_h = gmfm.Bhalo(r, z)
    BX, Babs_x = gmfm.BX(r, z)
    Btotal = Bdisk + Bhalo + BX
are you doing this addition for each iteration, or once at the end of the loop? You aren't accumulating any values over iterations; you are just throwing away the old ones, leaving you with the result of the final iteration.
As for adding the arrays - it appears that all your arrays are initialized like:
Bdisk = np.zeros((3,r.shape[0]))
If that's what the methods return, then
Bdisk + Bhalo + BX
will just sum the corresponding elements of each array, resulting in a Btotal with the same shape. If you don't like the shape of Btotal, change how Bdisk is calculated, because Btotal has the same shape.
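To get the shape (100,) asked for, collapse the three (r, phi, z) rows into a field magnitude per coordinate point; a minimal sketch of one loop body, assuming the methods return (3, 100) arrays as above:
for i, r in enumerate(rr):
    Bdisk, Babs_d = gmfm.Bdisk(r, theta[i], z)
    Bhalo, Babs_h = gmfm.Bhalo(r, z)
    BX, Babs_x = gmfm.BX(r, z)
    Btotal = Bdisk + Bhalo + BX                       # (3, 100) components
    Babs_total = np.sqrt(np.sum(Btotal**2., axis=0))  # (100,) magnitudes
    # collect Babs_total (e.g. append to a list) if you need it after the loop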

NumPy IFFT introducing black bars in OaA Convolution Algorithm

I'm having trouble diagnosing and fixing this error. I'm trying to write the OaA algorithm, described in this paper.
#!/usr/bin/env python
# -*- coding: utf-8 -*-

""" Quick implementation of several convolution algorithms to compare times
"""

import numpy as np
import _kernel
from tqdm import trange, tqdm
from PIL import Image
from scipy.misc import imsave
from time import time, sleep

class convolve(object):
    """ contains methods to convolve two images """
    def __init__(self, image_array, kernel, back_same_size=True):
        self.array = image_array
        self.kernel = kernel

        # Store these values as they will be accessed a _lot_
        self.__rangeX_ = self.array.shape[0]
        self.__rangeY_ = self.array.shape[1]
        self.__rangeKX_ = self.kernel.shape[0]
        self.__rangeKY_ = self.kernel.shape[1]

        # Ensure the kernel is suitable to convolve the image
        if (self.__rangeKX_ >= self.__rangeX_ or
                self.__rangeKY_ >= self.__rangeY_):
            raise ValueError('Must submit suitably-sized arrays')

        if (back_same_size):
            # pad array for convolution
            self.__offsetX_ = self.__rangeKX_ // 2
            self.__offsetY_ = self.__rangeKY_ // 2

            self.array = np.lib.pad(self.array,
                                    [(self.__offsetY_, self.__offsetY_),
                                     (self.__offsetX_, self.__offsetX_)],
                                    mode='constant', constant_values=0)

            # Update these
            self.__rangeX_ = self.array.shape[0]
            self.__rangeY_ = self.array.shape[1]
        else:
            self.__offsetX_ = 0
            self.__offsetY_ = 0

        # to be returned instead of the originals
        self.__arr_ = np.zeros([self.__rangeX_, self.__rangeY_])

    def spaceConv(self):
        """ normal convolution, O(N^2*n^2). This is usually too slow """

        # this is the O(N^2) part of this algorithm
        for i in trange(self.__rangeX_):
            for j in xrange(self.__rangeY_):
                # Now the O(n^2) portion
                total = 0.0
                for k in xrange(self.__rangeKX_):
                    for t in xrange(self.__rangeKY_):
                        total += self.kernel[k][t] * self.array[i+k][j+t]

                # Update entry in self.__arr_, which is to be returned
                # http://stackoverflow.com/a/38320467/3928184
                self.__arr_[i][j] = total

        return self.__arr_[self.__offsetX_:self.__rangeX_ - self.__offsetX_,
                           self.__offsetY_:self.__rangeY_ - self.__offsetY_]

    def spaceConvDot(self):
        """ Exactly the same as the former method """

        def dot(ind, jnd):
            """ perform a simple 'dot product' between the 2
            dimensional image subsets. """
            total = 0.0

            # This is the O(n^2) part of the algorithm
            for k in xrange(self.__rangeKX_):
                for t in xrange(self.__rangeKY_):
                    total += self.kernel[k][t] * self.array[k + ind, t + jnd]
            return total

        # this is the O(N^2) part of the algorithm
        for i in trange(self.__rangeX_):
            for j in xrange(self.__rangeY_):
                self.__arr_[i][j] = dot(i, j)

        return self.__arr_[self.__offsetX_:self.__rangeX_ - self.__offsetX_,
                           self.__offsetY_:self.__rangeY_ - self.__offsetY_]

    def OAconv(self):
        """ faster convolution algorithm, O(N^2*log(n)). """
        from numpy.fft import fft2 as FFT, ifft2 as iFFT

        # solve for the total padding along each axis
        diffX = (self.__rangeKX_ - self.__rangeX_ +
                 self.__rangeKX_ * (self.__rangeX_ // self.__rangeKX_)) % self.__rangeKX_
        diffY = (self.__rangeKY_ - self.__rangeY_ +
                 self.__rangeKY_ * (self.__rangeY_ // self.__rangeKY_)) % self.__rangeKY_

        # padding on each side, i.e. left, right, top and bottom;
        # centered as well as possible
        right = diffX // 2
        left = diffX - right
        bottom = diffY // 2
        top = diffY - bottom

        # pad the array
        self.array = np.lib.pad(self.array,
                                ((left, right), (top, bottom)),
                                mode='constant', constant_values=0)

        divX = self.array.shape[0] / float(self.__rangeKX_)
        divY = self.array.shape[1] / float(self.__rangeKY_)

        # Let's just make sure...
        if not (divX % 1.0 == 0.0 or divY % 1.0 == 0.0):
            raise ValueError('Image not partitionable (?)')
        else:
            divX = int(divX)
            divY = int(divY)

        # a list of tuples to partition the array by
        subsets = [(i*self.__rangeKX_, (i + 1)*self.__rangeKX_,
                    j*self.__rangeKY_, (j + 1)*self.__rangeKY_)
                   for i in xrange(divX)
                   for j in xrange(divY)]

        # padding for individual blocks in the subsets list
        padX = self.__rangeKX_ // 2
        padY = self.__rangeKY_ // 2

        self.__arr_ = np.lib.pad(self.__arr_,
                                 ((left + padX, right + padX),
                                  (top + padY, bottom + padY)),
                                 mode='constant', constant_values=0)

        kernel = np.pad(self.kernel,
                        [(padY, padY), (padX, padX)],
                        mode='constant', constant_values=0)

        # We only need to do this once
        trans_kernel = FFT(kernel)

        # transform each partition and OA on conv_image
        for tup in tqdm(subsets):
            # slice and pad the array subset
            subset = self.array[tup[0]:tup[1], tup[2]:tup[3]]
            subset = np.lib.pad(subset,
                                [(padY, padY), (padX, padX)],
                                mode='constant', constant_values=0)
            trans_subset = FFT(subset)

            # multiply the two arrays entrywise
            subset = trans_kernel * trans_subset
            space = iFFT(subset).real

            # overlap with indices and add them together
            self.__arr_[tup[0]:tup[1] + 2 * padX,
                        tup[2]:tup[3] + 2 * padY] += space

        # crop image and get it back, convolved
        return self.__arr_[self.__offsetX_ + padX + left:
                           padX + left + self.__rangeX_ - self.__offsetX_,
                           self.__offsetY_ + padY + bottom:
                           padY + bottom + self.__rangeY_ - self.__offsetY_]

    def OSconv(self):
        """ Convolve an image using OS """
        from numpy.fft import fft2 as FFT, ifft2 as iFFT
        pass

    def builtin(self):
        """ Convolves using SciPy's convolution function - extremely
        fast """
        from scipy.ndimage.filters import convolve
        return convolve(self.array, self.kernel)

if __name__ == '__main__':
    try:
        import pyplot as plt
    except ImportError:
        import matplotlib.pyplot as plt

    image = np.array(Image.open('spider.jpg'))
    image = np.rot90(np.rot90(np.rot90(image.T[0])))

    times = []
    #for i in range(3, 21, 2):
    kern = _kernel.Kernel()
    kern = kern.Kg2(11, 11, sigma=2.5, muX=0.0, muY=0.0)
    kern /= np.sum(kern)  # normalize volume

    conv = convolve(image, kern)
    #
    # # Time the result of increasing kernel size
    # _start = time()
    convolved = conv.OAconv()
    #convolved = conv.builtin()
    # _end = time()
    # times.append(_end - _start)

    #x = np.array(range(3, 21, 2))
    #plt.plot(range(3, 21, 2), times)
    #plt.title('Kernel Size vs. spaceConv time', fontsize=12)
    #plt.xlabel('Kernel Size (px)', fontsize=12)
    #plt.ylabel('Time (s)', fontsize=12)
    #plt.xticks(x, x)
    #plt.show()

    #conv = convolve(image[:2*kern.shape[0], :5*kern.shape[1]], kern)
    plt.imshow(convolved, interpolation='none', cmap='gray')
    plt.show()
    #imsave('spider2', convolved, format='png')
But now, when I call it, I get black bars in a test image as follows:
Here's an example Gaussian kernel I am using.
[[ 0. 0.02390753 0.03476507 0.02390753 0. ]
[ 0.02390753 0.06241541 0.07990366 0.06241541 0.02390753]
[ 0.03476507 0.07990366 0.10040324 0.07990366 0.03476507]
[ 0.02390753 0.06241541 0.07990366 0.06241541 0.02390753]
[ 0. 0.02390753 0.03476507 0.02390753 0. ]]
I believe I've narrowed the problem down to the multiplication and the IFFT:
space = np.real(IFFT(transformed_kernel*transformed_subset))
I think it has something to do with the IFFT of discrete Gaussian kernel (for some reason). It's odd because if I just plot
space = np.real(IFFT(transformed_subset))
I get the following (no black bars, and it pieces it back together fine):
And if I plot the opposite, i.e.
space = np.real(IFFT(transformed_kernel))
I again get no black bars, and it appears to place them in the right places.
What am I missing? I've been staring at this for days, editing indices and whatnot, but I can't get rid of this tessellation :(
Your problem is that your kernel seems to be in the middle:
But it is not - there is also a shift of (11, 11) involved, which becomes obvious when you look at the results of the convolution. The center of your kernel should be at (0, 0), and (because of the modulo arithmetic) your kernel should look like this:
So I changed your code a little (sorry, I've never used np that much, but I hope you
can get the gist):
... your code ...

kernel = np.pad(self.kernel,
                [(padY, padY), (padX, padX)],
                mode='constant', constant_values=0)

# Move the kernel center to the origin:
new_kernel = np.full_like(kernel, 0)
X, Y = kernel.shape
X_2 = X // 2
Y_2 = Y // 2
for x in xrange(X):
    for y in xrange(Y):
        n_x = (x + X_2) % X
        n_y = (y + Y_2) % Y
        new_kernel[n_x, n_y] = kernel[x, y]

# We only need to do this once
trans_kernel = FFT(new_kernel)  # take the transform of the shifted kernel

... your code ...
And voilà:
There is no black grid!
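For what it's worth, the same circular shift can be written without the explicit double loop; a minimal sketch with np.roll, which reproduces the modulo loop above exactly:
import numpy as np

X, Y = kernel.shape
# shift all entries by (X//2, Y//2) with wrap-around, like the modulo loop
new_kernel = np.roll(np.roll(kernel, X // 2, axis=0), Y // 2, axis=1)
trans_kernel = np.fft.fft2(new_kernel)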
