How to best optimize calculations iterated over an NxM grid in Python

Working in Python, I am doing some physics calculations over an NxM grid of values, where N goes from 1 to 3108 and M goes from 1 to 2304 (this corresponds to a large image). I need to calculate a value at each point in this space, which totals roughly 7 million calculations. My current approach is painfully slow, and I am wondering if there is a way to complete this task without it taking hours...
My first approach was just to use nested for loops, but this seemed like the least efficient way to solve my problem. I have tried using NumPy's nditer and iterating over each axis individually, but I've read that it doesn't actually speed up my computations. Rather than looping through each axis individually, I also tried making a 3-D array and looping through the outer axis, as shown in Brian's answer to "How can I, in python, iterate over multiple 2d lists at once, cleanly?". Here is the current state of my code:
import numpy as np
x,y = np.linspace(1,3108,num=3108),np.linspace(1,2304,num=2304) # x&y dimensions of image
X,Y = np.meshgrid(x,y,indexing='ij')
all_coords = np.dstack((X,Y)) # moves to 3-D
all_coords = all_coords.astype(int) # sets coords to int
For reference, all_coords looks like this:
array([[[1.000e+00, 1.000e+00],
        [1.000e+00, 2.000e+00],
        [1.000e+00, 3.000e+00],
        ...,
        [1.000e+00, 2.302e+03],
        [1.000e+00, 2.303e+03],
        [1.000e+00, 2.304e+03]],

       [[2.000e+00, 1.000e+00],
        [2.000e+00, 2.000e+00],
        [2.000e+00, 3.000e+00],
        ...,
        [2.000e+00, 2.302e+03],
        [2.000e+00, 2.303e+03],
        [2.000e+00, 2.304e+03]],
and so on. Back to my code...
'''
- below is a function that does a calculation on the full grid using the distance between x0,y0 and each point on the grid.
- the function takes x0,y0 and returns the calculated values across the grid
'''
def do_calc(x0,y0):
    del_x, del_y = X-x0, Y-y0
    np.seterr(divide='ignore', invalid='ignore')
    dmx_ij = (del_x/((del_x**2)+(del_y**2))) # x component
    dmy_ij = (del_y/((del_x**2)+(del_y**2))) # y component
    return dmx_ij,dmy_ij
# now the actual loop
def do_loop():
    dmx,dmy = 0,0
    for pair in all_coords:
        for xi,yi in pair:
            DM = do_calc(xi,yi)
            dmx,dmy = dmx+DM[0],dmy+DM[1]
    return dmx,dmy
As you might guess, this code takes an incredibly long time to run. If there is any way to modify it so that it doesn't take hours to complete, I would be extremely interested in knowing how. Thanks in advance for the help.

Here is a method that gives a 10,000x speedup at N=310, M=230. As the method scales better than the original code, I'd expect a factor of more than a million at the full problem size.
The method exploits the shift invariance of the problem: for example, del_x**2 is essentially the same, up to a shift, at each call of do_calc, so we compute it only once.
If the output of do_calc is weighted before summation, the problem is no longer fully translation invariant and this method no longer applies directly. The result, however, can then be expressed as a linear convolution. At N=310, M=230 this still leaves us with a more than 1,000x speedup, and again the factor will be larger at the full problem size.
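To see the principle in one dimension before the 2-D bookkeeping: summing a shifted kernel over every grid point is the same as sliding a single, twice-as-long kernel across the grid, which a windowed (cumulative) sum evaluates in near-linear time. Below is a minimal 1D toy sketch of this idea (my own illustration, not the exact cumsum scheme used in the code that follows); the kernel d/d**2 mirrors do_calc, with the 0/0 at the origin mapped to 0:
import numpy as np

def f(d):
    # 1D analogue of do_calc's kernel; 0/0 -> nan -> 0
    with np.errstate(divide='ignore', invalid='ignore'):
        return np.nan_to_num(d / d**2)

N = 6
x = np.arange(N, dtype=float)

# brute force: one shifted kernel per grid point, O(N^2)
brute = sum(f(x - x0) for x0 in x)

# shift invariance: build the kernel once over all offsets -(N-1)..(N-1),
# then take a length-N sliding-window sum
kern = f(np.arange(-(N - 1), N, dtype=float))
fast = np.convolve(kern, np.ones(N), 'valid')

assert np.allclose(brute, fast)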
Code for original problem:
import numpy as np
#N, M = 3108, 2304
N, M = 310, 230
### OP's code
x,y = np.linspace(1,N,num=N),np.linspace(1,M,num=M) # x&y dimensions of image
X,Y = np.meshgrid(x,y,indexing='ij')
all_coords = np.dstack((X,Y)) # moves to 3-D
all_coords = all_coords.astype(int) # sets coords to int
'''
- below is a function that does a calculation on the full grid using the distance between x0,y0 and each point on the grid.
- the function takes x0,y0 and returns the calculated values across the grid
'''
def do_calc(x0,y0):
    del_x, del_y = X-x0, Y-y0
    np.seterr(divide='ignore', invalid='ignore')
    dmx_ij = (del_x/((del_x**2)+(del_y**2))) # x component
    dmy_ij = (del_y/((del_x**2)+(del_y**2))) # y component
    return np.nan_to_num(dmx_ij), np.nan_to_num(dmy_ij)
# now the actual loop
def do_loop():
    dmx,dmy = 0,0
    for pair in all_coords:
        for xi,yi in pair:
            DM = do_calc(xi,yi)
            dmx,dmy = dmx+DM[0],dmy+DM[1]
    return dmx,dmy
from time import time
t = [time()]
### pp's code
x, y = np.ogrid[-N+1:N-1:2j*N - 1j, -M+1:M-1:2j*M - 1J]
den = x*x + y*y
den[N-1, M-1] = 1
xx = x / den
yy = y / den
for zz in xx, yy:
    zz[N:] -= zz[:N-1]
    zz[:, M:] -= zz[:, :M-1]
XX = xx.cumsum(0)[N-1:].cumsum(1)[:, M-1:]
YY = yy.cumsum(0)[N-1:].cumsum(1)[:, M-1:]
t.append(time())
### call OP's code for reference
X_OP, Y_OP = do_loop()
t.append(time())
# make sure results are equal
assert np.allclose(XX, X_OP)
assert np.allclose(YY, Y_OP)
print('pp {}\nOP {}'.format(*np.diff(t)))
Sample run:
pp 0.015251636505126953
OP 149.1642508506775
Code for weighted problem:
import numpy as np
#N, M = 3108, 2304
N, M = 310, 230
values = np.random.random((N, M))
x,y = np.linspace(1,N,num=N),np.linspace(1,M,num=M) # x&y dimensions of image
X,Y = np.meshgrid(x,y,indexing='ij')
all_coords = np.dstack((X,Y)) # moves to 3-D
all_coords = all_coords.astype(int) # sets coords to int
'''
- below is a function that does a calculation on the full grid using the distance between x0,y0 and each point on the grid.
- the function takes x0,y0 and returns the calculated values across the grid
'''
def do_calc(x0,y0, v):
    del_x, del_y = X-x0, Y-y0
    np.seterr(divide='ignore', invalid='ignore')
    dmx_ij = (del_x/((del_x**2)+(del_y**2))) # x component
    dmy_ij = (del_y/((del_x**2)+(del_y**2))) # y component
    return v*np.nan_to_num(dmx_ij), v*np.nan_to_num(dmy_ij)
# now the actual loop
def do_loop():
    dmx,dmy = 0,0
    for pair, vv in zip(all_coords, values):
        for (xi,yi), v in zip(pair, vv):
            DM = do_calc(xi,yi, v)
            dmx,dmy = dmx+DM[0],dmy+DM[1]
    return dmx,dmy
from time import time
from scipy import signal
t = [time()]
x, y = np.ogrid[-N+1:N-1:2j*N - 1j, -M+1:M-1:2j*M - 1J]
den = x*x + y*y
den[N-1, M-1] = 1
xx = x / den
yy = y / den
XX, YY = (signal.fftconvolve(zz, values, 'valid') for zz in (xx, yy))
t.append(time())
X_OP, Y_OP = do_loop()
t.append(time())
assert np.allclose(XX, X_OP)
assert np.allclose(YY, Y_OP)
print('pp {}\nOP {}'.format(*np.diff(t)))
Sample run:
pp 0.12683939933776855
OP 158.35225439071655

Related

Plotting the mean square displacement of a 2D random walk as a function of δt

I've already created code for a random walk of 10000 steps, repeated it 12 times, and stored each run in a separate text file (which was required in the question). I then calculated the mean square displacement (not sure if it's done correctly). I now need to 'plot my Mean Square Displacement as a function of δt, including errorbars σ = std(MSD)/√N, where std(MSD) is the standard deviation among the different runs and N is the number of runs', and then compute the diffusion constant D from the curve and check that D = 2 (∆/dt) where dt = 1.
Here is my code so far:
import numpy as np
import matplotlib.pyplot as plt
import random as rd
import math
a = np.zeros((10000, 2), dtype=float)

def randwalk(x,y):
    theta = 2*math.pi*rd.random()
    x += math.cos(theta)  # This uses the equation given, since we are told the spatial unit = 1
    y += math.sin(theta)
    return (x,y)

x, y = 0., 0.
for i in range(10000):  # Using a for loop and the range function to initialize the array
    x, y = randwalk(x,y)
    a[i,:] = x, y

fn_base = "random_walk_%i.txt"  # Saves each run in a numbered text file; fn_base is a variable holding the format
N = 12
for j in range(N):
    rd.seed(j)  # seed(j) explicitly sets the seed for the random numbers
    x, y = 0., 0.
    for i in range(10000):
        x, y = randwalk(x,y)
        a[i,:] = x, y
    fn = fn_base % j
    np.savetxt(fn, a)

destinations = np.zeros((12, 2), dtype=float)
for j in range(12):
    x, y = 0., 0.
    for i in range(10000):
        x, y = randwalk(x, y)
    destinations[j] = x, y

square_distances = destinations[:,0] ** 2 + destinations[:,1] ** 2
m_s_d = np.mean(square_distances)
I think that to do it I just have to plot the MSD against the number of steps? But I'm not sure how to do this. I saw a similar question on Stack Overflow, but the code for it is different from mine and I don't understand how to use it for my code.
I tried this next:
plt.figure()
t = 10000
plt.plot(m_s_d, t)
plt.show()
But this gives an error as the dimensions are not equal.
Edit: I think my issue is that I am trying to plot it against the number of steps when I should be plotting it against the change in time. However, I can't work out how to calculate the change in time dt.
Apologies in advance if the question isn't formulated well; I am fairly new to computing. Thank you.
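A minimal sketch of one way to do this, assuming the 12 runs were saved as random_walk_0.txt ... random_walk_11.txt by the code above, and using the common 2D convention MSD = 4·D·t to read off D (check this against your course's convention):
import numpy as np
import matplotlib.pyplot as plt

n_runs, n_steps = 12, 10000
sq_disp = np.zeros((n_runs, n_steps))
for j in range(n_runs):
    a = np.loadtxt("random_walk_%i.txt" % j)
    sq_disp[j] = a[:, 0]**2 + a[:, 1]**2    # squared displacement after each step

msd = sq_disp.mean(axis=0)                   # MSD at each time, averaged over runs
err = sq_disp.std(axis=0) / np.sqrt(n_runs)  # sigma = std(MSD)/sqrt(N)

t = np.arange(1, n_steps + 1)                # dt = 1, so time = step index
plt.errorbar(t[::500], msd[::500], yerr=err[::500], fmt='o')  # thin out error bars
plt.xlabel('t')
plt.ylabel('MSD')
plt.show()

D = np.polyfit(t, msd, 1)[0] / 4             # slope of MSD = 4*D*t in 2D
print('D =', D)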

Calculating mean value of a 2D array as a function of distance from the center in Python

I'm trying to calculate the mean value of a quantity (in the form of a 2D array) as a function of its distance from the center of a 2D grid. I understand that the idea is to identify all the array elements that are at a distance R from the center, then add them up and divide by the number of elements. However, I'm having trouble coming up with an algorithm to actually do this.
I have attached a working example of the code to generate the 2D array below. The code is for calculating some quantities resulting from gravitational lensing, so the way the array is made is irrelevant to this problem, but I have attached the entire code so that you can create the output array for testing.
import numpy as np
import multiprocessing
import matplotlib.pyplot as plt
n = 100 # grid size
c = 3e8
G = 6.67e-11
M_sun = 1.989e30
pc = 3.086e16 # parsec
Dds = 625e6*pc
Ds = 1726e6*pc #z=2
Dd = 1651e6*pc #z=1
FOV_arcsec = 0.0001
FOV_arcmin = FOV_arcsec/60.
pix2rad = ((FOV_arcmin/60.)/float(n))*np.pi/180.
rad2pix = 1./pix2rad
Renorm = (4*G*M_sun/c**2)*(Dds/(Dd*Ds))
#stretch = [10, 2]
# To create a random distribution of points
def randdist(PDF, x, n):
    # Create a distribution following PDF(x). PDF and x
    # must be of the same length. n is the number of samples.
    fp = np.random.rand(n,)
    CDF = np.cumsum(PDF)
    return np.interp(fp, CDF, x)

def get_alpha(args):
    zeta_list_part, M_list_part, X, Y = args
    alpha_x = 0
    alpha_y = 0
    for key in range(len(M_list_part)):
        z_m_z_x = (X - zeta_list_part[key][0])*pix2rad
        z_m_z_y = (Y - zeta_list_part[key][1])*pix2rad
        alpha_x += M_list_part[key] * z_m_z_x / (z_m_z_x**2 + z_m_z_y**2)
        alpha_y += M_list_part[key] * z_m_z_y / (z_m_z_x**2 + z_m_z_y**2)
    return (alpha_x, alpha_y)
if __name__ == '__main__':
    # number of processes, scale accordingly
    num_processes = 1  # Number of CPUs to be used
    pool = multiprocessing.Pool(processes=num_processes)
    num = 100  # The number of points/microlenses
    r = np.linspace(-n, n, n)
    PDF = np.abs(1/r)
    PDF = PDF/np.sum(PDF)  # PDF should be normalized
    R = randdist(PDF, r, num)
    Theta = 2*np.pi*np.random.rand(num,)
    x1 = [R[k]*np.cos(Theta[k])*1 for k in range(num)]
    y1 = [R[k]*np.sin(Theta[k])*1 for k in range(num)]
    # Uniform distribution
    #R = np.random.uniform(-n,n,num)
    #x1 = np.random.uniform(-n,n,num)
    #y1 = np.random.uniform(-n,n,num)
    zeta_list = np.column_stack((np.array(x1), np.array(y1)))  # List of coordinates for the microlenses
    x = np.linspace(-n,n,n)
    y = np.linspace(-n,n,n)
    X, Y = np.meshgrid(x,y)
    M_list = np.array([0.1 for i in range(num)])
    # split zeta_list, M_list, X, and Y
    zeta_list_split = np.array_split(zeta_list, num_processes, axis=0)
    M_list_split = np.array_split(M_list, num_processes)
    X_list = [X for e in range(num_processes)]
    Y_list = [Y for e in range(num_processes)]
    alpha_list = pool.map(
        get_alpha, zip(zeta_list_split, M_list_split, X_list, Y_list))
    alpha_x = 0
    alpha_y = 0
    for e in alpha_list:
        alpha_x += e[0]
        alpha_y += e[1]
    alpha_x_y, alpha_x_x = np.gradient(alpha_x*rad2pix*Renorm, edge_order=2)
    alpha_y_y, alpha_y_x = np.gradient(alpha_y*rad2pix*Renorm, edge_order=2)
    det_A = 1 - alpha_y_y - alpha_x_x + (alpha_x_x)*(alpha_y_y) - (alpha_x_y)*(alpha_y_x)
    I = np.absolute(det_A)**(-1.)
    O = np.log10(I+1)
    plt.contourf(X,Y,O,100)
The array of interest is O, and I have attached a plot of how it should look (it can differ based on the random distribution of points).
What I'm trying to do is plot the mean value of O as a function of radius from the center of the grid. In the end, I want to be able to plot the average O as a function of distance from the center in a 2D line graph. So I suppose the first step is to define circles of radius R, based on X and Y.
def circle(x,y):
    r = np.sqrt(x**2 + y**2)
    return r
Now I just have to figure out a way to find all the values of O that have the same indices as equivalent values of R. I'm kind of confused on this part and would appreciate any help.
You can find the geometric coordinates of a circle with center (0,0) and radius R like this:
phi = np.linspace(0, 1, 50)
x = R*np.cos(2*np.pi*phi)
y = R*np.sin(2*np.pi*phi)
These values, however, will not fall on the regular pixel grid but in between.
In order to use them as sampling points you can either round the values and use them as indexes, or interpolate the values from the nearby pixels.
Attention: the pixel indexes and the x, y coordinates are not the same. In your example, (0,0) is at the picture location (50,50).
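Alternatively, if what you want in the end is the mean of O in whole-pixel radius bins (rather than sampling along circles), you can bin every pixel by its integer distance from the center with np.bincount. A minimal sketch, assuming O, X and Y come from the code above, where the center is at x = y = 0:
import numpy as np
import matplotlib.pyplot as plt

r = np.sqrt(X**2 + Y**2)          # distance of every pixel from the center
r_bin = r.astype(int).ravel()     # integer radius bin per pixel
sums = np.bincount(r_bin, weights=O.ravel())   # sum of O per bin
counts = np.bincount(r_bin)                    # pixel count per bin
# guard against empty bins at small radii
radial_mean = np.divide(sums, counts, out=np.zeros_like(sums), where=counts > 0)

plt.plot(np.arange(radial_mean.size), radial_mean)
plt.xlabel('R')
plt.ylabel('mean O')
plt.show()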

Rotating 1D numpy array of radial intensities into 2D array of spatial intensities

I have a numpy array filled with intensity readings at different radii in a uniform circle (for context, this is a 1D radiative transfer project for protostellar formation models: while much better models exist, my supervisor wants me to have the experience of producing one so I understand how others work).
I want to take that 1d array, and "rotate" it through a circle, forming a 2D array of intensities that could then be shown with imshow (or, with a bit of work, aplpy). The final array needs to be 2d, and the projection needs to be Cartesian, not polar.
I can do it with nested for loops, and I can do it with lookup tables, but I have a feeling there must be a neat way of doing it in numpy or something.
Any ideas?
EDIT:
I have had to go back and recreate my (frankly horrible) mess of for loops and if statements that I had before. If I really tried, I could probably get rid of one of the loops and one of the if statements by condensing things down. However, the aim is not to make it work with for loops, but to see if there is a built-in way to rotate the array.
impB is an array that differs slightly from what I stated before. It's actually just a list of radii where particles are detected. I then bin those into radius bins to get the intensity (or frequency, if you prefer) in each radius. R is the scale factor for my radius, as I run the model in a dimensionless way. iRes is a resolution scale factor, essentially how often I want to sample my radial bins. Everything else should be clear.
radJ = np.ndarray(shape=(2*iRes, 2*iRes))  # Create array of 2xRadius square
for i in range(iRes):
    n = len(impB[np.where(impB[:] < ((i+1.) * (R / iRes)))])  # Count number of things within this radius +1
    m = len(impB[np.where(impB[:] <= ((i) * (R / iRes)))])    # Count number of things in this radius
    a = (((i + 1) * (R / iRes))**2 - ((i) * (R / iRes))**2) * math.pi  # A normalisation factor based on area.....dont ask
    for x in range(iRes):
        for y in range(iRes):
            if (x**2 + y**2) < (i * iRes)**2:
                if (x**2 + y**2) >= (i * iRes)**2:  # Checks for radius, and puts in cartesian space
                    radJ[x+iRes, y+iRes] = (n-m) / a  # Put in actual intensity bins
                    radJ[x+iRes, -y+iRes] = (n-m) / a
                    radJ[-x+iRes, y+iRes] = (n-m) / a
                    radJ[-x+iRes, -y+iRes] = (n-m) / a
Nested loops are a simple approach for this. With ri_data_r and y containing your radius values (the difference to the middle pixel) and the array to rotate, respectively, I would suggest:
from scipy import interpolate
import numpy as np

y = np.random.rand(100)
ri_data_r = np.linspace(-len(y)/2, len(y)/2, len(y))
interpol_index = interpolate.interp1d(ri_data_r, y)
xv = np.arange(-1, 1, 0.01)  # adjust your matrix values here
X, Y = np.meshgrid(xv, xv)
profilegrid = np.ones(X.shape, float)
for i, x in enumerate(X[0, :]):
    for k, y in enumerate(Y[:, 0]):
        current_radius = np.sqrt(x ** 2 + y ** 2)
        profilegrid[i, k] = interpol_index(current_radius)
print(profilegrid)
This will give you exactly what you are looking for. You just have to take your array and calculate a symmetric array ri_data_r that has the same length as your data array and contains the distance between each data point and the middle of the array. The code does this automatically.
I stumbled upon this question in a different context and I hope I understood it right. Here are two other ways of doing this. The first uses skimage.transform.warp with interpolation of the desired order (here we use order=0, nearest-neighbor). This method is slower but more precise and needs less memory than the second method.
The second one does not use interpolation, and is therefore faster but also less precise and needs far more memory, because it stores each 2D array containing one tilt until the end, when they are averaged with np.nanmean().
The difference between the two solutions stems from the problem of handling the center of the final image, where the tilts overlap the most: the first one would just add values with each tilt, ending up out of the original range. This was "solved" by clipping the matrix in each step to a global_min and global_max (consult the code). The second one solves it by taking the mean of the tilts where they overlap, which forces us to use np.nan.
Please read the Example of usage and Sanity check sections in order to understand the plot titles.
Solution 1:
import numpy as np
from skimage.transform import warp

def rotate_vector(vector, deg_angle):
    # Credit goes to skimage.transform.radon
    assert vector.ndim == 1, 'Pass only 1D vectors, e.g. use array.ravel()'
    center = vector.size // 2
    square = np.zeros((vector.size, vector.size))
    square[center, :] = vector
    rad_angle = np.deg2rad(deg_angle)
    cos_a, sin_a = np.cos(rad_angle), np.sin(rad_angle)
    R = np.array([[cos_a, sin_a, -center * (cos_a + sin_a - 1)],
                  [-sin_a, cos_a, -center * (cos_a - sin_a - 1)],
                  [0, 0, 1]])
    # Approx. 80% of time is spent in this function
    return warp(square, R, clip=False, output_shape=(vector.size, vector.size))

def place_vectors(vectors, deg_angles):
    matrix = np.zeros((vectors.shape[-1], vectors.shape[-1]))
    global_min, global_max = 0, 0
    for i, deg_angle in enumerate(deg_angles):
        tilt = rotate_vector(vectors[i], deg_angle)
        global_min = tilt.min() if global_min > tilt.min() else global_min
        global_max = tilt.max() if global_max < tilt.max() else global_max
        matrix += tilt
    matrix = np.clip(matrix, global_min, global_max)
    return matrix
Solution 2:
Credit for the idea goes to my colleague Michael Scherbela.
import numpy as np

def rotate_vector(vector, deg_angle):
    assert vector.ndim == 1, 'Pass only 1D vectors, e.g. use array.ravel()'
    square = np.ones([vector.size, vector.size]) * np.nan
    radius = vector.size // 2
    r_values = np.linspace(-radius, radius, vector.size)
    rad_angle = np.deg2rad(deg_angle)
    ind_x = np.round(np.cos(rad_angle) * r_values + vector.size/2).astype(int)
    ind_y = np.round(np.sin(rad_angle) * r_values + vector.size/2).astype(int)
    ind_x = np.clip(ind_x, 0, vector.size-1)
    ind_y = np.clip(ind_y, 0, vector.size-1)
    square[ind_y, ind_x] = vector
    return square

def place_vectors(vectors, deg_angles):
    matrices = []
    for deg_angle, vector in zip(deg_angles, vectors):
        matrices.append(rotate_vector(vector, deg_angle))
    matrix = np.nanmean(np.array(matrices), axis=0)
    return np.nan_to_num(matrix, copy=False, nan=0.0)
Example of usage:
r = 100 # Radius of the circle, i.e. half the length of the vector
n = int(np.pi * r / 8) # Number of vectors, e.g. number of tilts in tomography
v = np.ones(2*r) # One vector, e.g. one tilt in tomography
V = np.array([v]*n) # All vectors, e.g. a sinogram in tomography
# Rotate 1D vector to a specific angle (output is 2D)
angle = 45
rotated = rotate_vector(v, angle)
# Rotate each row of a 2D array according to its angle (output is 2D)
angles = np.linspace(-90, 90, num=n, endpoint=False)
inplace = place_vectors(V, angles)
Sanity check:
These are just simple checks which by no means cover all possible edge cases. Depending on your use case you might want to extend the checks and adjust the method.
# I. Sanity check
# Assuming n <= πr and v = np.ones(2r)
# Then sum(inplace) should be approx. equal to (n * (2πr - n)) / π
# which is an area that should be covered by the tilts
desired_area = (n * (2 * np.pi * r - n)) / np.pi
covered_area = np.sum(inplace)
covered_frac = covered_area / desired_area
print(f'This method covered {covered_frac * 100:.2f}% '
      'of the area which should be covered in total.')
# II. Sanity check
# Assuming n <= πr and v = np.ones(2r)
# Then a circle M with radius m <= r should be the largest circle which
# is fully covered by the vectors. I.e. its mean should be no less than 1.
# If n = πr then m = r.
# m = n / π
m = int(n / np.pi)
# Code for circular mask not included
mask = create_circular_mask(2*r, 2*r, center=None, radius=m)
m_area = np.mean(inplace[mask])
print(f'Full radius r={r}, radius m={m}, mean(M)={m_area:.4f}.')
Code for plotting:
import matplotlib.pyplot as plt

plt.figure(figsize=(16, 8))
plt.subplot(121)
rotated = np.nan_to_num(rotated)  # not necessary in case of the first method
plt.title(
    f'Output of rotate_vector(), angle={angle}°\n'
    f'Sum is {np.sum(rotated):.2f} and should be {np.sum(v):.2f}')
plt.imshow(rotated, cmap=plt.cm.Greys_r)
plt.subplot(122)
plt.title(
    f'Output of place_vectors(), r={r}, n={n}\n'
    f'Covered {covered_frac * 100:.2f}% of the area which should be covered.\n'
    f'Mean of the circle M is {m_area:.4f} and should be 1.0.')
plt.imshow(inplace)
circle = plt.Circle((r, r), m, color='r', fill=False)
plt.gcf().gca().add_artist(circle)
plt.gcf().gca().legend([circle], [f'Circle M (m={m})'])

Matrix vector multiplication where the vector has been interpolated - Python

I have used the finite element method to approximate the Laplace equation and thus have turned it into a matrix system AU = F, where A is the stiffness matrix, and solved for U (not massively important for my question).
I have now got my approximation U, and when I find AU I should get back the vector F (or at least something similar); plots of F and of AU for x = 0 to x = 1 (say, for 20 nodes) were attached in the original post.
I then need to interpolate U onto a longer vector and find AU (for a bigger A too, but I am not interpolating that). I interpolate U as follows:
U_inter = interp1d(x,U)
U_rich = U_inter(longer_x)
which seems to work okay until I multiply it with the longer A matrix: the result (plotted in the original post) is full of spikes, and it seems each spike is at a node of x (i.e. the nodes of the original U). Does anybody know what could be causing this? The following is my code to find A, U and F.
import numpy as np
import math
import scipy
from scipy.sparse import diags
import scipy.sparse.linalg
from scipy.interpolate import interp1d
import matplotlib
import matplotlib.pyplot as plt
def Poisson_Stiffness(x0):
    """Finds the Poisson equation stiffness matrix with any non-uniform mesh x0"""
    x0 = np.array(x0)
    N = len(x0) - 1  # The number of elements; x0, x1, ..., xN
    h = x0[1:] - x0[:-1]
    a = np.zeros(N+1)
    a[0] = 1  # BOUNDARY CONDITIONS
    a[1:-1] = 1/h[1:] + 1/h[:-1]
    a[-1] = 1/h[-1]
    a[N] = 1  # BOUNDARY CONDITIONS
    b = -1/h
    b[0] = 0  # BOUNDARY CONDITIONS
    c = -1/h
    c[N-1] = 0  # BOUNDARY CONDITIONS: DIRICHLET
    data = [a.tolist(), b.tolist(), c.tolist()]
    Positions = [0, 1, -1]
    Stiffness_Matrix = diags(data, Positions, (N+1,N+1))
    return Stiffness_Matrix

def NodalQuadrature(x0):
    """Finds the nodal quadrature approximation of sin(pi x)"""
    x0 = np.array(x0)
    h = x0[1:] - x0[:-1]
    N = len(x0) - 1
    approx = np.zeros(len(x0))
    approx[0] = 0  # BOUNDARY CONDITIONS
    for i in range(1,N):
        approx[i] = math.sin(math.pi*x0[i])
        approx[i] = (approx[i]*h[i-1] + approx[i]*h[i])/2
    approx[N] = 0  # BOUNDARY CONDITIONS
    return approx

def Solver(x0):
    Stiff_Matrix = Poisson_Stiffness(x0)
    NodalApproximation = NodalQuadrature(x0)
    NodalApproximation[0] = 0
    U = scipy.sparse.linalg.spsolve(Stiff_Matrix, NodalApproximation)
    return U
x = np.linspace(0,1,10)
rich_x = np.linspace(0,1,50)
U = Solver(x)
A_rich = Poisson_Stiffness(rich_x)
U_inter = interp1d(x,U)
U_rich = U_inter(rich_x)
AUrich = A_rich.dot(U_rich)
plt.plot(rich_x,AUrich)
plt.show()
comment 1:
I added a Stiffness_Matrix = Stiffness_Matrix.tocsr() statement to avoid an efficiency warning. FE calculations are complex enough that I'll have to print out some intermediate values before I can identify what is going on.
comment 2:
plt.plot(rich_x, A_rich.dot(Solver(rich_x))) plots nicely. The noise you get is the result of the difference between the interpolated U_rich and the true solution: U_rich - Solver(rich_x).
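A minimal sketch of that check, reusing the variables from the code above:
import matplotlib.pyplot as plt

# difference between the interpolated solution and the fine-mesh solution
plt.plot(rich_x, U_rich - Solver(rich_x))
# A applied to the true fine-mesh solution plots smoothly, unlike AUrich
plt.plot(rich_x, A_rich.dot(Solver(rich_x)))
plt.show()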
comment 3:
I don't think there's a problem with your code. The problem is with the idea that you can test an interpolation this way. I'm rusty on FE theory, but I think you need to use the shape functions to interpolate, not a simple linear one.
comment 4:
Intuitively, with A_rich.dot(U_rich) you are asking what kind of forcing F would produce U_rich. Compared to Solver(rich_x), U_rich has flat spots, regions where its value is less than the true solution. What F would produce that? A spiky one, with NodalQuadrature(x) at the x points but near-zero values in between. That's what your plot is showing.
A higher-order interpolation will eliminate the flat spots and produce a smoother back-calculated F. But you really need to revisit the FE theory.
You might find it instructive to look at
plt.plot(x,NodalQuadrature(x))
plt.plot(rich_x, NodalQuadrature(rich_x))
The second plot is much smoother, but only about 1/5 as high.
Better yet look at:
plt.plot(rich_x,AUrich,'-*') # the spikes
plt.plot(x,NodalQuadrature(x),'o') # original forcing
plt.plot(rich_x, NodalQuadrature(rich_x),'+') # new forcing
In the model the forcing isn't continuous; it is a value at each node. With more nodes (rich_x), the magnitude at each node is less.
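The scaling is easy to verify: each nodal quadrature entry is proportional to the local mesh width h, and rich_x has about five times as many nodes as x. A quick check, reusing NodalQuadrature from the code above:
import numpy as np

print(NodalQuadrature(np.linspace(0, 1, 10)).max())  # h ~ 1/9
print(NodalQuadrature(np.linspace(0, 1, 50)).max())  # h ~ 1/49, roughly 1/5 as high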

Why is my 2D interpolant generating a matrix with swapped axes in SciPy?

I solve a differential equation with vector inputs
y' = f(t,y), y(t_0) = y_0
where y0 = y(x)
using the explicit Euler method, which says that
y_(i+1) = y_i + h*f(t_i, y_i)
where t is a time vector, h is the step size, and f is the right-hand side of the differential equation.
The python code for the method looks like this:
for i in np.arange(0,n-1):
    y[i+1,...] = y[i,...] + dt*myode(t[i],y[i,...])
The result is a k×m matrix y, where k is the size of the t dimension and m is the size of y0.
The vectors y and t are returned.
t, x, and y are passed to scipy.interpolate.RectBivariateSpline(t, x, y, kx=1, ky=1):
g = scipy.interpolate.RectBivariateSpline(t, x, y, kx=1, ky=1)
The resulting object g takes new vectors ti, xi (called as g(p,q)) and gives y_int, which is y interpolated at the points defined by ti and xi.
Here is my problem:
The documentation for RectBivariateSpline describes the __call__ method in terms of x and y:
__call__(x, y[, mth]) Evaluate spline at the grid points defined by the coordinate arrays
The matplotlib documentation for plot_surface uses similar notation:
Axes3D.plot_surface(X, Y, Z, *args, **kwargs)
with the important difference that X and Y are 2D arrays which are generated by numpy.meshgrid().
When I compute simple examples, the input order is the same in both and the result is exactly what I would expect. In my explicit Euler example, however, the initial order is ti,xi, yet the surface plot of the interpolant output only makes sense if I reverse the order of the inputs, like so:
ax2.plot_surface(xi, ti, u, cmap=cm.coolwarm)
While I am glad that it works, I'm not satisfied because I cannot explain why, nor why (apart from the array geometry) it is necessary to swap the inputs. Ideally, I would like to restructure the code so that the input order is consistent.
Here is a working code example to illustrate what I mean:
# Heat equation example with explicit Euler method
import numpy as np
import matplotlib.pyplot as mplot
import matplotlib.cm as cm
import scipy.sparse as sp
import scipy.interpolate as interp
from mpl_toolkits.mplot3d import Axes3D
import pdb
# explicit Euler method
def eev(myode,tspan,y0,dt):
    # Preprocessing
    # Time steps
    tspan[1] = tspan[1] + dt
    t = np.arange(tspan[0],tspan[1],dt,dtype=float)
    n = t.size
    m = y0.shape[0]
    y = np.zeros((n,m),dtype=float)
    y[0,:] = y0
    # explicit Euler recurrence relation
    for i in np.arange(0,n-1):
        y[i+1,...] = y[i,...] + dt*myode(t[i],y[i,...])
    return y,t

# generate matrix A
# u'(t) = A*u(t) + g*u(t)
def a_matrix(n):
    aa = sp.diags([1, -2, 1],[-1,0,1],(n,n))
    return aa

# System of ODEs with finite differences
def f(t,u):
    dydt = np.divide(1,h**2)*A.dot(u)
    return dydt

# homogeneous Dirichlet boundary conditions
def rbd(t):
    ul = np.zeros((t,1))
    return ul

# Initial value problem -----------
def main():
    # Metal rod
    # spatial discretization
    # number of inner nodes
    m = 20
    x0 = 0
    xn = 1
    x = np.linspace(x0,xn,m+2)
    # Step size
    global h
    h = x[1]-x[0]
    # Initial values
    u0 = np.sin(np.pi*x)
    # A matrix
    global A
    A = a_matrix(m)
    # Time
    t0 = 0
    tend = 0.2
    # Time step width
    dt = 0.0001
    tspan = [t0,tend]
    # Test r for stability
    r = np.divide(dt,h**2)
    if r <= 0.5:
        u,t = eev(f,tspan,u0[1:-1],dt)
    else:
        print('r = ',r)
        print('r > 0.5. Explicit Euler method will not be stable.')
    # Add boundary values back
    rb = rbd(t.size)
    u = np.hstack((rb,u,rb))
    # Interpolate heat values
    # Create interpolant. Note the parameter order
    fi = interp.RectBivariateSpline(t, x, u, kx=1, ky=1)
    # Create vectors for interpolant
    xi = np.linspace(x[0],x[-1],100)
    ti = np.linspace(t0,tend,100)
    # Compute function values from interpolant
    u_int = fi(ti,xi)
    # Change xi, ti in to 2D arrays
    xi,ti = np.meshgrid(xi,ti)
    # Create figure and axes objects
    fig3 = mplot.figure(1)
    ax3 = fig3.add_subplot(projection='3d')  # was fig3.gca(projection='3d'), which newer matplotlib removed
    print('xi.shape =',xi.shape,'ti.shape =',ti.shape,'u_int.shape =',u_int.shape)
    # Plot surface. Note the parameter order, compare with interpolant!
    ax3.plot_surface(xi, ti, u_int, cmap=cm.coolwarm)
    ax3.set_xlabel('xi')
    ax3.set_ylabel('ti')

main()
mplot.show()
As far as I can see, you define:
# Change xi, ti in to 2D arrays
xi,ti = np.meshgrid(xi,ti)
Change this to:
ti,xi = np.meshgrid(ti,xi)
and
ax3.plot_surface(xi, ti, u_int, cmap=cm.coolwarm)
to
ax3.plot_surface(ti, xi, u_int, cmap=cm.coolwarm)
and it works fine (if I understood correctly).
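The underlying shape convention, in a minimal standalone sketch: the array returned by RectBivariateSpline has shape (len(first argument), len(second argument)), so it pairs naturally with meshgrid(..., indexing='ij') of the same two vectors in the same order:
import numpy as np
from scipy.interpolate import RectBivariateSpline

t = np.linspace(0, 1, 5)                 # 5 time points
x = np.linspace(0, 1, 7)                 # 7 space points
u = np.random.rand(5, 7)                 # u[i, j] belongs to (t[i], x[j])

g = RectBivariateSpline(t, x, u, kx=1, ky=1)
ti = np.linspace(0, 1, 20)
xi = np.linspace(0, 1, 30)
u_int = g(ti, xi)
print(u_int.shape)                       # (20, 30): rows follow ti, columns follow xi

Ti, Xi = np.meshgrid(ti, xi, indexing='ij')  # also (20, 30), same layout
print(Ti.shape == u_int.shape)           # True: plot_surface(Ti, Xi, u_int) lines up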
