OK, this is going to be really long. I am trying to calculate water levels from an analytical solution on a polar grid. The solution depends on both r and theta, as well as a summation index j. What I am trying to do is essentially calculate the water level at a specific (r, theta) point in my grid based on a given equation (equation 19), which contains a term that sums over an infinite number of j values. Part of the equation is shown here: (equation image)
Some of the code is as follows:
"""
Plot idealized solution for water levels in a quarter circle domain with
constant bathymetry
"""
import numpy as np
import matplotlib.pyplot as plt
#from pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show
# establish parameters
Ho = 300 #m
g = 9.81 #m/s2
r1 = 1000 #m
r2 = 10000 #m
rr = np.arange(r1,r2,10)
#radial size of domain
phi = np.pi/2
#theta is the angle in radians at a specific location within the domain
#theta = np.pi/4
theta = np.arange(0, phi, np.pi/360)#varies
Theta = theta[1:180]
zeta = [0] * len(rr) * len(Theta)
#converting from wind speed to wind shear stress
U = 10
Cd = (1/1000) * ((3/4) + (U/15))
Roair = 1.225 #kg/m3
Rowater = 997 #kg/m3
W = (Roair/Rowater) * Cd * (U**2)
#wind shear in m^2/s^2 in the 0 direction (W to E)
Wo = np.sqrt((W**2)/2)
#wind shear in m^2/s^2 in the phi direction
Wphi = np.sqrt((W**2)/2)
zeta = np.zeros((len(rr), len(Theta)))
#determines the bathymetry
a_star = []
n = 0
kappa = (1-n)**(0.5)
for t in range(len(Theta)):
    a_star.append ( ( (np.sin(phi)) / (g*Ho*kappa* np.sin(kappa*Theta[t])) ) )
#first half of equation 19 that does not depend on j
for r in range(len(rr)):
    for t in range(len(Theta)):
        zeta[r,t] = ( (a_star[t] * (rr[r]**(1-n)))*(Wo*np.cos(((1-n)**(0.5))*Theta[t]) + Wphi*np.cos(((1-n)**(0.5))*(Theta[t]-phi))) )
#second half of equation 19 for j=0
ajbj = []
for t in range(len(Theta)):
    j = 0
    Djo = np.sin(( ( (1-n)**(0.5) ) * phi ) ) / ( (1-n)**(0.5) * (phi) )
    Ejo = (np.sin(phi)) / (phi)
    ajbj.append ( (r2**(1-n)) * (-a_star[t] * Djo))
    for r in range(len(rr)):
        zeta[r,t] = zeta[r,t] + (ajbj[t])*(Wo+Wphi)
#second half of equation 19 for j=1,2,3 (summation)
sj = []
tj = []
Dj = []
Ej = []
r1EogH = []
astarD = []
tjr1r2 = []
sjr2 = []
aj = []
bj = []
jj = [1,2,3]
for j in range(len(jj)):
    sj.append(- (n/2) + np.sqrt( ( (n/2)**2) + ( (jj[j]*np.pi / phi)**2) ) )
    tj.append (- (n/2) - np.sqrt( ( (n/2)**2) + ( (jj[j]*np.pi / phi)**2) ) )
    Dj.append ( (2* ((-1)**jj[j]) * ((1-n)**(0.5)) * phi * np.sin( ((1-n)**(0.5)) * phi )) / ( (1-n) * (phi**2) - (jj[j]**2) * (np.pi**2) ) )
    Ej.append ( (2* ((-1)**jj[j]) * phi * np.sin(phi) ) / ( (phi**2) - (jj[j]**2) * (np.pi**2) ) )
    r1EogH.append ( ( (r1**(1-n)) * Ej[j] ) / ( g * Ho ) )
    tjr1r2.append ( tj[j] * (r1**tj[j]) * (r2**sj[j]) )
    sjr2.append ( sj[j] * (r2**tj[j]) )
    for t in range(len(Theta)):
        #astarD.append ( a_star[t] * Dj[j] )
        aj.append ( ( a_star[t]*Dj[j] * ( ( tj[j] * (r1**tj[j]) * (r2**(1-n)) ) - ( (r2**tj[j]) * (r1**(1-n)) ) ) + ( r1EogH[j] * r2**tj[j] ) ) / ( (sjr2[j] * (r1**sj[j])) - tjr1r2[j] ) )
        bj.append ( ( -1* a_star[t]*Dj[j] * ( ( sj[j] * (r1**sj[j]) * (r2**(1-n)) ) - ( (r2**sj[j]) * (r1**(1-n)) ) ) - ( r1EogH[j] * r2**sj[j] ) ) / ( (sjr2[j] * (r2**sj[j])) - tjr1r2[j] ) )
        for r in range(len(rr)):
            zeta[r,t] = zeta[r,t] + ( ( (aj[j] * rr[r]**(sj[j])) + (bj[j] * rr[r]**(tj[j])) ) * (Wo*np.cos( (jj[j]*np.pi*Theta[t])/phi) + Wphi*np.cos( (jj[j]*np.pi*(Theta[t]-phi))/phi)) )
x,y = np.meshgrid(Theta, rr)
X = Theta
Y = rr
fig = plt.figure()
ax = fig.add_subplot(111, polar='True')
ax.pcolormesh(X, Y, zeta) #X,Y & data2D must all be same dimensions
ax.set_thetamin(0)
ax.set_thetamax(90)
plot = ax.pcolor(zeta)
fig.colorbar(plot)
plt.show()
(Figure: solution for the code above.)
The values I am getting for zeta are much too large. They should be more like the values in the solution below for theta = pi/4. I know the equations are complex. What I want is, for a point in the grid, say r = 1000, theta = pi/4, to calculate the zeta value for j=1, j=2, and j=3, sum those together, and then do the same thing at each point in the grid. I am wondering if I just need to structure my loops differently, or not use the .append function? Does anyone have any suggestions?
DONE FOR THETA PI/4
"""
Plot idealized solution for water levels in a quarter circle domain with
constant bathymetry
"""
import numpy as np
import matplotlib.pyplot as plt
#from pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show
# establish parameters
Ho = 300 #m
g = 9.81 #m/s2
r1 = 1000 #m
r2 = 10000 #m
rr = np.arange(r1,r2,10)
zeta = [0] * len(rr)
#radial size of domain
phi = np.pi/2
#theta is the angle in radians at a specific location within the domain
theta = np.pi/4
#converting from wind speed to wind shear stress
U = 10
Cd = (1/1000) * ((3/4) + (U/15))
Roair = 1.225 #kg/m3
Rowater = 997 #kg/m3
W = (Roair/Rowater) * Cd * (U**2)
#wind shear in m^2/s^2 in the 0 direction (W to E)
Wo = np.sqrt((W**2)/2)
#wind shear in m^2/s^2 in the phi direction
Wphi = np.sqrt((W**2)/2)
#determines the bathymetry
n = 0
kappa = (1-n)**(0.5)
a_star = ( (np.sin(phi)) / (g*Ho*kappa* np.sin(kappa*theta)) )
#first half of equation 19 that does not depend on j
for r in range(len(rr)):
    zeta[r] = ( a_star * (rr[r]**(1-n))*(Wo*np.cos(((1-n)**(0.5))*theta) + Wphi*np.cos(((1-n)**(0.5))*(theta-phi))) )
#plt.xlabel("rr")
#plt.ylabel("zeta")
###
#plt.plot(rr,zeta, label = 'LHS eq 19')
#second half of equation 19 for j=0
for r in range(len(rr)):
    j = 0
    Djo = np.sin(( ( (1-n)**(0.5) ) * phi ) ) / ( (1-n)**(0.5) * (phi) )
    Ejo = (np.sin(phi)) / (phi)
    ajbj = (r2**(1-n)) * (-a_star * Djo)
    zeta[r] = zeta[r] + (ajbj)*(Wo+Wphi)
#plt.xlabel("rr")
#plt.ylabel("zeta")
###
#plt.plot(rr,zeta, label = 'j=0')
#second half of equation 19 for j=1,2,3 (summation)
sj = []
tj = []
Dj = []
Ej = []
r1EogH = []
astarD = []
tjr1r2 = []
sjr2 = []
aj = []
bj = []
jj = [1,2,3]
for j in range(len(jj)):
    sj.append(- (n/2) + np.sqrt( ( (n/2)**2) + ( (jj[j]*np.pi / phi)**2) ) )
    tj.append (- (n/2) - np.sqrt( ( (n/2)**2) + ( (jj[j]*np.pi / phi)**2) ) )
    Dj.append ( (2* ((-1)**jj[j]) * ((1-n)**(0.5)) * phi * np.sin( ((1-n)**(0.5)) * phi )) / ( (1-n) * (phi**2) - (jj[j]**2) * (np.pi**2) ) )
    Ej.append ( (2* ((-1)**jj[j]) * phi * np.sin(phi) ) / ( (phi**2) - (jj[j]**2) * (np.pi**2) ) )
    r1EogH.append ( ( (r1**(1-n)) * Ej[j] ) / ( g * Ho ) )
    astarD.append ( a_star * Dj[j] )
    tjr1r2.append ( tj[j] * (r1**tj[j]) * (r2**sj[j]) )
    sjr2.append ( sj[j] * (r2**tj[j]) )
    aj.append ( ( astarD[j] * ( ( tj[j] * (r1**tj[j]) * (r2**(1-n)) ) - ( (r2**tj[j]) * (r1**(1-n)) ) ) + ( r1EogH[j] * r2**tj[j] ) ) / ( sjr2[j] * (r1**sj[j]) - tjr1r2[j] ) )
    bj.append (- ( astarD[j] * ( ( sj[j] * (r1**sj[j]) * (r2**(1-n)) ) - ( (r2**sj[j]) * (r1**(1-n)) ) ) - ( r1EogH[j] * r2**sj[j] ) ) / ( sjr2[j] * (r2**sj[j]) - tjr1r2[j] ) )
    for r in range(len(rr)):
        zeta[r] = zeta[r] + ( ( (aj[j] * rr[r]**(sj[j])) + (bj[j] * rr[r]**(tj[j])) ) * (Wo*np.cos( (jj[j]*np.pi*theta)/phi) + Wphi*np.cos( (jj[j]*np.pi*(theta-phi))/phi)) )
plt.xlabel("rr")
plt.ylabel("zeta")
plt.title("Wind In at 45 degrees")
#
plt.plot(rr,zeta, label = 'Ho=100m')
plt.legend(loc='upper right')
(Figure: solution for theta = 45 degrees.)
Suggest writing a function zeta(r, theta, j) that you can test against some hand-calculated values:
def zeta(r, theta, j):
    # your complex calculations here
    ...

assert np.isclose(zeta(4000, np.pi/4, 2), 0.00043, atol=1e-5)  # a test to be sure your formulae
# are right (value eyeballed from your graph; the tolerance here is just an example)
Then iterate over rr and Theta to fill the array:
zeta_out = np.zeros((len(rr), len(Theta)))
for r in range(len(rr)):
    for t in range(len(Theta)):
        zeta_out[r, t] = sum(zeta(rr[r], Theta[t], j) for j in (1, 2, 3))
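For the body of zeta, a minimal sketch of just the j-dependent term might look like the following. It reuses the names jj, sj, tj, aj, bj, Wo, Wphi, and phi from your own code and assumes those lists have already been filled per j (for the theta of interest), as in your theta = pi/4 version:
def zeta_j_term(r, theta, idx):
    # contribution of the idx-th summation term (j = jj[idx]) at one (r, theta) point
    j = jj[idx]
    radial = aj[idx] * r**sj[idx] + bj[idx] * r**tj[idx]
    angular = (Wo * np.cos(j * np.pi * theta / phi)
               + Wphi * np.cos(j * np.pi * (theta - phi) / phi))
    return radial * angular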
Unfortunately I am not a math wizard. I tried to run your code to see what output it produced before attempting to refactor it, but there are errors because things like sj from sj.append() are not defined in the snippet provided. I also do not know how to read the equation in your image (my math courses were many years ago and have not been used since college).
As for advice, without fully understanding your problem, the best I can offer is to consider breaking the work into functions. This lets you and others read the code more clearly. Another advantage is that when something changes (which seems unlikely with an equation, but perhaps you will want to change your input type at some point in the future), the changes can be made in smaller pieces.
def calculate_j(point_or_other_necessary_input):
    """ do the maths for j """
    return some_equation_or_value_for_j

def calculate_t(point_or_other_necessary_input):
    """ do the maths for t """
    return some_equation_or_value_for_t

def calculate_r(point_or_other_necessary_input):
    """ do the maths for r """
    return some_equation_or_value_for_r

def process_grid(zeta_grid):
    """ process each point in the provided grid """
    for point in zeta_grid:
        # or whatever makes sense to combine the values/equations
        calculated_sum = calculate_j(point) + calculate_t(point) + calculate_r(point)
        # store value in some reasonable way, probably another grid
    return calculated_points

print(process_grid(zeta))
After further evaluation, and more info from your edit, I have to ask if you really mean to do:
jj = [1,2,3]
for j in range(len(jj)):
I'm guessing, without really being sure, that you want j to be 1, then 2, then 3, no? Currently you are really getting 0, 1, 2, because you are iterating over the length of the list, not the items in the list. This is not significant in the first portion, where you access the values of jj with jj[j], but it is in the portion for r, where you use j instead of jj[j], such as in:
zeta[r] = zeta[r] + ( ( (aj[j] * rr[r]**(sj[j])) + (bj[j] * rr[r]**(tj[j])) )
As stated, that could be your intended behavior, to only use 0, 1, 2 instead of 1, 2, 3. As for why the values might be too large: do you intend to calculate r for all of rr three times? You have that loop nested inside the for j in ... loop, which means it executes on every iteration of the j loop, so it keeps adding to itself (zeta[r] = zeta[r] + ...). (Again, not a math person,) but isn't that running the update something like 3 * len(rr) times, which may be more than you intended?
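As a small illustration of that point (just a sketch, reusing your own names), iterating over the list with enumerate gives you both the value of j (1, 2, 3, used inside the cosines) and its position (0, 1, 2, used to index the sj, tj, aj, bj lists), so the two cannot be mixed up:
jj = [1, 2, 3]
for idx, j in enumerate(jj):
    # j   -> the actual summation index (1, 2, 3) for the cosine terms
    # idx -> the list position (0, 1, 2) for sj, tj, aj, bj
    term = ((aj[idx] * rr[r]**sj[idx] + bj[idx] * rr[r]**tj[idx])
            * (Wo * np.cos(j * np.pi * theta / phi)
               + Wphi * np.cos(j * np.pi * (theta - phi) / phi)))
    zeta[r] = zeta[r] + term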
Related
I have the following code. I know it's long and complex, and it takes about 1.5 minutes to run on my laptop. I would greatly appreciate any help towards finding the problem causing the error at the end, in the plotting part. I didn't find anything on Google related to this error message:
TypeError: unsupported operand type(s) for *: 'QuadMesh' and 'float'
from scipy import interpolate
from scipy.fft import fft, ifft
from scipy.constants import c, epsilon_0
import numpy as np, math
from matplotlib import pyplot as plt
lambda_0 = 800 * 10**(-9)
omega_0 = 2*np.pi*c / lambda_0
delta_lambda = 50.0 * 10**(-9)
delta_Tau = (1.47 * 10**(-3) * (lambda_0*10**9)**2 / (delta_lambda*10**9)) * 10**(-15) #
delta_omega_PFT = (4*np.log(2) / delta_Tau) # equivalent (equal) to: ((4*ln(2)) / (2*pi)) * (2*pi/delta_Tau) = 0.441 * (2*pi/delta_Tau)
F = 2.0 # focal length in meters, as from Liu 2017
def G(omegas):
    # return ( np.sqrt(np.pi/(2*np.log(2))) * tau * np.exp( -(tau**2/(8*np.log(2))) * (omegas-omega_0)**2 ) ) # why is this here?
    return np.exp( -(delta_Tau**2/(8*np.log(2))) * (omegas-omega_0)**2 )
xsi = 0.1 * (1.0 * 10**(-15)) / (1.0 * 10**(-3))
def phase_c(xs, omegas):
    return ( xsi * np.reshape(xs, (xs.shape[0], 1)) * np.reshape((omegas-omega_0), (1, omegas.shape[0])) )
E0 = np.sqrt( (0.2*10**4 * 8 * np.sqrt(np.log(2))) / (delta_Tau*np.sqrt(np.pi)*c*epsilon_0/2.0) ) * np.sqrt(2*np.pi*np.log(2)) / delta_omega_PFT
def f(xi, omega): # the prefactors from Eq. (5) of Li et al. (2017) (the ones pre-multiplying the Fraunhoffer integral)
    first = omega * np.exp(1j * (omega/c) * F) / (1j * 2*np.pi*c*F) # only a function of omega; first is shape (omega.shape[0], )
    omega = np.reshape(omega, (1, omega.shape[0]))
    xi = np.reshape(xi, (xi.shape[0], 1))
    second = np.exp(1j * (omega/c) * xi**2 / (2*F)) # second is shape (xi.shape[0], omega.shape[0])
    return (first * second) # returned is shape (xi.shape[0], omega.shape[0])
x0 = 0.0
delta_x = 196.0 # obtained from N=10, N_x=8*10^3, xi_max=10um, F=2m
xmin_PFT = x0 - delta_x #
xmax_PFT = x0 + delta_x #
num_xs_PFT = 8 * 10**3
xs_PFT = np.linspace(xmin_PFT, xmax_PFT, num_xs_PFT)
sampling_spacing_xs_PFT = np.true_divide( (xmax_PFT-xmin_PFT), num_xs_PFT)
num_omegas_focus = 5 * 10**2
maximum_time = 100.0 * 10**(-15)
N = math.ceil( (np.pi*num_omegas_focus)/(2*delta_omega_PFT*maximum_time) ) - 1
omega_max_focus = omega_0 + N*delta_omega_PFT
omega_min_focus = omega_0 - N*delta_omega_PFT
omegas_focus = np.linspace(omega_min_focus, omega_max_focus, num_omegas_focus) # shape (num_omegas_focus, )
sampling_spacing_omegas_focus = np.true_divide((omega_max_focus-omega_min_focus) , num_omegas_focus)
Es_x_omega = np.multiply( (E0 * G(omegas_focus)) ,
(np.exp(1j*phase_c(xs_PFT, omegas_focus))) # phase_c uses xsi, the PFT coefficient
)
# Es_x_omega holds across columns (vertically downwards) the x-dependence and across rows (horizontally) the omega-dependence
# Es_x_omega is shape (num_xs_PFT, num_omegas_focus)
Bprime_data_real = np.empty((Es_x_omega.shape[0], Es_x_omega.shape[1])) # this can be rewritten in a more Pythonic way
Bprime_data_imag = np.empty((Es_x_omega.shape[0], Es_x_omega.shape[1]))
for i in range(Es_x_omega.shape[1]): # for all the columns (all omegas)
    # Perform FFT wrt x (so go from x to Kappa (a scaled spatial frequency))
    intermediate = fft(Es_x_omega[:, i])
    Bprime_data_real[:, i] = np.real(intermediate) * sampling_spacing_xs_PFT # multiplication by \Delta, see my docu above
    Bprime_data_imag[:, i] = np.imag(intermediate) * sampling_spacing_xs_PFT # multiplication by \Delta, see my docu above
    if i % 10000 == 0:
        print("We have done fft number {}".format(i) + " out of {}".format(Es_x_omega.shape[1]) + "ffts")
# Bprime is function of (Kappa, omega): across rows the omega dependence, across columns the Kappa dependence.
# Get the Kappas:
returned_freqs = np.fft.fftfreq(num_xs_PFT, sampling_spacing_xs_PFT) # shape (num_xs_PFT, )
Kappas_ugly = 2*np.pi * returned_freqs # shape (num_xs_PFT, ), but unordered in terms of the magnitude of the values! see https://numpy.org/doc/stable/reference/generated/numpy.fft.fftfreq.html
Kappas_pretty = 2*np.pi * np.fft.fftshift(returned_freqs)
indices = (Kappas_ugly == Kappas_pretty[:, None]).argmax(1) # shape (num_xs_PFT, )
indices = indices.reshape((indices.shape[0], 1)) # needed for adapting Dani Mesejo's answer: he reordered based on 1D slices laid horizontally, here I reorder based on 1D slices laid vertically.
# see my notebook for visuals, 22-23 Nov 2021
hold_real = Bprime_data_real.shape[1]
hold_imag = Bprime_data_imag.shape[1]
Bprime_data_real_pretty = np.take_along_axis(Bprime_data_real, np.tile(indices, (1, hold_real)), axis=0) # adapted from Dani Mesejo's answer
Bprime_data_imag_pretty = np.take_along_axis(Bprime_data_imag, np.tile(indices, (1, hold_imag)), axis=0) # adapted from Dani Mesejo's answer
print(Bprime_data_real_pretty.shape) # shape (num_xs_PFT, num_omegas_focus), similarly for Bprime_data_imag_pretty
Bprime_real = interpolate.RectBivariateSpline(Kappas_pretty, omegas_focus, Bprime_data_real_pretty) # this CTOR creates an object faster (which can also be queried faster)
Bprime_imag = interpolate.RectBivariateSpline(Kappas_pretty, omegas_focus, Bprime_data_imag_pretty) # than interpolate.interp2d() does.
print("We have the interpolators!")
# Prepare for the aim: plot E versus time (horizontal axis) and xi (vertical axis).
xi_min = -5.0 * 10**(-6) # um
xi_max = 5.0 * 10**(-6) # um
num_xis = 5000
xis = np.linspace(xi_min, xi_max, num_xis)
print("We are preparing now!")
Es_Kappa_omega_without_prefactor = np.empty((xis.shape[0], omegas_focus.shape[0]), dtype=complex)
for j in range(Es_Kappa_omega_without_prefactor.shape[0]): # for each row
    for i in range(Es_Kappa_omega_without_prefactor.shape[1]): # for each column
        Es_Kappa_omega_without_prefactor[j, i] = Bprime_real(omegas_focus[i]*xis[j] /(c*F), omegas_focus[i]) + 1j*Bprime_imag(omegas_focus[i]*xis[j] /(c*F), omegas_focus[i])
        if ((i + j*Es_Kappa_omega_without_prefactor.shape[1]) % 30000 == 0):
            print("We have done iter number {}".format(i + j*Es_Kappa_omega_without_prefactor.shape[1])
                  + " out of {}".format(Es_Kappa_omega_without_prefactor.shape[0] * Es_Kappa_omega_without_prefactor.shape[1]) + " iterations in querying the interpolators")
Es_Kappa_omega = np.multiply( f(xis, omegas_focus), # f(xis, omegas_focus) is shape (xis.shape[0], omegas_focus.shape[0])
Es_Kappa_omega_without_prefactor # Es_Kappa_omega_without_prefactor is shape (xis.shape[0], omegas_focus.shape[0])
) # the obtained variable is shape (xis.shape[0], omegas_focus.shape[0])
# Do IFT of Es_Kappa_omega w.r.t. omega to go from FD (omega) to TD (time t).
Es_Kappa_time = np.empty_like(Es_Kappa_omega, dtype=complex) # shape (xis.shape[0], omegas_focus.shape[0])
# Along columns (so vertically) the xi dependence, along rows (horizontally), the omega dependence
for i in range(Es_Kappa_omega.shape[0]): # for each row (for each xi)
    Es_Kappa_time[i, :] = ifft(Es_Kappa_omega[i, :]) * (sampling_spacing_omegas_focus/(2*np.pi)) * num_omegas_focus # 1st multiplication is by Delta, 2nd multiplication is by N
    if i % 10000 == 0:
        print("We have done ifft number {}".format(i) + " out of a total of {}".format(Es_Kappa_omega.shape[0]) + " iffts")
returned_times_ugly = np.fft.fftfreq(num_omegas_focus, d=(sampling_spacing_omegas_focus/(2*np.pi))) # shape (num_omegas_focus, )
returned_times_pretty = np.fft.fftshift(returned_times_ugly) # order the returned "frequencies" (here "frequencies" = times because it's IFT (so from FD to TD))
indices = (returned_times_ugly == returned_times_pretty[:, None]).argmax(1)
Es_Kappa_time = np.take_along_axis(Es_Kappa_time, np.tile(indices, (Es_Kappa_time.shape[0], 1)), axis=1) # this is purely Dani Mesejo's answer
returned_times_pretty_mesh, xis_mesh = np.meshgrid(returned_times_pretty, xis)
fig, ax = plt.subplots()
c = ax.pcolormesh(returned_times_pretty_mesh, xis_mesh, np.real(Es_Kappa_time), cmap='viridis')
fig.colorbar(c, ax=ax, label=r'$[V/m]$')
ax.set_xlabel("t [s]")
ax.set_ylabel("xi [m]")
plt.show()
fig, ax = plt.subplots()
ax.imshow(np.multiply(np.square(np.real(Es_Kappa_time)), (c*epsilon_0)), cmap='viridis')
ax.set_xlabel("t [s]")
ax.set_ylabel("xi [m]")
plt.show()
I have tried many variants of the plotting part. It fails with:
c = ax.pcolormesh(returned_times_pretty_mesh, xis_mesh, (c*epsilon_0)*np.real(Es_Kappa_time)**2, cmap='viridis')
fig.colorbar(c, ax=ax, label=r'$[V/m]$')
Fails with:
160 fig, ax = plt.subplots()
--> 161 ax.imshow(np.multiply(np.real(Es_Kappa_time)**2, (c*epsilon_0)), cmap='viridis')
I ran out of ideas of what I might introduce there.
Thank you!
It looks like you're reassigning c to the return value of ax.pcolormesh(...) after importing c (the speed of light) from scipy.constants, so by the time you compute c * epsilon_0, c is a QuadMesh rather than a float.
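A minimal sketch of the fix, keeping everything else in your script unchanged: give the return value of pcolormesh a different name (say, mesh), so that c is still the scipy.constants value when you build the intensity.
fig, ax = plt.subplots()
mesh = ax.pcolormesh(returned_times_pretty_mesh, xis_mesh,
                     (c * epsilon_0) * np.real(Es_Kappa_time)**2, cmap='viridis')
fig.colorbar(mesh, ax=ax, label=r'$[V/m]$')
ax.set_xlabel("t [s]")
ax.set_ylabel("xi [m]")
plt.show()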
I have a function that contains several parameters, and I want to examine the effect of each parameter on the function by trying multiple values. The problem is that I find it tedious to change a parameter's value by hand each time and re-plot.
import numpy as np
import matplotlib.pyplot as plt
B = 0
B1 = 0.01
phi = 1
Da = 10
M = 0
Re = 100
dP = 1
a = phi * ( (1/Da) + M**2 )
c = 1 * phi * Re * dP
k = ( c * np.exp( 0.5 * np.sqrt(a)) ) / ( (a) * ( np.sqrt(a) * B * ( np.exp(0.5 * np.sqrt(a)) )**2 - np.sqrt(a) * B + ( np.exp(0.5 * np.sqrt(a)) )**2 + 1) )
x = np.linspace(-0.5,0.5,100)
f = lambda x: (c/a) - k * ( np.exp( x * np.sqrt(a) )+ np.exp( -1 * x * np.sqrt(a) ))
fp = f(x)
plt.plot(fp,x)
plt.xlabel('fp')
plt.ylabel('x')
plt.xlim(0, 13)
plt.ylim(-0.5, 0.5)
plt.legend(['Initial case','Initial case,B=0.01'])
plt.grid(True)
plt.show()
You could try creating a function that accepts whatever parameters you want. Here we place default parameter values in the function declaration, then create a list of values for Da below it. Loop over that list and plot; after the loop finishes, set your plot specifics and display the plot.
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [12, 8]
# Function defined with default values if none are passed
def math_func(B=0, B1=.01, phi=1, Da=10, M=0, Re=100, dP=1):
    a = phi * ( (1/Da) + M**2 )
    c = 1 * phi * Re * dP
    k = ( c * np.exp( 0.5 * np.sqrt(a)) ) / ( (a) * ( np.sqrt(a) * B * ( np.exp(0.5 * np.sqrt(a)) )**2 - np.sqrt(a) * B + ( np.exp(0.5 * np.sqrt(a)) )**2 + 1) )
    x = np.linspace(-0.5,0.5,100)
    f = lambda x: (c/a) - k * ( np.exp( x * np.sqrt(a) )+ np.exp( -1 * x * np.sqrt(a) ))
    return f(x), x
# All of the values you want to check for this parameter
Da_range = [.01,.1,1,10]
for Da_n in Da_range:
    # Plot all of your lines
    fp, x = math_func(Da=Da_n)
    plt.plot(fp, x, label=f'Da={Da_n}')
# Set Plot Details
plt.xlabel('fp')
plt.ylabel('x')
plt.xlim(0, 13)
plt.ylim(-0.5, 0.5)
plt.legend(loc="upper right")
plt.grid(True)
plt.show()
(Output: plot of fp versus x, one line per Da value.)
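The same pattern extends to any of the other parameters; for example (the values below are only illustrative), sweeping Re instead of Da:
for Re_n in [50, 100, 200, 400]:
    fp, x = math_func(Re=Re_n)
    plt.plot(fp, x, label=f'Re={Re_n}')
plt.xlabel('fp')
plt.ylabel('x')
plt.legend(loc="upper right")
plt.grid(True)
plt.show()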
I'm trying to implement the DOSNES algorithm from this publication, but in Python, for a project. I found this Matlab implementation, which works well, but I probably mistranslated one or more steps in my code (mainly the axis handling, I guess) because I clearly don't reach the same result. This is the part I'm struggling with in Matlab:
P(1:n + 1:end) = 0;                                 % set diagonal to zero
P = 0.5 * (P + P');                                 % symmetrize P-values
P = max(P ./ sum(P(:)), realmin);                   % make sure P-values sum to one
const = sum(P(:) .* log(P(:)));                     % constant in KL divergence
ydata = .0001 * randn(n, no_dims);
y_incs = zeros(size(ydata));
gains = ones(size(ydata));
% Run the iterations
for iter=1:max_iter
    % Compute joint probability that point i and j are neighbors
    sum_ydata = sum(ydata .^ 2, 2);
    num = 1 ./ (1 + bsxfun(@plus, sum_ydata, bsxfun(@plus, sum_ydata', -2 * (ydata * ydata')))); % Student-t distribution
    num(1:n+1:end) = 0;                             % set diagonal to zero
    Q = max(num ./ sum(num(:)), realmin);           % normalize to get probabilities
    % Compute the gradients (faster implementation)
    L = (P - Q) .* num;
    y_grads = 4 * (diag(sum(L, 1)) - L) * ydata;
    % Update the solution
    gains = (gains + .2) .* (sign(y_grads) ~= sign(y_incs)) ... % note that the y_grads are actually -y_grads
          + (gains * .8) .* (sign(y_grads) == sign(y_incs));
    gains(gains < min_gain) = min_gain;
    y_incs = momentum * y_incs - epsilon * (gains .* y_grads);
    ydata = ydata + y_incs;
    % Spherical projection
    ydata = bsxfun(@minus, ydata, mean(ydata, 1));
    r_mean = mean(sqrt(sum(ydata.^2,2)),1);
    ydata = bsxfun(@times, ydata, r_mean./ sqrt(sum(ydata.^2,2)) );
    % Update the momentum if necessary
    if iter == mom_switch_iter
        momentum = final_momentum;
    end
    % Print out progress
    if ~rem(iter, 10)
        cost = const - sum(P(:) .* log(Q(:)));
        disp(['Iteration ' num2str(iter) ': error is ' num2str(cost)]);
    end
end
and this is my Python version:
no_dims = 3
n = X.shape[0]
min_gain = 0.01
momentum = 0.5
final_momentum = 0.8
epsilon = 500
mom_switch_iter = 250
max_iter = 1000
P[np.diag_indices_from(P)] = 0.
P = ( P + P.T )/2
P = np.max(P / np.sum(P), axis=0)
const = np.sum( P * np.log(P) )
ydata = 1e-4 * np.random.random(size=(n, no_dims))
y_incs = np.zeros(shape=ydata.shape)
gains = np.ones(shape=ydata.shape)
for iter in range(max_iter):
    sum_ydata = np.sum(ydata**2, axis = 1)
    bsxfun_1 = sum_ydata.T + -2*np.dot(ydata, ydata.T)
    bsxfun_2 = sum_ydata + bsxfun_1
    num = 1. / ( 1 + bsxfun_2 )
    num[np.diag_indices_from(num)] = 0.
    Q = np.max(num / np.sum(num), axis=0)
    L = (P - Q) * num
    t = np.diag( L.sum(axis=0) ) - L
    y_grads = 4 * np.dot( t , ydata )
    gains = (gains + 0.2) * ( np.sign(y_grads) != np.sign(y_incs) ) \
            + (gains * 0.8) * ( np.sign(y_grads) == np.sign(y_incs) )
    # gains[np.where(np.sign(y_grads) != np.sign(y_incs))] += 0.2
    # gains[np.where(np.sign(y_grads) == np.sign(y_incs))] *= 0.8
    gains = np.clip(gains, a_min = min_gain, a_max = None)
    y_incs = momentum * y_incs - epsilon * gains * y_grads
    ydata += y_incs
    ydata -= ydata.mean(axis=0)
    alpha = np.sqrt(np.sum(ydata ** 2, axis=1))
    r_mean = np.mean(alpha)
    ydata = ydata * (r_mean / alpha).reshape(-1, 1)
    if iter == mom_switch_iter:
        momentum = final_momentum
    if iter % 10 == 0:
        cost = const - np.sum( P * np.log(Q) )
        print( "Iteration {} : error is {}".format(iter, cost) )
If you want to try it, you can download the repository here, which uses the Iris dataset and an attached library. test.py is my test implementation with the Iris dataset, and visu.py reproduces the result the paper shows on the MNIST dataset, but restricted to 1000k random points.
Many thanks for your support,
Nicolas
EDIT 1
This is the final code working as expected :
from scipy.special import xlogy  # xlogy(p, q) returns 0 where p is 0

P[np.diag_indices_from(P)] = 0.
P = ( P + P.T )/2
P = P / np.sum(P)
const = np.sum(xlogy(P, P))
ydata = 1e-4 * np.random.random(size=(n, no_dims))
y_incs = np.zeros(shape=ydata.shape)
gains = np.ones(shape=ydata.shape)
for iter in range(max_iter):
    sum_ydata = np.sum(ydata**2, axis = 1)
    bsxfun_1 = sum_ydata.T + -2*np.dot(ydata, ydata.T)
    bsxfun_2 = sum_ydata + bsxfun_1
    num = 1. / ( 1 + bsxfun_2 )
    num[np.diag_indices_from(num)] = 0.
    Q = num / np.sum(num)
    L = (P - Q) * num
    t = np.diag( L.sum(axis=0) ) - L
    y_grads = 4 * np.dot( t , ydata )
    gains = (gains + 0.2) * ( np.sign(y_grads) != np.sign(y_incs) ) \
            + (gains * 0.8) * ( np.sign(y_grads) == np.sign(y_incs) )
    gains = np.clip(gains, a_min = min_gain, a_max = None)
    y_incs = momentum * y_incs - epsilon * gains * y_grads
    ydata += y_incs
    ydata -= ydata.mean(axis=0)
    alpha = np.sqrt(np.sum(ydata ** 2, axis=1))
    r_mean = np.mean(alpha)
    ydata = ydata * (r_mean / alpha).reshape(-1, 1)
    if iter == mom_switch_iter:
        momentum = final_momentum
    if iter % 10 == 0:
        cost = const - np.sum( xlogy(P, Q) )
        print( "Iteration {} : error is {}".format(iter, cost) )
Right at the beginning you seem to replace a non-reducing max in Matlab (it has two arguments, so it compares them elementwise and returns a full-size P) with a reducing max in Python (axis=0 reduces along that axis, meaning the result has one dimension less).
My advice, however, is to leave out the max altogether, because it looks pretty much like an amateurish attempt at sidestepping the problem that p log p is defined at p = 0 only via taking the limit p -> 0 (which, using L'Hopital's rule, can be shown to be 0), whereas the computer will return NaN when asked to compute 0 * log(0).
The proper way of going about this is to use scipy.special.xlogy, which treats 0 correctly.
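A small illustration of both points (the array values here are made up): np.maximum is the elementwise equivalent of Matlab's two-argument max, while xlogy removes the need for any flooring because it returns 0 wherever p is 0.
import numpy as np
from scipy.special import xlogy

P = np.array([[0.0, 0.3],
              [0.3, 0.4]])
P = P / P.sum()

# elementwise floor, like Matlab's max(P, realmin); does not reduce any axis
P_floored = np.maximum(P, np.finfo(float).tiny)

# preferred: xlogy treats the zero entries correctly, so no flooring is needed
const = np.sum(xlogy(P, P))   # finite, no NaN from 0 * log(0)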
This is a 4th-order Runge-Kutta method I've written to eventually graph some differential equations.
The goal is to create a 4 x (100,000 / 0.1) array that gives me the values of x, y, dx, and dy at every timestep, so that I can graph any equation with those 4 parameters.
#Assumptions
x0, y0 = -.250, .433
x1, y1 = -.250,-.433
x2, y2 = .500, .000
R = .2
C = .5
d = .25
#Imports
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as intgr
import math
#ag = [[ x0, y0], [ x1, y1], [ x2, y2]]
mag = [[-.250,.433], [-.250,-.433], [.500,.000]]
def der( xin, t ):
    mag = [[-.250,.433],[-.250,-.433],[.500,.000]]
    x = xin[0]
    y = xin[1]
    vx = xin[2]
    vy = xin[3]
    dx = vx
    dy = vy
    vx2 = 0
    vy2 = 0
    vx1 = -R * vx - C * x
    vy1 = -R * vy - C * y
    for i in range( mag.__len__() - 1 ):
        vx2 = vx2 + ( ( mag[i][0] - x )
                      / ( ( mag[i][0] - x )**2 + ( mag[i][1] - y )**2 + d**2 )**1.5 )
        vy2 = vy2 + ( ( mag[i][1] - y )
                      / ( ( mag[i][0] - x )**2 + ( mag[i][1] - y )**2 + d**2 )**1.5 )
    vx3 = vx1 + vx2
    vy3 = vy1 + vy2
    array = [dx,dy,vx3,vy3]
    return array
dt = .1
t = np.arange( 0, 100000, dt )
xzero = [.2, .2, 0, 0]
def RK4( func, xzero, t ):
    rows = xzero.__len__()
    columns = t.__len__()
    x = np.zeros( ( rows, columns ) )
    x_t = 0
    ind = 0
    x[:,ind] = xzero
    dt = t[1] - t[0]
    for time in t[0:len( t ) - 1]:
        ind = ind + 1
        K1 = dt * func( x[:,ind-1], time )
        K2 = dt * func( x[:,ind-1] + .5 * K1, time + .5 * dt )
        K3 = dt * func( x[:,ind-1] + .5 * K2, time + .5 * dt )
        K4 = dt * func( x[:,ind-1] + K3, time + dt )
        x[:,ind] = x[:,ind-1] + ( 1.0 / 6.0 ) * ( K1 + 2 * K2 + 2 * K3 + K4 )
    return x
print( RK4( func = der, xzero = xzero, t = t ) )
Running this produces an error involving numpy.float64.
I'm not exactly sure why, but it seems some variable in my code isn't being interpreted as a number.
Thanks in advance for the help, and let me know if I should provide more code or a larger context.
The error message:
You are trying to multiply a floating-point number with an instance of a list.
This kind of operation is only well defined for integers, where you get the concatenation of multiple copies of the input list (given a = [1, 2, 3], print(2*a) returns [1, 2, 3, 1, 2, 3]). Hence the error message.
Solution:
You will want to use numpy consistently and especially the vector arithmetic that its array object provides.
As a first point, the ODE function der() that you pass to RK4() should return a NumPy array rather than a plain list:
return np.array( [dx, dy, vx3, vy3] )
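With that change (a sketch only, keeping your variable names), each stage inside RK4() becomes a scalar-times-array product followed by elementwise addition, which is what the method expects:
K1 = dt * func(x[:, ind-1], time)                      # float * ndarray -> ndarray
K2 = dt * func(x[:, ind-1] + .5 * K1, time + .5 * dt)  # elementwise, no list concatenation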
Is this the best way to go about placing an object along the vector created by two other objects in the scene? I hope you guys can help me make this more efficient as it seems very redundant and long for such a simple concept.
Thanks
import maya.cmds as cmds
import random
import math  # for math.sqrt used in normlizedVector below
cmds.select(all=True)
cmds.delete()
#------------------------------TEST SCENE SETUP
def genPos():
    x = random.uniform(-5,5)
    y = random.uniform(0,5)
    z = random.uniform(-5,5)
    return (x, y, z)
a = cmds.spaceLocator(n='ctrl_00')
b = cmds.spaceLocator(n='ctrl_01')
cmds.xform(a, t=(genPos()) )
cmds.xform(b, t=(genPos()) )
cmds.createDisplayLayer(name="Ctrls")
cmds.editDisplayLayerMembers('Ctrls', a, b)
cmds.setAttr('Ctrls.color' ,14)
cmds.select(clear=True)
#-----------------------THE SCRIPT
def normlizedVector(vecA, vecB, offset):
    nX = vecB[0] - vecA[0]
    nY = vecB[1] - vecA[1]
    nZ = vecB[2] - vecA[2]
    #vectorLength = distance vecA vecB
    # find the distance between the two supplied point3 values
    distX = pow( (vecA[0] - vecB[0] ) , 2.0 )
    distY = pow( (vecA[1] - vecB[1] ) , 2.0 )
    distZ = pow( (vecA[2] - vecB[2] ) , 2.0 )
    vecLength = math.sqrt(distX + distY + distZ)
    # the normalized vector is calculated by dividing the X, Y and Z coordinates by the length
    calcX = nX / vecLength
    calcY = nY / vecLength
    calcZ = nZ / vecLength
    # project point along vector, offset by a given value
    ptX = vecB[0] + (calcX * offset)
    ptY = vecB[1] + (calcY * offset)
    ptZ = vecB[2] + (calcZ * offset)
    return (ptX, ptY, ptZ)
posA = cmds.xform(a,q=1,ws=1,rp=1)
posB = cmds.xform(b,q=1,ws=1,rp=1)
pt = normlizedVector(posA,posB,10)
c = cmds.spaceLocator(n='main')
cmds.xform(c, t=(pt) )
posC = cmds.xform(c,q=1,ws=1,rp=1)
cmds.distanceDimension( sp=posB, ep=posC )
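For comparison, the same projection can be written more compactly with Maya's Python API vector class (a sketch only, not tested in your scene; the function name point_along is just an example):
import maya.api.OpenMaya as om

def point_along(posA, posB, offset):
    vecA, vecB = om.MVector(posA), om.MVector(posB)
    direction = (vecB - vecA).normal()   # unit vector pointing from A to B
    pt = vecB + direction * offset       # point 'offset' units past B along that vector
    return (pt.x, pt.y, pt.z)

pt = point_along(posA, posB, 10)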