How to use argmin() and find minimum value from array - python

I'm new to Python, so the code may not be the best. I'm trying to find the minimum total cost (TotalC) and the corresponding m, k, and xM values that go with this minimum cost. I'm not sure how to do this. I have tried using min(TotalC), but this gives an error inside the loop, and outside the loop it only returns the value of TotalC and not the corresponding m, k, and xM values. Any help would be appreciated. This section is at the end of the code; I have included my entire code below.
I have tried using
minIndex = TotalC.argmin()
but I'm not sure how to use it and it only returns 0 each time.
import numpy as np
import matplotlib.pyplot as plt

def Load(x):
    Fpeak = (1000 + (9*(x**2) - (183*x))) * 1000  # Fpeak in N
    td = (20 - ((0.12)*(x**2)) + (4.2*(x))) / 1000  # td in s
    return Fpeak, td

#####################################################################################################
####################### Part 2 ########################
def displacement(m, k, x, dt):  # Displacement function
    Fpeak, td = Load(x)  # Load function from step 1
    w = np.sqrt(k/m)  # Natural circular frequency
    T = 2 * np.pi / w  # Natural period of blast (s)
    time = np.arange(0, 2*T, 0.001)  # Time array with range (0 - 2*T) with steps of 2*T/100
    zt = []  # Create a list to store displacement values
    for t in time:
        if (t <= td):
            zt.append((Fpeak/k) * (1 - np.cos(w*t)) + (Fpeak/(k*td)) * ((np.sin(w*t)/w) - t))
        else:
            zt.append((Fpeak/(k*w*td)) * (np.sin(w*t) - np.sin(w*(t-td))) - ((Fpeak/k) * np.cos(w*t)))
    zmax = max(zt)  # Find the max displacement from the list of zt values
    return zmax  # Return max displacement

k = 1E6
m = 200
dt = 0.0001
x = 0
z = displacement(m, k, x, dt)

###################################################################################
############### Part 3 #######################
# k = 1E6 , m = 200kg , Deflection = 0.1m
k_values = np.arange(1E6, 7E6, ((7E6-1E6)/10))  # List of k values between min and max (1E6 and 7E6).
m_values = np.arange(200, 1200, ((1200-200)/10))  # List of m values between min and max 200kg and 1200kg
xM = []
for k in k_values:  # values of k
    for m in m_values:  # values of m within k for loop
        def bisector(m, k, dpoint, dt):  # dpoint = decimal point accuracy
            xL = 0
            xR = 10
            xM = (xL + xR)/2
            zmax = 99
            while round(zmax, dpoint) != 0.1:
                zmax = displacement(m, k, xM, dt)
                if zmax > 0.1:
                    xL = xM
                    xM = (xL + xR)/2
                else:
                    xR = xM
                    xM = (xL + xR)/2
            return xM
        xM = bisector(m, k, 4, 0.001)
        print('xM value =', xM)

#####################################################
####### Step 4
def cost(m, k, xM):
    Ck = 900 + 825*((k/1E6)**2) - (1725*(k/1E6))
    Cm = 10*m - 2000
    Cx = 2400*((xM**2)/4)
    TotalC = Ck + Cm + Cx
    minIndex = TotalC.argmin(0)
    print(minIndex)
    return TotalC

TotalC = cost(m, k, xM)
minIndex = TotalC.argmin()
print(minIndex)
print([xM, m, k, TotalC])

argmin() returns the index of the minimum value, not the value itself. If you are looking for the minimum itself, try using .min(). There is also a possibility that 0 is genuinely the lowest value in your array, so bear that in mind.
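To also recover the parameters that produce the minimum, one option is to evaluate the cost for every (m, k) combination, store the result alongside the parameters, and only then call argmin() on the array of costs. A rough sketch of that idea, assuming bisector and a cost function that simply returns Ck + Cm + Cx are defined as in the question (the results list is an illustrative name, not from the original code):

import numpy as np

results = []  # one entry per combination: (m, k, xM, total cost)
for k in k_values:
    for m in m_values:
        xM = bisector(m, k, 4, 0.001)
        results.append((m, k, xM, cost(m, k, xM)))

costs = np.array([r[3] for r in results])
min_index = costs.argmin()  # index of the cheapest combination
best_m, best_k, best_xM, best_cost = results[min_index]
print(best_m, best_k, best_xM, best_cost)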

Related

Stuck on Value Function Iteration simulation

I am trying to simulate a model that solves for elastic labor supply in macroeconomics, using Value Function Iteration. However, I am getting an error that says:
runfile('C:/Users/sence/untitled0.py', wdir='C:/Users/sence')
C:\Users\sence\untitled0.py:52: RuntimeWarning: invalid value encountered in subtract
if np.max(np.abs(V - V_new)) < tol:
My code is the following.
import numpy as np
import matplotlib.pyplot as plt

# Set parameters
beta = 0.95   # discount factor
rho = 0.1     # utility weight on leisure
alpha = 0.3   # capital share
k_min = 0     # minimum capital level
k_max = 10    # maximum capital level
n_min = 0     # minimum labor supply
n_max = 1     # maximum labor supply
tol = 1e-6    # convergence tolerance

# Define grid over capital
num_points = 100
k_grid = np.linspace(k_min, k_max, num_points)

# Initialize value function
V = np.zeros(num_points)

# Iterate over value function
while True:
    # Initialize value function for this iteration
    V_new = np.zeros(num_points)
    # Iterate over capital levels
    for i, k in enumerate(k_grid[:-1]):
        # Initialize maximum value and optimal labor supply
        max_value = -np.inf
        opt_n = n_min
        # Iterate over labor supply levels
        for n in np.linspace(n_min, n_max, num_points):
            # Compute consumption
            c = k * alpha * n * (1 - alpha) - k_grid[i + 1]
            # Compute value at this point
            if c > 0 and 0 < n < 1:
                value = np.log(c) + rho * np.log(1 - n) + beta * V[i + 1]
            else:
                value = -np.inf
            # Update maximum value and optimal labor supply
            if value > max_value:
                max_value = value
                opt_n = n
        # Update value function for this iteration
        V_new[i] = max_value
    # Check for convergence
    if np.max(np.abs(V - V_new)) < tol:
        break
    else:
        V = V_new

# Compute policy functions
c_policy = k_grid[:-1] * alpha * opt_n * (1 - alpha) - k_grid[1:]
n_policy = opt_n

# Plot policy functions
plt.plot(k_grid, k_grid, 'k--', label='45 degree line')
plt.plot(k_grid[:-1], c_policy, label='Consumption')
plt.plot(k_grid, n_policy, label='Labor supply')
plt.xlabel('Capital')
plt.ylabel('Policy function')
plt.legend()
plt.show()

k_path = [5]  # start at k0 = 5
for t in range(20):
    # Compute consumption
    c = k_path[-1] * alpha * n_policy * (1 - alpha) - k_path[-1]
    k_path.append(k_path[-1] * (1 - n_policy) + c)

plt.plot(k_path)
plt.xlabel('Time')
plt.ylabel('Capital')
plt.show()
I don't know where I made a mistake. I believe I solved the model correctly and everything should work smoothly, yet I keep getting this warning. Any help would be appreciated; I am unable to solve it on my own.
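One thing worth checking (this is an assumption based on reading the code, not something confirmed in the question): whenever no feasible consumption is found for a capital level, max_value stays at -np.inf, so after the first iteration both V and V_new can hold -inf at the same positions, and -inf - (-inf) evaluates to nan, which is what triggers "invalid value encountered in subtract" inside np.max(np.abs(V - V_new)). A small standalone check of that behaviour:

import numpy as np

V = np.array([0.0, -np.inf, 1.0])
V_new = np.array([0.5, -np.inf, 1.0])

diff = V - V_new                  # emits "RuntimeWarning: invalid value encountered in subtract"
print(diff)                       # [-0.5  nan  0. ]
print(np.isneginf(V_new).any())   # True -> some capital levels never found a feasible (c, n)
print(np.nanmax(np.abs(diff)))    # a convergence measure that ignores the nan entries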

How to find the velocity used to find the maximum distance

Here is my code:
# Libraries
import numpy as np
from scipy.integrate import odeint
from scipy.integrate import quad

# Constant parameters
m_Rb = 1.443*10**-25            # mass of rubidium 87
k_b = 1.38*10**-23
hbar = 1.05*10**-34
L = 38.116*10**6                # spontaneous decay rate
epsilon_0 = 8.85418782*10**-12  # permittivity of free space

# Changeable parameters
lmbda = 700*10**-9   # wavelength of laser light
k = (2*np.pi)/lmbda  # wavevector of laser light
V = 1.25*10**-4      # volume of MOT space
length = 5*10**-2    # length of MOT
Bohr = 9.274*10**-24 # Bohr magneton value
B = 5*10**-4         # magnetic field strength

# Scattering force
I_sat = 1  # intensity
n0 = 1     # refraction constant for medium
E_0 = ((2*I_sat)/(3*10**8*n0*epsilon_0))
Rabi = (E_0/hbar) * (4.2275*1.6*10**-19*0.529*10**-10)

n = 20
delta_omega = np.array([-20*10**6, -15*10**6, -10*10**6, -5*10**6])

def F(v):
    i = 0
    R_i = []
    while i < len(delta_omega):
        delta = delta_omega[i] + (k*v)
        R_scat = L/2 * (Rabi**2/2)/(delta**2+(Rabi**2/2)+(L**2/4))  # scattering rate
        R_i.append(R_scat)
        i = i+1
    R = np.sum(R_i)
    R_total = (L*R)/(2*R + L)
    def dXdt(t, X):
        G, E = X
        dGdt = E*(L+R_total) - G*R_total
        dEdt = G*R_total - E*(L+R_total)
        return [dGdt, dEdt]
    t = np.linspace(0, 5, n)
    solve = odeint(dXdt, [1, 0], t, tfirst=True)
    G = solve.T[0]
    E = solve.T[1]
    F = hbar*k*(G-E)*R_total
    return F

# Stopping distance
ds = []
dt = 0.00001
x = np.zeros(n)
time = np.zeros(n)
#v = np.linspace(-700, 700, n)
v = np.zeros(n)
time[0] = 0
x[0] = 0
v[0] = 800
a = 0
print(n)
for a in np.arange(1, n):
    time[a] = dt*a
    solve = F(v=a)
    F_int = solve[1]
    v[a] = v[a-1] - dt*(F_int/m_Rb)*x[a-1]
    x[a] = x[a-1] + dt*v[a-1]

for s in x:
    if abs(s) <= length and abs(s) > 0:
        ds.append(abs(s))

# Capture velocity
dss = np.max(ds)
I want to find the value of velocity for which ds is at its maximum value (i.e. what is the maximum velocity a particle can have and still not exceed 0.05 m), but I don't know how to do this. I have calculated the distances travelled by a number of particles and then made an array (ds) containing the stopping distances of the particles that stop within the value of length. dss gives the maximum distance a particle has travelled, but it is the corresponding velocity for this value that I want to find.
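One way to recover that velocity, sketched under the assumption that each trial initial velocity and its resulting stopping distance are stored in parallel arrays (simulate_stop_distance and the array names here are hypothetical placeholders for the time-stepping loop above, not functions from the original code):

import numpy as np

trial_velocities = np.linspace(1, 800, 50)  # candidate initial velocities
# simulate_stop_distance is a hypothetical wrapper around the time-stepping loop above
stop_distances = np.array([simulate_stop_distance(v0) for v0 in trial_velocities])

# Only keep trials that actually stop inside the MOT (0 < distance <= length).
valid = (stop_distances > 0) & (stop_distances <= length)

best = np.argmax(np.where(valid, stop_distances, -np.inf))  # index of largest valid distance
capture_velocity = trial_velocities[best]
print(capture_velocity, stop_distances[best])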

Monte Carlo simulation of a system of Lennard-Jones + FENE potential

I want to generate two linear chains of 20 monomers each, at some distance from each other. The following code generates a single chain. Could someone help me with how to generate the second chain?
The two chains are fixed to a surface, i.e. the first monomer of each chain is fixed and the rest of the monomers move freely in the x, y, and z directions, but the z component of the monomers should stay positive.
Something like this:
import numpy as np
import numba as nb
#import pandas as pd

#nb.jit()
def gen_chain(N):
    x = np.zeros(N)
    y = np.zeros(N)
    z = np.linspace(0, (N)*0.9, num=N)
    return np.column_stack((x, y, z)), np.column_stack((x1, y1, z1))
    #coordinates = np.loadtxt('2GN_50_T_10.txt', skiprows=199950)
    #return coordinates

#nb.jit()
def lj(rij2):
    sig_by_r6 = np.power(sigma**2 / rij2, 3)
    sig_by_r12 = np.power(sigma**2 / rij2, 6)
    lje = 4 * epsilon * (sig_by_r12 - sig_by_r6)
    return lje

#nb.jit()
def fene(rij2):
    return (-0.5 * K * np.power(R, 2) * np.log(1 - ((np.sqrt(rij2) - r0) / R)**2))

#nb.jit()
def total_energy(coord):
    # Non-bonded energy.
    e_nb = 0.0
    for i in range(N):
        for j in range(i - 1):
            ri = coord[i]
            rj = coord[j]
            rij = ri - rj
            rij2 = np.dot(rij, rij)
            if (rij2 < rcutoff_sq):
                e_nb += lj(rij2)
    # Bonded FENE potential energy.
    e_bond = 0.0
    for i in range(1, N):
        ri = coord[i]
        rj = coord[i - 1]  # Can be [i+1] ??
        rij = ri - rj
        rij2 = np.dot(rij, rij)
        e_bond += fene(rij2)
    return e_nb + e_bond

#nb.jit()
def move(coord):
    trial = np.ndarray.copy(coord)
    for i in range(1, N):
        while True:
            delta = (2 * np.random.rand(3) - 1) * max_delta
            trial[i] += delta
            #while True:
            if trial[i,2] > 0.0:
                break
            trial[i] -= delta
    return trial

#nb.jit()
def accept(delta_e):
    beta = 1.0 / T
    if delta_e < 0.0:
        return True
    random_number = np.random.rand(1)
    p_acc = np.exp(-beta * delta_e)
    if random_number < p_acc:
        return True
    return False

if __name__ == "__main__":
    # FENE potential parameters.
    K = 40.0
    R = 0.3
    r0 = 0.7
    # L-J potential parameters
    sigma = 0.5716
    epsilon = 1.0
    # MC parameters
    N = 20  # Number of monomers
    rcutoff = 2.5 * sigma
    rcutoff_sq = rcutoff * rcutoff
    max_delta = 0.01
    n_steps = 100000
    T = 10
    # MAIN PART OF THE CODE
    coord = gen_chain(N)
    energy_current = total_energy(coord)
    traj = open('2GN_20_T_10.xyz', 'w')
    traj_txt = open('2GN_20_T_10.txt', 'w')
    for step in range(n_steps):
        if step % 1000 == 0:
            traj.write(str(N) + '\n\n')
            for i in range(N):
                traj.write("C %10.5f %10.5f %10.5f\n" % (coord[i][0], coord[i][1], coord[i][2]))
                traj_txt.write("%10.5f %10.5f %10.5f\n" % (coord[i][0], coord[i][1], coord[i][2]))
            print(step, energy_current)
        coord_trial = move(coord)
        energy_trial = total_energy(coord_trial)
        delta_e = energy_trial - energy_current
        if accept(delta_e):
            coord = coord_trial
            energy_current = energy_trial
    traj.close()
I expect the chain of particles to collapse into a globule.
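As a minimal sketch of how a second chain could be generated (assuming the second chain is simply the first one shifted along x by some surface spacing; gen_two_chains, spacing, and bond are illustrative names, not part of the code above):

import numpy as np

def gen_two_chains(N, spacing=2.0, bond=0.9):
    # First chain grows along z from the surface (z >= 0), anchored at the origin.
    z = np.linspace(0, (N - 1) * bond, num=N)
    chain1 = np.column_stack((np.zeros(N), np.zeros(N), z))
    # Second chain: same geometry, shifted along x by `spacing`.
    chain2 = chain1 + np.array([spacing, 0.0, 0.0])
    return np.concatenate((chain1, chain2))  # shape (2*N, 3)

coords = gen_two_chains(20)
print(coords.shape)  # (40, 3)

The energy and move routines would then also need to treat the two chains separately, e.g. only apply the FENE bond between consecutive monomers of the same chain and keep the first monomer of each chain fixed.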
There is some problem with the logic of the MC you are implementing.
To perform an MC step you need to ATTEMPT a move, evaluate the energy of the new state, and then accept/reject according to a random number.
In your code there is not the slightest sign of an attempt to move a particle.
You need to move one (or more) of them, evaluate the energy, and then update your coordinates.
By the way, I suppose this is not your entire code. There are many parameters that are not defined, like the "k" and the "R0" in your fene potential.
The FENE potential models bond interactions. What your code is saying is that all particles within the cutoff are bonded by FENE springs, and that the bonds are not fixed but rather defined by the cutoff. With r_cutoff = 3.0, larger than the equilibrium distance of the LJ well, you are essentially considering that each particle is bonded to potentially many others. You are treating the FENE potential as a non-bonded one.
For the bond interactions you should ignore the cutoff and only evaluate the energy for the pairs that are actually bonded according to your topology, which means that first you need to define a topology. I suggest generating a linear molecule of N atoms in a box big enough to contain the whole stretched molecule, and considering the i-th atom as bonded to the (i-1)-th atom, with i = 2, ..., N. In this way the topology is well defined and persistent. Then consider the two interactions separately, non-bonded and bonded, and add them at the end.
Something like this, in pseudo-code:
e_nb = 0
for particle i = 1 to N:
    for particle j = 1 to i-1:
        if (dist(i, j) < rcutoff):
            e_nb += lj(i, j)

e_bond = 0
for particle i = 2 to N:
    e_bond += fene(i, i-1)

e_tot = e_nb + e_bond
Below you can find a modified version of your code. To make things simpler, in this version there is no box and no boundary conditions, just a chain in free space. The chain is initialized as a linear sequence of particles each distant 80% of R0 from the next, since R0 is the maximum length of the FENE bond. The code considers that particle i is bonded with i+1 and the bond is not broken. This code is just a proof of concept.
#!/usr/bin/python
import numpy as np

def gen_chain(N, R):
    x = np.linspace(0, (N-1)*R*0.8, num=N)
    y = np.zeros(N)
    z = np.zeros(N)
    return np.column_stack((x, y, z))

def lj(rij2):
    sig_by_r6 = np.power(sigma/rij2, 3)
    sig_by_r12 = np.power(sig_by_r6, 2)
    lje = 4.0 * epsilon * (sig_by_r12 - sig_by_r6)
    return lje

def fene(rij2):
    return (-0.5 * K * R0**2 * np.log(1-(rij2/R0**2)))

def total_energy(coord):
    # Non-bonded
    e_nb = 0
    for i in range(N):
        for j in range(i-1):
            ri = coord[i]
            rj = coord[j]
            rij = ri - rj
            rij2 = np.dot(rij, rij)
            if (rij2 < rcutoff):
                e_nb += lj(rij2)
    # Bonded
    e_bond = 0
    for i in range(1, N):
        ri = coord[i]
        rj = coord[i-1]
        rij = ri - rj
        rij2 = np.dot(rij, rij)
        e_bond += fene(rij2)
    return e_nb + e_bond

def move(coord):
    trial = np.ndarray.copy(coord)
    for i in range(N):
        delta = (2.0 * np.random.rand(3) - 1) * max_delta
        trial[i] += delta
    return trial

def accept(delta_e):
    beta = 1.0/T
    if delta_e <= 0.0:
        return True
    random_number = np.random.rand(1)
    p_acc = np.exp(-beta*delta_e)
    if random_number < p_acc:
        return True
    return False

if __name__ == "__main__":
    # FENE parameters
    K = 40
    R0 = 1.5
    # LJ parameters
    sigma = 1.0
    epsilon = 1.0
    # MC parameters
    N = 50  # number of particles
    rcutoff = 3.5
    max_delta = 0.01
    n_steps = 10000000
    T = 1.5
    coord = gen_chain(N, R0)
    energy_current = total_energy(coord)
    traj = open('traj.xyz', 'w')
    for step in range(n_steps):
        if step % 1000 == 0:
            traj.write(str(N) + '\n\n')
            for i in range(N):
                traj.write("C %10.5f %10.5f %10.5f\n" % (coord[i][0], coord[i][1], coord[i][2]))
            print(step, energy_current)
        coord_trial = move(coord)
        energy_trial = total_energy(coord_trial)
        delta_e = energy_trial - energy_current
        if accept(delta_e):
            coord = coord_trial
            energy_current = energy_trial
    traj.close()
The code prints the current configuration periodically; you can just load it up in VMD and see how it behaves. The bonds will not show correctly at first in VMD; you must use a bead representation for the particles and define the bonds manually or with a script within VMD. In any case, you don't need to see the bonds to notice that the chain does not collapse.
Please bear in mind that if you want to simulate a chain at a certain density, you need to be careful to generate the correct topology. I recommend the EMC package to efficiently generate polymers at the desired thermodynamic conditions. It is by no means a trivial problem, especially for larger chains.
By the way, your code had an error in the FENE energy evaluation: rij2 is already the squared distance, but you squared it again.
Below you can see how the total energy as a function of the number of steps behaves for T = 1.0, N = 20, rcutoff = 3.5, and also the last current configuration after 10 thousand steps.
And below for N = 50, T = 1.5, max_delta = 0.01, K = 40, R = 1.5, rcutoff = 3.5, and 10 million steps. This is the last current configuration.
The full "trajectory", which isn't really a trajectory since this is MC, you can find here (it's under 6 MB).

Need help vectorizing a mathematical model written in python

I've recently written a Python implementation of the Beddington-DeAngelis model, which is used for modeling populations of predators and prey.
My issue is that the code is extremely slow: 10,000 iterations take 230 seconds, and this program has to be able to iterate 1 million times in a reasonable time frame.
I understand that I could just rewrite in C, since this is mostly just math, but I really want to learn how to properly vectorize a program like this in python.
The situation, to simplify, is that I have two arrays of shape 200x200, and I need to update every element of each array using the element at the same index in the other array plus the surrounding elements of the same array. So, for example, when I'm working on a[1][1] I will also need:
b[1][1]
a[0][1]
a[2][1]
a[1][0]
a[1][2]
The entire operation should be fully vectorizable, because I am changing all 200x200x2 elements in a single time step.
So how would I call this function to get these indexes?
Any advice would be greatly appreciated.
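For what it's worth, the usual trick for this kind of 5-point stencil is to take shifted slices of the whole array instead of indexing element by element; here is a minimal sketch of the idea (essentially what the vectorized code in the EDIT below does), assuming the border rows and columns are handled separately:

import numpy as np

a = np.random.rand(200, 200)
h = 0.25

center = a[1:-1, 1:-1]
left   = a[1:-1, :-2]
right  = a[1:-1, 2:]
up     = a[:-2, 1:-1]
down   = a[2:, 1:-1]

# Discrete Laplacian of every interior element in one shot, no Python loops.
laplacian = (left + right + up + down - 4 * center) / h**2
print(laplacian.shape)  # (198, 198)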
Full code for context: (it looks intimidating, but is actually really straightforward)
import numpy as np
import copy
import time

def get_cell_zero_flux(m,i,j,x,y,prey):
    """
    Fetch an array element that could be outside of the border
    """
    if i >= x or i < 0 or j >= y or j < 0:
        if prey: return 0.43058
        else: return 0.718555
    return m[i][j]

def get_laplacian(n,i,j,x,y,neighbors,h,prey):
    """
    Generate the laplacian value for the given element
    """
    total = 0
    for ng in neighbors:
        cell = get_cell_zero_flux(n,i+ng[0],j+ng[1],x,y,prey)
        total += cell
    return (total - 4*n[i][j]) / (h**2)

def next_n(n,p,nl,pl,d11,d12,d21,d22,t,r,e,beta,k,ni,w,b):
    """
    Integrate prey population function
    """
    return n + t * (r * ( 1 - n / k ) * n
                    - beta * n / ( b + n + w * p ) * p + d11 * nl + d12 * pl)

def next_p(n,p,nl,pl,d11,d12,d21,d22,t,r,e,beta,k,ni,w,b):
    """
    Integrate predator population function
    """
    return p + t * (e * beta * n / ( b + n + w * p )
                    * p - ni * p + d21 * nl + d22 * pl)

def generate_preys(x,y,epsilon,n_start):
    """
    Generate the initial population of preys
    """
    n = np.random.rand(x, y)
    n = np.interp(n,(n.min(),n.max()),(-epsilon/2,epsilon/2))
    n = n + n_start
    return n

def generate_predators(x,y,p_start):
    """
    Generate the initial population of predators
    """
    p = np.ones((x,y))
    p.fill(p_start)
    return p

def generate_n(n0,n,p,x,y,neighbors,h,d11,d12,t,r,e,beta,k,ni,w,b):
    """
    Vectorized element iteration attempt for preys
    """
    i,j = np.where(n==n0) # this wouldnt work, need the current element
    n_laplacian = get_laplacian(n,i,j,x,y,neighbors,h,True)
    p_laplacian = get_laplacian(p,i,j,x,y,neighbors,h,False)
    p0 = p[i,j]
    return next_n(n0,p0,laplacian,d11,d12,t,r,e,beta,k,ni,w,b)

def generate_p(p0,p,n,x,y,neighbors,h,d21,d22,t,r,e,beta,k,ni,w,b):
    """
    Vectorized element iteration attempt for predators
    """
    i,j = np.where(p==p0) # this wouldnt work, need the current element
    n_laplacian = get_laplacian(n,i,j,x,y,neighbors,h,True)
    p_laplacian = get_laplacian(p,i,j,x,y,neighbors,h,False)
    n0 = n[i,j]
    return next_p(n0,p0,n_laplacian,
                  p_laplacian,d11,d12,d21,d22,t,r,e,beta,k,ni,w,b)

def generate_system(x,y,h,d11,d12,d21,d22,t,r,e,
                    beta,k,ni,w,b,ite,n_start,p_start,epsilon):
    """
    System generation
    """
    # Initial distribution
    n = generate_preys(x,y,epsilon,n_start)
    p = generate_predators(x,y,p_start)
    #n = n.tolist()
    #p = p.tolist()
    ps = []
    ns = []
    # neighbor list for easy laplacian neighbor fetch
    neighbors = [[-1,0],[1,0],[0,1],[0,-1]]
    t1 = time.time()
    for it in range(ite):
        # record each iteration
        old_n = copy.copy(n)
        old_p = copy.copy(p)
        ns.append(old_n)
        ps.append(old_p)
        # main array element iteration for prey and predator arrays
        for i in range(x):
            for j in range(y):
                n_laplacian = get_laplacian(old_n,i,j,x,y,neighbors,h,True)
                p_laplacian = get_laplacian(old_p,i,j,x,y,neighbors,h,False)
                n0 = old_n[i][j]
                p0 = old_p[i][j]
                n[i][j] = next_n(n0,p0,n_laplacian,p_laplacian,
                                 d11,d12,d21,d22,t,r,e,beta,k,ni,w,b)
                p[i][j] = next_p(n0,p0,n_laplacian,p_laplacian,
                                 d11,d12,d21,d22,t,r,e,beta,k,ni,w,b)
        """
        n = generate_n(old_n,old_n,old_p,x,y,neighbors,
                       h,d11,d12,t,r,e,beta,k,ni,w,b)
        p = generate_p(old_p,old_p,old_n,x,y,neighbors,
                       h,d21,d22,t,r,e,beta,k,ni,w,b)
        """
    t2 = time.time()
    print(t2-t1)
    return ns,ps

ns,ps = generate_system(x=50,y=50,h=0.25,d11=0.01,d12=0.0115,d21=0.01,d22=1,
                        t=0.01,r=0.5,e=1,beta=0.6,k=2.6,ni=0.25,w=0.4,b=0.3154,
                        ite=10,n_start=0.43058,p_start=0.718555,epsilon=0.001)
The expected performance is 1 million iterations in a few minutes on a 200x200 grid, but it is taking 230 seconds just for 10,000 iterations on a 40x40 grid.
EDIT
I managed to vectorize the whole program. The performance boost was about 400-fold. WOW
Here is the new code:
import numpy as np
import copy
import time

def next_n(n,p,nl,pl,d11,d12,d21,d22,t,r,e,beta,k,ni,w,b):
    """
    Integrate prey population function
    """
    return n + t * (r * ( 1 - n / k ) * n
                    - beta * n / ( b + n + w * p ) * p + d11 * nl + d12 * pl)

def next_p(n,p,nl,pl,d11,d12,d21,d22,t,r,e,beta,k,ni,w,b):
    """
    Integrate predator population function
    """
    return p + t * (e * beta * n / ( b + n + w * p )
                    * p - ni * p + d21 * nl + d22 * pl)

def generate_preys(x,y,epsilon,n_start):
    """
    Generate the initial population of preys
    """
    n = np.random.rand(x, y)
    n = np.interp(n,(n.min(),n.max()),(-epsilon/2,epsilon/2))
    n = n + n_start
    n[0,:] = n_start
    n[-1:,:] = n_start
    n[:,0] = n_start
    n[:,-1:] = n_start
    return n

def generate_predators(x,y,p_start):
    """
    Generate the initial population of predators
    """
    p = np.ones((x,y))
    p.fill(p_start)
    return p

def get_laps(a,x,y,h):
    center = a[1:-1,1:-1]
    left = a[1:-1,0:-2]
    right = a[1:-1,2:]
    top = a[0:-2,1:-1]
    bottom = a[2:,1:-1]
    return (left+right+top+bottom - 4*center) / (h**2)

def generate_system(x,y,h,d11,d12,d21,d22,t,r,e,
                    beta,k,ni,w,b,ite,n_start,p_start,epsilon):
    """
    System generation
    """
    # Initial distribution
    n = generate_preys(x+2,y+2,epsilon,n_start)
    p = generate_predators(x+2,y+2,p_start)
    ps = []
    ns = []
    t1 = time.time()
    for it in range(ite):
        if it % 10000 == 0:
            print(f"iterations passed: {it}")
        ns.append(copy.copy(n))
        ps.append(copy.copy(p))
        # record each iteration
        nl = get_laps(n,x,y,h)
        pl = get_laps(p,x,y,h)
        nc = n[1:-1,1:-1]
        pc = p[1:-1,1:-1]
        n[1:-1,1:-1] = next_n(nc,pc,nl,pl,d11,d12,d21,d22,t,r,e,beta,k,ni,w,b)
        p[1:-1,1:-1] = next_p(nc,pc,nl,pl,d11,d12,d21,d22,t,r,e,beta,k,ni,w,b)
    t2 = time.time()
    print(f"Time taken: {t2-t1}")
    return ns,ps

ns,ps = generate_system(x=200,y=200,h=0.25,d11=0.01,d12=0.0115,d21=0.01,d22=1,
                        t=0.01,r=0.5,e=1,beta=0.6,k=2.6,ni=0.25,w=0.4,b=0.3154,
                        ite=100,n_start=0.43058,p_start=0.718555,epsilon=0.001)

How to Fix Index Error in Differential Equation?

I am trying to create a program that solves the mass-spring-damper system using backward differencing. The only problem is that I am running into an index error that I am not sure how to solve:
import numpy as np
import matplotlib.pyplot as plt

def MSD_Solver(m,b,K):
    #input: m = mass, b = damping ratio, K = spring constant
    #output: (t,x) time vs position
    tinitial = 0
    tfinal = 15
    step = .005
    t = np.linspace(tinitial,tfinal,step)
    x = np.zeros_like(t)
    x[0] = 0
    x[1] = 0
    for k in range(len(t)-1):  # extra element so subtract by 1
        x[k] = (t**2)/((m+b)*t+(t**2)*k) + (x[k-2](-m))/((m+b)*t+(t**2)*k) + (x[k-1]((2*m)+(b*t)))/((m+b)*t+(t**2)*k)
    return plt.plot(t,x)

print(MSD_Solver(1,.5,5)), MSD_Solver(1,1,5), MSD_Solver(1,2,5)
plt.show()
The linspace doc shows that the third argument is the number of items, not the step. Your step value got truncated to 0, so the returned array for t was empty. As a result, x has no elements, and x[0] is out of range.
Try this:
tinitial = 0
tfinal = 15
step = .005
num = (tfinal - tinitial) / step + 1
t = np.linspace(tinitial,tfinal,num)
This will get you to the semantic errors in your complex computation.
You probably(?) want to use first and second order difference quotients to discretize
m*x''(t) + b*x'(t) + K*x(t) = 1
to
m*(x[j+1]-2*x[j]+x[j-1]) + 0.5*dt*b*(x[j+1]-x[j-1]) + dt**2*K*x[j] = dt**2
so that
x[j+1] = ( dt**2 + (2*m-K*dt**2)*x[j] - (m-0.5*dt*b)*x[j-1] ) / (m+0.5*dt*b)
In code
def MSD_Solver(m,b,K):
    #input: m = mass, b = damping ratio, K = spring constant
    #output: (t,x) time vs position
    tinitial = 0
    tfinal = 15
    step = .005
    t = np.arange(tinitial,tfinal,step)
    x = np.zeros_like(t)
    dt = t[1]-t[0]  # use the actual time step
    x[0:2] = [0, 0]
    for j in range(1,len(t)-1):
        x[j+1] = ( dt**2 + (2*m-K*dt**2)*x[j] - (m-0.5*dt*b)*x[j-1] ) / (m+0.5*dt*b)
    return t,x

t,x = MSD_Solver(1,.5,5)
plt.plot(t,x); plt.show()
