Grid search over a function - Python
The function HH_model(I, area_factor) returns the number of spikes triggered over n runs. For example, with 1000 runs, if max(v[:] - v_Rest) > 60 in 157 of them, HH_model(I, area_factor) returns 157.
I also know value pairs from another model: the x-values are related to the stimulus I, and the y-values are the numbers of spikes. I have written these values as a comment under the code. I want to choose the input parameters I and area_factor so that the error with respect to this data is as small as possible, but I have no idea how to set up this optimization.
import matplotlib.pyplot as py
import numpy as np
import scipy.optimize as optimize
# HH parameters
v_Rest = -65 # in mV
gNa = 1200 # in mS/cm^2
gK = 360 # in mS/cm^2
gL = 0.3*10 # in mS/cm^2
vNa = 115 # in mV
vK = -12 # in mV
vL = 10.6 # in mV
#Number of runs
runs = 1000
c = 1 # in uF/cm^2
ROOT = False
def HH_model(I, area_factor):
    count = 0
    t_end = 10 # in ms
    delay = 0.1 # in ms
    duration = 0.1 # in ms
    dt = 0.0025 # in ms
    # geometry
    d = 2 # diameter in um
    r = d/2 # radius in um
    l = 10 # length of the compartment in um
    A = (1*10**(-8))*area_factor # surface [cm^2]
    C = c*A # uF
    for j in range(0, runs):
        # Introduction of equations and channels
        def alphaM(v): return 12 * ((2.5 - 0.1 * (v)) / (np.exp(2.5 - 0.1 * (v)) - 1))
        def betaM(v): return 12 * (4 * np.exp(-(v) / 18))
        def betaH(v): return 12 * (1 / (np.exp(3 - 0.1 * (v)) + 1))
        def alphaH(v): return 12 * (0.07 * np.exp(-(v) / 20))
        def alphaN(v): return 12 * ((1 - 0.1 * (v)) / (10 * (np.exp(1 - 0.1 * (v)) - 1)))
        def betaN(v): return 12 * (0.125 * np.exp(-(v) / 80))
        # compute the number of timesteps
        t_steps = t_end/dt + 1
        # compute the initial values
        v0 = 0
        m0 = alphaM(v0)/(alphaM(v0)+betaM(v0))
        h0 = alphaH(v0)/(alphaH(v0)+betaH(v0))
        n0 = alphaN(v0)/(alphaN(v0)+betaN(v0))
        # allocate memory for v, m, h, n
        v = np.zeros((int(t_steps), 1))
        m = np.zeros((int(t_steps), 1))
        h = np.zeros((int(t_steps), 1))
        n = np.zeros((int(t_steps), 1))
        # set initial values
        v[:, 0] = v0
        m[:, 0] = m0
        h[:, 0] = h0
        n[:, 0] = n0
        ### Noise component
        knoise = 0.0005 # uA/(mS)^1/2
        ### --------- Step 3: SOLVE
        for i in range(0, int(t_steps)-1, 1):
            # get current states
            vT = v[i]
            mT = m[i]
            hT = h[i]
            nT = n[i]
            # stimulus current
            if delay / dt <= i <= (delay + duration) / dt:
                IStim = I # in uA
            else:
                IStim = 0
            # compute change of m, h and n
            m[i + 1] = (mT + dt * alphaM(vT)) / (1 + dt * (alphaM(vT) + betaM(vT)))
            h[i + 1] = (hT + dt * alphaH(vT)) / (1 + dt * (alphaH(vT) + betaH(vT)))
            n[i + 1] = (nT + dt * alphaN(vT)) / (1 + dt * (alphaN(vT) + betaN(vT)))
            # ionic currents
            iNa = gNa * m[i + 1] ** 3. * h[i + 1] * (vT - vNa)
            iK = gK * n[i + 1] ** 4. * (vT - vK)
            iL = gL * (vT - vL)
            Inoise = (np.random.normal(0, 1) * knoise * np.sqrt(gNa * A))
            IIon = ((iNa + iK + iL) * A) + Inoise
            # compute change of voltage
            v[i + 1] = (vT + ((-IIon + IStim) / C) * dt)[0] # in ((uA / cm^2) / (uF / cm^2)) * ms == mV
        # adjust the voltage to the resting potential
        v = v + v_Rest
        # test if there was a spike
        if max(v[:] - v_Rest) > 60:
            count += 1
    return count
# some data points from another model, out of 1000 runs; ydata is therefore the 'count' out of 1000 runs.
# xdata = np.array([0.92*I,0.925*I,0.9535*I,0.975*I,0.9789*I,I,1.02*I,1.043*I,1.06*I,1.078*I,1.09*I])
# ydata = np.array([150,170,269,360,377,500,583,690,761,827,840])
EDIT:
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
# HH parameters
v_Rest = -65 # in mV
gNa = 120 # in mS/cm^2
gK = 36 # in mS/cm^2
gL = 0.3 # in mS/cm^2
vNa = 115 # in mV
vK = -12 # in mV
vL = 10.6 # in mV
#Number of runs
runs = 1000
c = 1 # in uF/cm^2
def HH_model(x, I, area_factor):
    count = 0
    t_end = 10 # in ms
    delay = 0.1 # in ms
    duration = 0.1 # in ms
    dt = 0.0025 # in ms
    # geometry
    d = 2 # diameter in um
    r = d/2 # radius in um
    l = 10 # length of the compartment in um
    A = (1*10**(-8))*area_factor # surface [cm^2]
    I = I*x
    C = c*A # uF
    for j in range(0, runs):
        # Introduction of equations and channels
        def alphaM(v): return 12 * ((2.5 - 0.1 * (v)) / (np.exp(2.5 - 0.1 * (v)) - 1))
        def betaM(v): return 12 * (4 * np.exp(-(v) / 18))
        def betaH(v): return 12 * (1 / (np.exp(3 - 0.1 * (v)) + 1))
        def alphaH(v): return 12 * (0.07 * np.exp(-(v) / 20))
        def alphaN(v): return 12 * ((1 - 0.1 * (v)) / (10 * (np.exp(1 - 0.1 * (v)) - 1)))
        def betaN(v): return 12 * (0.125 * np.exp(-(v) / 80))
        # compute the number of timesteps
        t_steps = t_end/dt + 1
        # compute the initial values
        v0 = 0
        m0 = alphaM(v0)/(alphaM(v0)+betaM(v0))
        h0 = alphaH(v0)/(alphaH(v0)+betaH(v0))
        n0 = alphaN(v0)/(alphaN(v0)+betaN(v0))
        # allocate memory for v, m, h, n
        v = np.zeros((int(t_steps), 1))
        m = np.zeros((int(t_steps), 1))
        h = np.zeros((int(t_steps), 1))
        n = np.zeros((int(t_steps), 1))
        # set initial values
        v[:, 0] = v0
        m[:, 0] = m0
        h[:, 0] = h0
        n[:, 0] = n0
        ### Noise component
        knoise = 0.0005 # uA/(mS)^1/2
        ### --------- Step 3: SOLVE
        for i in range(0, int(t_steps)-1, 1):
            # get current states
            vT = v[i]
            mT = m[i]
            hT = h[i]
            nT = n[i]
            # stimulus current
            if delay / dt <= i <= (delay + duration) / dt:
                IStim = I # in uA
            else:
                IStim = 0
            # compute change of m, h and n
            m[i + 1] = (mT + dt * alphaM(vT)) / (1 + dt * (alphaM(vT) + betaM(vT)))
            h[i + 1] = (hT + dt * alphaH(vT)) / (1 + dt * (alphaH(vT) + betaH(vT)))
            n[i + 1] = (nT + dt * alphaN(vT)) / (1 + dt * (alphaN(vT) + betaN(vT)))
            # ionic currents
            iNa = gNa * m[i + 1] ** 3. * h[i + 1] * (vT - vNa)
            iK = gK * n[i + 1] ** 4. * (vT - vK)
            iL = gL * (vT - vL)
            Inoise = (np.random.normal(0, 1) * knoise * np.sqrt(gNa * A))
            IIon = ((iNa + iK + iL) * A) + Inoise
            # compute change of voltage
            v[i + 1] = (vT + ((-IIon + IStim) / C) * dt)[0] # in ((uA / cm^2) / (uF / cm^2)) * ms == mV
        # adjust the voltage to the resting potential
        v = v + v_Rest
        # test if there was a spike
        if max(v[:] - v_Rest) > 60:
            count += 1
    return count
def loss(parameters, model, x_ref, y_ref):
    # unpack multiple parameters
    I, area_factor = parameters
    # compute prediction; scale the spike counts to [0, 1] so that they are
    # comparable to y_ref, which is normalised by the number of runs
    y_predicted = np.array([model(x, I, area_factor) for x in x_ref]) / runs
    # compute error and use it as loss
    mse = ((y_ref - y_predicted) ** 2).mean()
    return mse
# some data points from another model, out of 1000 runs; ydata is therefore the 'count' out of 1000 runs.
xdata = np.array([0.92,0.925,0.9535, 0.975, 0.9789, 1])
ydata = np.array([150,170,269, 360, 377, 500])
y_data_scaled = ydata / runs
y_predicted = np.array([HH_model(x, I=10**(-3), area_factor=1) for x in xdata]) / runs # scale counts to match y_data_scaled
parameters = (10**(-3), 1)
mse0 = loss(parameters, HH_model, xdata, y_data_scaled)
# compute the parameters that minimize the loss (i.e., the error between the data and the predictions of the model)
optimum = minimize(loss, x0=np.array([10**(-3), 1]), args=(HH_model, xdata, y_data_scaled))
# compute the predictions with the optimized parameters
I = optimum['x'][0]
area_factor = optimum['x'][1]
y_predicted_opt = np.array([HH_model(x, I, area_factor) for x in xdata]) / runs # scale counts to match y_data_scaled
# plot the raw data, the model with handcrafted guess and the model with optimized parameters
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('input')
ax.set_ylabel('output predictions')
ax.plot(xdata, y_data_scaled, marker='o')
ax.plot(xdata, y_predicted, marker='*')
ax.plot(xdata, y_predicted_opt, marker='v')
ax.legend([
    "raw data points",
    "initial guess",
    "predictions with optimized parameters"
])
I started using your function, but it was very slow to execute,
so I decided to demonstrate the process with a toy (linear) model instead.
The process remains the same.
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
def loss(parameters, model, x_ref, y_ref):
    # unpack multiple parameters
    m, q = parameters
    # compute prediction
    y_predicted = np.array([model(x, m, q) for x in x_ref])
    # compute error and use it as loss
    mse = ((y_ref - y_predicted) ** 2).mean()
    return mse
# load a dataset to fit a model
x_data = np.array([0.92, 0.925, 0.9535, 0.975, 0.9789, 1, 1.02, 1.043, 1.06, 1.078, 1.09])
y_data = np.array([150, 170, 269, 360, 377, 500, 583, 690, 761, 827, 840])
# normalise the data - input is already normalised
y_data_scaled = y_data / 1000
# create a model (linear, as an example) using handcrafted parameters, ex:(1,1)
linear_fun = lambda x, m, q: m * x + q
y_predicted = np.array([linear_fun(x, m=1, q=1) for x in x_data])
# a function that, given a model (linear_fun), a dataset (x, y) and the parameters, computes the error
parameters = (1, 1)
mse0 = loss(parameters, linear_fun, x_data, y_data_scaled)
# compute the parameters that minimize the loss (i.e., the error between the data and the predictions of the model)
optimum = minimize(loss, x0=np.array([1, 1]), args=(linear_fun, x_data, y_data_scaled))
# compute the predictions with the optimized parameters
m = optimum['x'][0]
q = optimum['x'][1]
y_predicted_opt = np.array([linear_fun(x, m, q) for x in x_data])
# plot the raw data, the model with handcrafted guess and the model with optimized parameters
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('input')
ax.set_ylabel('output predictions')
ax.plot(x_data, y_data_scaled, marker='o')
ax.plot(x_data, y_predicted, marker='*')
ax.plot(x_data, y_predicted_opt, marker='v')
ax.legend([
    "raw data points",
    "initial guess",
    "predictions with optimized parameters"
])
# Note 1: good practice is to validate your model with a different set of data
# from the one that you used to find the parameters;
# here, however, only the optimization procedure is shown.
# Note 2: in your case, use HH_model instead of linear_fun,
# and I and area_factor instead of m and q.
Output: (figure: raw data, initial guess, and predictions with optimized parameters)
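To make Note 1 concrete, here is a minimal holdout sketch; the 8/3 split point is arbitrary, and it reuses loss, linear_fun, x_data and y_data_scaled from the block above:
# fit on the first 8 points, validate on the remaining 3 (arbitrary split)
x_fit, y_fit = x_data[:8], y_data_scaled[:8]
x_val, y_val = x_data[8:], y_data_scaled[8:]
optimum = minimize(loss, x0=np.array([1, 1]), args=(linear_fun, x_fit, y_fit))
m_opt, q_opt = optimum['x']
# the held-out error indicates how well the fitted line generalises
print("validation MSE:", loss((m_opt, q_opt), linear_fun, x_val, y_val))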
-- EDIT: To use the HH_model:
I went deeper into your code, tried a few values for area and stimulus, and executed a single run of HH_model without applying the spike threshold.
Then I checked the predicted voltage dynamics (v): the sequence always diverges (all values become NaN after a few steps).
If you have an initial guess for stimulus and area that makes the code work, great.
If you have no idea of the order of magnitude of these parameters, the only solution I see is a grid search over them, just to find this initial guess (a sketch follows below); however, it might take a very long time with no guarantee of success.
Given that the code is based on a physical model, I would suggest to:
1 - find reasonable values with pen and paper.
2 - check that the simulation works for these values.
3 - then run the optimizer to find the minimum.
Or, worst case, reverse-engineer the code and find the values that make the equations converge.
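Because HH_model returns a noisy integer spike count, the loss surface is piecewise constant, so a gradient-based minimize can easily stall; a coarse grid is a robust way to locate a starting point. Here is a minimal grid-search sketch, assuming the refactored HH_model below; the log-spaced candidate ranges and the reduced number of runs are purely illustrative guesses:
import numpy as np
# data from the question (counts out of 1000 runs, normalised to [0, 1])
xdata = np.array([0.92, 0.925, 0.9535, 0.975, 0.9789, 1.0])
ydata = np.array([150, 170, 269, 360, 377, 500]) / 1000
stimulus_grid = np.logspace(-4, 2, 7)  # candidate base stimuli (guesses)
area_grid = np.logspace(-1, 3, 5)      # candidate area factors (guesses)
runs_coarse = 20                       # few runs per candidate: a rough guess suffices
best = (np.inf, None, None)
for stim in stimulus_grid:
    for af in area_grid:
        pred = np.array([HH_model(x * stim, af, runs=runs_coarse)
                         for x in xdata]) / runs_coarse
        mse = ((ydata - pred) ** 2).mean()
        if mse < best[0]:
            best = (mse, stim, af)
print("coarse optimum (mse, stimulus, area_factor):", best)
# use (best[1], best[2]) as x0 for scipy.optimize.minimize afterwards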
Here is the refactored code:
import math
import numpy as np
# HH parameters
v_Rest = -65 # in mV
gNa = 1200 # in mS/cm^2
gK = 360 # in mS/cm^2
gL = 0.3 * 10 # in mS/cm^2
vNa = 115 # in mV
vK = -12 # in mV
vL = 10.6 # in mV
# Number of runs
c = 1 # in uF/cm^2
# Introduction of equations and channels
def alphaM(v):
    return 12 * ((2.5 - 0.1 * (v)) / (np.exp(2.5 - 0.1 * (v)) - 1))

def betaM(v):
    return 12 * (4 * np.exp(-(v) / 18))

def betaH(v):
    return 12 * (1 / (np.exp(3 - 0.1 * (v)) + 1))

def alphaH(v):
    return 12 * (0.07 * np.exp(-(v) / 20))

def alphaN(v):
    return 12 * ((1 - 0.1 * (v)) / (10 * (np.exp(1 - 0.1 * (v)) - 1)))

def betaN(v):
    return 12 * (0.125 * np.exp(-(v) / 80))
def predict_voltage(A, C, delay, dt, duration, stimulus, t_end):
    # compute the number of timesteps
    t_steps = t_end / dt + 1
    # compute the initial values
    v0 = 0
    m0 = alphaM(v0) / (alphaM(v0) + betaM(v0))
    h0 = alphaH(v0) / (alphaH(v0) + betaH(v0))
    n0 = alphaN(v0) / (alphaN(v0) + betaN(v0))
    # allocate memory for v, m, h, n
    v = np.zeros((int(t_steps), 1))
    m = np.zeros((int(t_steps), 1))
    h = np.zeros((int(t_steps), 1))
    n = np.zeros((int(t_steps), 1))
    # set initial values
    v[:, 0] = v0
    m[:, 0] = m0
    h[:, 0] = h0
    n[:, 0] = n0
    # noise component
    knoise = 0.0005 # uA/(mS)^1/2
    for i in range(0, int(t_steps) - 1, 1):
        # get current states
        vT = v[i]
        mT = m[i]
        hT = h[i]
        nT = n[i]
        # stimulus current
        if delay / dt <= i <= (delay + duration) / dt:
            IStim = stimulus # in uA
        else:
            IStim = 0
        # compute change of m, h and n
        m[i + 1] = (mT + dt * alphaM(vT)) / (1 + dt * (alphaM(vT) + betaM(vT)))
        h[i + 1] = (hT + dt * alphaH(vT)) / (1 + dt * (alphaH(vT) + betaH(vT)))
        n[i + 1] = (nT + dt * alphaN(vT)) / (1 + dt * (alphaN(vT) + betaN(vT)))
        # ionic currents
        iNa = gNa * m[i + 1] ** 3. * h[i + 1] * (vT - vNa)
        iK = gK * n[i + 1] ** 4. * (vT - vK)
        iL = gL * (vT - vL)
        Inoise = (np.random.normal(0, 1) * knoise * np.sqrt(gNa * A))
        IIon = ((iNa + iK + iL) * A) + Inoise
        # compute change of voltage
        v[i + 1] = (vT + ((-IIon + IStim) / C) * dt)[0] # in ((uA / cm^2) / (uF / cm^2)) * ms == mV
        # stop the simulation if it diverges
        if math.isnan(v[i + 1]):
            return [None]
    # adjust the voltage to the resting potential
    v = v + v_Rest
    return v
def HH_model(stimulus, area_factor, runs=1000):
    count = 0
    t_end = 10 # in ms
    delay = 0.1 # in ms
    duration = 0.1 # in ms
    dt = 0.0025 # in ms
    # geometry
    d = 2 # diameter in um
    r = d / 2 # radius in um
    l = 10 # length of the compartment in um
    A = (1 * 10 ** (-8)) * area_factor # surface [cm^2]
    C = c * A # uF
    for j in range(0, runs):
        v = predict_voltage(A, C, delay, dt, duration, stimulus, t_end)
        # skip runs whose simulation diverged (predict_voltage returned [None])
        if v[0] is None:
            continue
        if max(v[:] - v_Rest) > 60:
            count += 1
    return count
And the attempt to run one simulation:
import time
from ex_21.equations import c, predict_voltage
area_factor = 0.1
stimulus = 70
# input signal
count = 0
t_end = 10 # in ms
delay = 0.1 # in ms
duration = 0.1 # in ms
dt = 0.0025 # in ms
# geometry
d = 2 # diameter in um
r = d / 2 # Radius in um
l = 10 # Length of the compartment in um
A = (1 * 10 ** (-8)) * area_factor # surface [cm^2]
C = c * A # uF
start = time.time()
voltage_dynamic = predict_voltage(A, C, delay, dt, duration, stimulus, t_end)
elapse = time.time() - start # wall-clock time of a single simulation
print(voltage_dynamic)
Output:
[None]
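One way to act on the "reverse engineer" suggestion above: scan (stimulus, area_factor) pairs and record which ones keep the integration finite. This is only a sketch; it reuses c and predict_voltage from the script above, and the log-spaced ranges are arbitrary guesses:
import numpy as np
# scan for parameter pairs where the simulation does not diverge
for af in np.logspace(-2, 2, 5):
    for stim in np.logspace(-4, 2, 7):
        A = 1e-8 * af  # surface in cm^2, as in the script above
        C = c * A      # capacitance in uF
        v = predict_voltage(A, C, 0.1, 0.0025, 0.1, stim, 10)
        if v[0] is not None:
            print(f"finite trace for stimulus={stim:g}, area_factor={af:g}")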