I'm writing a script which plots the bifurcation diagram of a damped pendulum with a small direct forcing.
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
epsi = 0.01
# Declare the model
f_dir = np.arange(0,1.3,0.01)
A_s = np.zeros(len(f_dir))
i = 0
for f in f_dir:
    def myModel(y, t):
        dy0 = y[1]
        dy1 = -epsi*y[1] - np.sin(y[0]) - f*np.cos((1.01)*t)*np.cos(y[0])
        return [dy0, dy1]
    time = np.arange(0.0, 2000, 0.01)
    yinit = np.array([np.pi/2, 0])
    y = odeint(myModel, yinit, time)
    A_s.insert(i, np.abs(np.max(y[-600:-1,0]) - np.min(y[-600:-1,0])))
    i += 1
plt.plot(f_dir,A_s,'*')
plt.xlabel(r'$f_s$')
plt.ylabel(r'$A_s$')
plt.hold
plt.show()
The problem is that I am not inserting anything into A_s, and I do not know why because the variable i is increased at each step of the loop.
It's a little hard to follow your code, but this is probably closer to what you want. You only need to define your model once, even if f is a variable argument: you can pass such arguments to odeint in the args tuple and they get handed on to the model function.
Also note that NumPy arrays don't have an insert method.
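A quick aside on that point: NumPy does provide a module-level np.insert, but it returns a new, longer array rather than filling an existing one in place; for a preallocated result array, plain indexed assignment is what you want. A toy sketch:
import numpy as np

a = np.zeros(3)
b = np.insert(a, 1, 7.0)  # returns a NEW array([0., 7., 0., 0.]); a is unchanged
a[1] = 7.0                # indexed assignment fills the preallocated array in place
With that in mind, here is the corrected script: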
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
epsi = 0.01
# Declare the model
f_dir = np.arange(0,1.3,0.01)
A_s = np.zeros(len(f_dir))
def myModel(y, t, f):
    dy0 = y[1]
    dy1 = -epsi*y[1] - np.sin(y[0]) - f*np.cos((1.01)*t)*np.cos(y[0])
    return [dy0, dy1]
time = np.arange(0.0, 2000, 0.01)
yinit = np.array([np.pi/2, 0])

for i, f in enumerate(f_dir):
    y = odeint(myModel, yinit, time, args=(f,))
    A_s[i] = np.abs(np.max(y[-600:-1,0]) - np.min(y[-600:-1,0]))

plt.plot(f_dir, A_s, '*')
plt.xlabel(r'$f_s$')
plt.ylabel(r'$A_s$')
plt.show()
You define the myModel function, but you never call it yourself - odeint is what actually calls it, which is why loop-dependent values such as f are best passed through the args tuple rather than captured from the enclosing scope.
I would like to fit z = f(x,y) using an objective function.
I plan to fit more parameters later on, and lmfit sounded a nice abstraction to try.
For the sake of testing I created a controlled data set. The data is an array of coordinate X, coordinate Y, Vector X, Vector Y
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import gridspec
from scipy.optimize import leastsq
from lmfit import Parameters, fit_report, minimize
#Create sample
xs = 10
ys = 10
s = 11
coo_x = np.linspace(-xs, xs,s)
coo_y = np.linspace(-ys, ys,s)
#Get all permutations of X,Y coordinates
mesh = np.array(np.meshgrid(coo_x, coo_y))
coo = mesh.T.reshape(-1, 2)
header = ["W_DesignPosX","W_DesignPosY","W_Registration_X_A","W_Registration_Y_A"]
transX = 3
transY = 0
angle = 0
magX = 0
magY = 0
orthX = 0
trans = np.linspace((transX,transY),(transX,transY),s*s)
rot = np.flip(coo, axis=1)*np.array([-angle,angle])
mag = np.array([magX,magY])
orth = np.flip(coo, axis=1)*orthX/2
np.random.seed(seed=30)
random = np.random.normal(0,0.1, (s*s,2))
#random = np.zeros((s*s,2))
#Compute data
test = np.concatenate((coo, trans+coo*mag+rot+orth+random), axis=1)
test_df = pd.DataFrame(data=test, columns=header)
In the test case above transX = 3 and all the other inputs are 0.
Running minimize, the fit should give A=3, B=0, C=0, but all the parameters end at 0 :(
def residual_x(param, x, y, data):
    A = params['A']
    B = params['B']
    C = params['C']
    model = A + B*x + C*y
    return (model - data)
params = Parameters()
params.add('A', value=0.0)
params.add('B', value=0.0)
params.add('C', value=0.0)
x, y = test[:,:2].T
reg_x = test[:,2]
out = minimize(residual_x, params, args=(x, y, reg_x))
print(fit_report(out))
print()
print(out.params.pretty_print())
I did eyeball the array and the quiver chart; the data does show a horizontal vector field.
def vector_summary(df,Design_x,Design_y,Reg_x,Reg_y,s=1):
    c = 'g'
    fig = plt.figure(figsize=(8, 4))
    grid = plt.GridSpec(2, 3, width_ratios=[1.5, 0.25, 1])
    #Vector map
    ###########
    ax_q = fig.add_subplot(grid[:,0])
    X = list(df[Design_x])
    Y = list(df[Design_y])
    U = list(df[Reg_x])
    V = list(df[Reg_y])
    ax_q.quiver(X, Y, U, V, scale=0.04/s, color=c)
    ax_q.set_title("Vector map", fontsize=20)
    ax_q.set_xlabel('W_DesignPosX')
    ax_q.set_ylabel('W_DesignPosY')
    #ax_q.set_ylim([-20000,20000])
    #X_registration
    ###############
    ax_x = fig.add_subplot(grid[0,2])
    sns.histplot(df, x=Reg_x, ax=ax_x, color=c)
    ax_x.set_title("Reg_X", fontsize=20)
    #Y_registration
    ###############
    ax_y = fig.add_subplot(grid[1,2])
    sns.histplot(df, x=Reg_y, ax=ax_y, color=c)
    ax_y.set_title("Reg_Y", fontsize=20)
    plt.tight_layout()
    plt.show()
vector_summary(test_df,'W_DesignPosX','W_DesignPosY','W_Registration_X_A','W_Registration_Y_A',0.0005)
I am not a computer scientist and only have an instinct that my issue lies in the objective function, but I cannot put my finger on it.
Any advice would be appreciated! I am eager to learn. It is about the journey, right ;-)
You have a simple typo in your residuals function
def residual_x(param, x, y, data):
needs to be params and not param
def residual_x(params, x, y, data):
Hence, instead of reading the updated params inside residual_x (from its local namespace), the function was reading your original params from the global namespace. No error was raised because the minimizer doesn't check whether an argument named params is passed; Python simply resolves the name params from the local namespace outward to the global one, and that global variable never changes during the fit.
So when you later inspected params, you got back the original object you had created.
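A minimal illustration of that lookup behaviour (a toy sketch, independent of lmfit):
params = {'A': 0.0}

def residual(param):
    # typo: the body reads 'params', so Python silently falls back to the global dict
    return params['A']

print(residual({'A': 3.0}))  # prints 0.0, not 3.0 - and no error is raised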
I'm trying to plot the output from an ODE using a Kronecker delta function which should only become 'active' at a specific time = t1.
This should give a sawtooth-like response where the initial value decays exponentially until t = t1, where it rises again instantly before decaying once more.
However, when I plot this it looks like the solver is treating the Kronecker delta function as zero for all time t. Is there any way to do this in Python?
from scipy import KroneckerDelta
import scipy.integrate as sp
import matplotlib.pyplot as plt
import numpy as np

def dy_dt(y,t):
    dy_dt = 500*KroneckerDelta(t,t1) - 2*y
    return dy_dt

t1 = 4
y0 = 500
t = np.arange(0,10,0.1)
y = sp.odeint(dy_dt,y0,t)
plt.plot(t,y)
In the case of a simple Kronecker delta using time, you can run the ode in pieces like so:
from scipy.integrate import odeint
import matplotlib.pyplot as plt
import numpy as np
def dy_dt(y,t):
    return -2*y
t_delta = 4
tend = 10
y0 = [500]
t1 = np.linspace(0,t_delta,50)
y1 = odeint(dy_dt,y0,t1)
y0 = y1[-1] + 500 # execute Kronecker delta
t2 = np.linspace(t_delta,tend,50)
y2 = odeint(dy_dt,y0,t2)
t = np.append(t1, t2)
y = np.append(y1, y2)
plt.plot(t,y)
Another option for complicated situations is to use the events functionality of solve_ivp.
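A minimal sketch of that approach for this same problem (the event setup here is illustrative; as above, the delta is applied by hand as a jump of 500 at t_delta):
from scipy.integrate import solve_ivp
import numpy as np

t_delta, tend = 4, 10

def rhs(t, y):              # note: solve_ivp expects (t, y), the reverse of odeint
    return -2*y

def hit_delta(t, y):        # event fires when t - t_delta crosses zero
    return t - t_delta
hit_delta.terminal = True   # stop the integration at the event

sol1 = solve_ivp(rhs, (0, tend), [500], events=hit_delta, max_step=0.1)
y_kick = sol1.y[:, -1] + 500                      # apply the jump at the event
sol2 = solve_ivp(rhs, (sol1.t[-1], tend), y_kick, max_step=0.1)
t = np.append(sol1.t, sol2.t)
y = np.append(sol1.y[0], sol2.y[0])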
I think the problem could be internal rounding errors, because 0.1 cannot be represented exactly as a Python float. I would try
import math

def dy_dt(y,t):
    if math.isclose(t, t1):
        return 500 - 2*y
    else:
        return -2*y
Also, the documentation of odeint suggests using the args parameter instead of global variables to give your derivative function access to additional arguments; I would also replace np.arange with np.linspace:
import scipy.integrate as sp
import matplotlib.pyplot as plt
import numpy as np
import math
def dy_dt(y, t, t1):
    if math.isclose(t, t1):
        return 500 - 2*y
    else:
        return -2*y
t1 = 4
y0 = 500
t = np.linspace(0, 10, num=101)
y = sp.odeint(dy_dt, y0, t, args=(t1,))
plt.plot(t, y)
I did not test the code so tell me if there is anything wrong with it.
EDIT:
When testing my code I took a look at the t values for which dy_dt is evaluated. I noticed that odeint does not only use the t values that were specified; it also evaluates at slightly shifted points:
...
3.6636447422787928
3.743098503914526
3.822552265550259
3.902006027185992
3.991829287543431
4.08165254790087
4.171475808258308
...
Now using my method, we get
math.isclose(3.991829287543431, 4) # False
because the default tolerance allows a relative error of at most 1e-9, so odeint "misses" the bump in the derivative at 4. Luckily, we can fix that by specifying a larger tolerance:
def dy_dt(y, t, t1):
    if math.isclose(t, t1, abs_tol=0.01):
        return 500 - 2*y
    else:
        return -2*y
Now the bump term is active for all t between 3.99 and 4.01. This range can be made smaller by increasing the num argument of linspace.
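For example, doubling num halves the spacing of the requested output points:
t = np.linspace(0, 10, num=201)  # grid spacing 0.05 instead of 0.1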
TL;DR
Your problem is not a Python problem but a problem of numerically solving a differential equation: you need to alter your derivative over an interval of sufficient length, otherwise the solver will likely step over the interesting spot. A Kronecker delta does not work with numerical approaches to solving ODEs.
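One common workaround, sketched under the assumption (as in the answers above) that the delta should add 500 to y at t1: replace the delta with a narrow smooth pulse of the same area, and cap the solver's step size so it cannot step over the pulse.
import numpy as np
import scipy.integrate as sp
import matplotlib.pyplot as plt

t1, width = 4.0, 0.05

def dy_dt(y, t):
    # narrow Gaussian of total area 500 as a smooth stand-in for 500*delta(t - t1)
    pulse = 500.0/(width*np.sqrt(2*np.pi)) * np.exp(-0.5*((t - t1)/width)**2)
    return pulse - 2*y

t = np.linspace(0, 10, 2001)
y = sp.odeint(dy_dt, 500, t, hmax=width/2)  # hmax keeps the solver from skipping the pulse
plt.plot(t, y)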
I'm trying to find a numerical solution to, and eventually graph, the Gyllenberg-Webb model (a cancer cell growth model):
dP/dt = (β − µ_P − r_0(N))P + r_i(N)Q
dQ/dt = r_0(N)P − (r_i(N) + µ_Q)Q
where β is the reproduction rate of proliferating cells, µ_P is the death rate of proliferating cells, µ_Q is the death rate of quiescent cells, and r_0 and r_i are transition rates, functions of N(t). Also N(t) = P(t) + Q(t).
For my purposes here I defined r_0(N) = bN and r_i(N) = aN to keep things simple.
My problem is when I try and plot my solution with pyplot I get
ValueError: x and y must have same first dimension
which I guess is self-explanatory, but I'm not sure how to go about fixing it without breaking everything else.
My code, which I've done only for the first equation so far, is:
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
def fun(P, t, params):
    beta, mp, b, N, Q = params
    return (beta-mp-(b*N))*P + (a*N)*Q
params = (0.5,0.6,0.7,0.8,0.9)
tvec = np.arange(0,6,0.1)
s1 = scipy.integrate.odeint(
    fun,
    y0 = 1,
    t = tvec,
    args = (params,))
#print(s1)
plt.plot(fun,tvec)
In the end you will want to solve the coupled system. This is not complicated, just make the state object a vector and return the derivatives in the correct order.
from scipy.integrate import odeint

def fun(state, t, params):
    P, Q = state
    beta, mp, mq, a, b = params
    N = P + Q
    r0N, riN = b*N, a*N
    return [(beta-mp-r0N)*P + riN*Q, r0N*P - (riN+mq)*Q]

params = (0.5, 0.6, 0.7, 0.8, 0.9)
tsol = np.arange(0, 6, 0.1)
sol = odeint(fun, y0=[1, 0], t=tsol, args=(params,))
Psol, Qsol = sol.T
plt.plot(tsol, Psol, tsol, Qsol)
You are currently plotting fun vs. tvec. What you actually want is to plot tvec vs s1. You will also have to define the parameter a in fun; I set it to 1 in the code below:
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
def fun(P, t, params):
    beta, mp, b, N, Q = params
    return (beta-mp-(b*N))*P + (1.0 * N)*Q
params = (0.5, 0.6, 0.7, 0.8, 0.9)
tvec = np.arange(0, 6, 0.1)
s1 = scipy.integrate.odeint(
    fun,
    y0=1.,
    t=tvec,
    args=(params,))
plt.plot(tvec, s1)
plt.show()
This will plot s1 against tvec.
For MCMC I use the emcee package, following this tutorial. Instead of the equation from this part, which is fractional and straightforward, I use this form; that is, I use the matrix form (not its code) and wrote the following code.
For more explanation of my code:
def new_calculation(n) computes the equation for each component of the matrix, and def log_likelihood(theta,hh): builds the mentioned matrix.
The problem is that I need args to use in soln = minimize(nll, initial, args=(hh)) and in def log_probability(theta,hh):.
I use hh as args, but Python says hh is not defined. The problem may be in how the arguments and functions are defined, but I do not know how to fix it.
import numpy as np
import emcee
import matplotlib.pyplot as plt
from math import *
from scipy.integrate import quad
from scipy.integrate import odeint

xx = np.array([0.01, 0.012, 0.014, 0.016])
yy = np.array([32.95388698, 33.87900347, 33.84214074, 34.11856704])
Cov = [[137, 168], [28155, -2217]]
rc = 0.09
c = 0.7
H01 = 70
O_m1 = 0.28
z0 = 0
M1 = 10
np.random.seed(123)

def ant(z, O_m, O_D):
    return 1/sqrt(((1+z)**2)*(1+O_m*z))

def new_calculation(n):
    O_D = 1 - O_m - (1/(2*rc*yyn))
    q = quad(ant, 0, xx[n], args=(O_m, O_D))[0]
    h = log10((1+xx[n])*q)
    fn = (yy[n] - M - h)
    return fn

def log_likelihood(theta, hh):
    H0, O_m, M = theta
    f_list = []
    for i in range(2):  # the value '2' reflects matrix size
        f_list.append(new_calculation(i))
    rdag = [f_list]
    rmat = [[f] for f in f_list]
    hh = np.linalg.det(np.dot(rdag, Cov), rmat)*0.000001
    return hh

from scipy.optimize import minimize
np.random.seed(42)
nll = lambda *args: -log_likelihood(*args)
initial = np.array([H01, O_m1, M1]) + 0.1*np.random.randn(3)
soln = minimize(nll, initial, args=(hh))
H0_ml, O_m0_ml = soln.x

def log_prior(theta):
    H0, O_D = theta
    if 65 < H0 < 75 and 0.22 < O_m < 0.32 and 0 < M < 12:
        return 0.0
    return -np.inf

def log_probability(theta, mm, zz, hh):
    lp = log_prior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + log_likelihood(theta, mm, zz, hh)

y0 = H0
pos = soln.x + 1e-4*np.random.randn(200, 3)
nwalkers, ndim = pos.shape
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability, args=(rdag, rmat))
sampler.run_mcmc(pos, 500)

fig = plt.figure(2, figsize=(10, 10))
fig.clf()
for j in range(ndim):
    ax = fig.add_subplot(ndim, 1, j+1)
    ax.plot(np.array([sampler.chain[:,i,j] for i in range(nwalkers)]), "k", alpha=0.3)
    ax.set_ylabel(("H0", "O_m")[j], fontsize=15)
plt.xlabel('Steps', fontsize=15)
fig.show()
I appreciate your help and your attention.
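As general background on how args reaches an objective function, a minimal sketch with a hypothetical quadratic objective (not a fix for the code above): scipy.optimize.minimize forwards the entries of the args tuple as extra positional arguments to the objective, and a one-element tuple must be written (hh,), with a trailing comma, since (hh) is just hh in parentheses.
from scipy.optimize import minimize
import numpy as np

def objective(theta, data):            # data arrives via the args tuple
    return np.sum((theta - data)**2)

data = np.array([1.0, 2.0, 3.0])
res = minimize(objective, x0=np.zeros(3), args=(data,))
print(res.x)                           # close to [1. 2. 3.]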
I'm trying to model Chua's circuit in Python using matplotlib and scipy, which involves solving a system of ordinary differential equations.
This has been done in MATLAB, and I simply wanted to attempt the problem in Python. The linked MATLAB code is a little confusing; the code on the left doesn't appear to have much relevance to solving the system of ODEs that describe Chua's circuit (page 3, equations (2), (3) and (4)), whilst the code on the right goes beyond that to modelling the circuit component by component.
I'm not familiar with scipy's odeint function so I used some of the examples from the scipy cookbook for guidance.
Can anyone help me troubleshoot my system? The graph I get looks nothing like the expected Chua attractor.
My code is attached below:
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
def fV_1(V_1, G_a, G_b, V_b):
    if V_1 < -V_b:
        fV_1 = G_b*V_1 + (G_b-G_a)*V_b
    elif -V_b <= V_1 and V_1 <= V_b:
        fV_1 = G_a*V_1
    elif V_1 > V_b:
        fV_1 = G_b*V_1 + (G_a-G_b)*V_b
    else:
        print("Error!")
    return fV_1
def ChuaDerivatives(state,t):
    #unpack the state vector
    V_1 = state[0]
    V_2 = state[1]
    I_3 = state[2]
    #definition of constant parameters
    L = 0.018          #H, or 18 mH
    C_1 = 0.00000001   #F, or 10 nF
    C_2 = 0.0000001    #F, or 100 nF
    G_a = -0.000757576 #S, or -757.576 uS
    G_b = -0.000409091 #S, or -409.091 uS
    V_b = 1            #V (E)
    G = 0.000550       #S, or 550 uS VARIABLE
    #compute state derivatives
    dV_1dt = (G/C_1)*(V_2-V_1) - (1/C_1)*fV_1(V_1, G_a, G_b, V_b)
    dV_2dt = -(G/C_2)*(V_2-V_1) + (1/C_2)*I_3
    dI_3dt = -(1/L)*V_2
    #return state derivatives
    return dV_1dt, dV_2dt, dI_3dt
#set up time series
state0 = [0.1, 0.1, 0.0001]
t = np.arange(0.0, 53.0, 0.1)
#populate state information
state = odeint(ChuaDerivatives, state0, t)
# do some fancy 3D plotting
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.plot(state[:,0],state[:,1],state[:,2])
ax.set_xlabel('V_1')
ax.set_ylabel('V_2')
ax.set_zlabel('I_3')
plt.show()
So I managed to work it out for myself after some fiddling; I was interpreting the odeint function wrongly. More careful reading of the docstring, and starting again from scratch rather than following a difficult method, solved it. Code below:
import numpy as np
import scipy.integrate as integrate
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
#define universal variables
c0 = 15.6
c1 = 1.0
c2 = 28.0
m0 = -1.143
m1 = -0.714
#the piecewise-linear Chua diode characteristic
def f(x):
    return m1*x + (m0-m1)/2.0*(abs(x+1.0) - abs(x-1.0))

#the function computing the state derivatives
def dH_dt(H, t=0):
    return np.array([c0*(H[1]-H[0]-f(H[0])),
                     c1*(H[0]-H[1]+H[2]),
                     -c2*H[1]])
#computational time steps
t = np.linspace(0, 30, 1000)
#x, y, and z initial conditions
H0 = [0.7, 0.0, 0.0]
H, infodict = integrate.odeint(dH_dt, H0, t, full_output=True)
print(infodict['message'])
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(H[:,0], H[:,1], H[:,2])
plt.show()
Which gives me the Chua attractor I was after.