I have data collected in a CSV of every output of the friction model. The model imagines the contact between two surfaces as one-dimensional bristles that react to being bent like springs; this deflection produces the friction force. The force of friction is modeled as:
FL(V,Z) = sig0*Z + sig1*DZ/Dt + sig2*V
where V is the velocity of the surface, Z is the deflection of the bristles, and DZ/Dt is the rate of deflection, equal to:
DZ/Dt = V + abs(V)*Z/(Fc + (Fs-Fc)*exp(-(V^2/Vs^2)))
      = V + abs(V)*Z/G(V)
      = V + H(V)*Z
where Fc is the friction force on the object in motion (a constant), Fs is the force required to get the object into motion (a constant > Fc), and Vs is the speed over which the transition between the two regimes occurs (a constant I've experimentally derived). The velocity and position of the block are provided in the CSV, as well as the force of friction, all with respect to time. I have also created an easily integrable (trigonometric) approximation of the velocity as a function of time.
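For concreteness, here is a minimal sketch of G(V) and DZ/Dt as defined above (just an illustration with the parameter values given below, separate from my fitting code):

import numpy as np

Fc, Fs, Vs = 2.7, 8.2, 0.34  # parameter values from below

def G(v):
    # G(V) = Fc + (Fs - Fc)*exp(-(V^2/Vs^2)), the Stribeck curve
    return Fc + (Fs - Fc) * np.exp(-(v**2) / Vs**2)

def dzdt(v, z):
    # DZ/Dt = V + abs(V)*Z/G(V)
    return v + np.abs(v) * z / G(v)

print(G(0.0))   # ~= Fs at rest (stiction)
print(G(10.0))  # ~= Fc once moving (kinetic friction)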
On to the problem: the code throws a fit over the way I'm trying to pass lists into the functions (I think).
The function that passes the parameters SEEMS to work (taken from a different file that simply plots the data); however, I've tried to numerically integrate DZ/Dt and fit the sig parameters to the imported friction data.
What I imported
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from scipy import optimize
import pylab as pp
from math import sin, pi, exp, fabs, pow
Parameters
Fc=2.7 #N
Fs=8.2 #N
Vs=.34 #mm/s
Initial_conditions
ITime=Time[0]
Iz=[0,0,0]
Building the friction model
def velocity(time):
    V = -13/2*1/60*pi*sin(1/60*pi*time+pi)
    return V

def g(v, vs, fc, fs, sig0):
    G = (1/sig0)*(fc+(fs-fc)*exp(-pow(v,2)/pow(vs,2)))
    return G

def h(v, vg):
    H = fabs(v)/vg
    return H

def findz(z, time, sig):
    Vx = velocity(time)
    VG = g(Vx, Vs, Fc, Fs, sig)
    HVx = h(Vx, VG)
    dzdt = Vx + HVx*z
    return dzdt

def friction(time, sig, iz):
    dz = lambda z, time: findz(z, time, sig)
    z = odeint(dz, iz, time)
    return sig[0]*z + sig[1]*findz(z, time, sig[0]) + sig[2]*velocity(Time)
# Should return the difference between the constructed function and the data
# and yield a list containing the optimized parameters
def residual(sig):
    return Ff - friction(Time, sig, Iz)
SigG=[4,20,1]
SigVal=optimize.leastsq(residual,SigG)
print "parameter values are ",SigVal
This returns
line 56, in velocity
V=-13/2*1/60*pi*sin(1/60*pi*time+pi)
TypeError: can't multiply sequence by non-int of type 'float'
Is this to do with the fact that I am passing lists?
As I mentioned in my comment, velocity() is the cause of the error, most probably because it expects a single time value, whereas you pass a whole list/array (with multiple values) to velocity() when you call it in friction().
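To see the failure mode in isolation (a small sketch of mine, not the OP's code):

import numpy as np
from math import sin, pi

t_list = [0.0, 1.0, 2.0]
# 1/60*pi*t_list fails: a plain list can't be multiplied by a float,
# which is exactly the TypeError in the traceback above.
t_arr = np.array(t_list)
# math.sin is scalar-only, so even an ndarray fails there;
# np.sin works element-wise, so a vectorised velocity() would be:
V = -13/2 * 1/60 * np.pi * np.sin(1/60 * np.pi * t_arr + np.pi)
print(V)  # one velocity value per time point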
Using some chosen values, and after shortening your code and passing ITime instead of Time, the code runs correctly, but it is left to you to judge if this is analytically what you wanted to achieve. Below is my code:
import numpy as np
from scipy import optimize
from scipy.integrate import odeint
from math import sin, pi, exp, fabs
# Parameters
Fc = 2.7 #N
Fs = 8.2 #N
Vs = .34 #mm/s
# define test values for Ff and Time
Ff = np.array([100, 50, 50])
Time = np.array([10, 20, 30])
# Initial_conditions
ITime = Time[0]
Iz = np.array([0, 0, 0])
# Building the friction model
V = lambda t: -13/2 * 1/60 * pi * sin(1/60 * pi * t + pi)  # same formula as the question's velocity()
G = lambda v, vs, fc, fs, sig0: (1 / sig0) * (fc + (fs - fc) * exp(-v**2 / vs**2))
H = lambda v, vg: fabs(v) / vg
dzdt = lambda z, t, sig: V(t) + H(V(t), G(V(t), Vs, Fc, Fs, sig)) * z
def friction(t, sig, iz):
    dz = lambda z, t: dzdt(z, t, sig)
    z = odeint(dz, iz, t)
    return sig[0]*z + sig[1]*dzdt(z, t, sig[0]) + sig[2]*V(t)
# Should return the difference between the Constructed function and the data
# and yield a list containing the optimized parameters
def residual(sig):
    return Ff - friction(ITime, sig, Iz)[0]
SigG = np.array([4, 20, 1])
SigVal = optimize.leastsq(residual, SigG, full_output = False)
print("parameter values are ", SigVal )
Output:
parameter values are (array([ 4. , 3251.47271228, -2284.82881887]), 1)
Related
I am computing a solution to the free basis expansion of the Dirac equation for electron-positron pair production. For this I need to solve a system of equations that looks like this:
[Equation for pair production, from Mocken et al.]
EDIT: This has been solved by passing y0 as complex type into the solver, as is stated in this issue: https://github.com/scipy/scipy/issues/8453. I would definitely consider this a bug, but it seems like it has gone unnoticed for at least 4 years.
For this I am using SciPy's solve_ivp integrator in the following way:
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from scipy.integrate import solve_ivp
import scipy.constants as constants
#Impulse
px, py = 0 , 0
#physics constants
e = constants.e
m = constants.m_e # electronmass
c = constants.c
hbar = constants.hbar
#relativistic energy
E = np.sqrt(m**2 *c**4 + (px**2+py**2) * c**2) # E_p
#adiabatic parameter
xi = 1
#Parameter of the system
w = 0.840 #frequency in 1/m_e
N = 8 # amount of amplitudes in window
T = 2* np.pi/w
#unit system
c = 1
hbar = 1
m = 1
#strength of electric field
E_0 = xi*m*c*w/e
print(E_0)
#vectorpotential
A = lambda t,F: -E_0/w *np.sin(t)*F
def linearFenster2(t):
    conditions = [t <= 0, (t/w >= 0) and (t/w <= T/2), (t/w >= T/2) and (t/w <= T*(N+1/2)), (t/w >= T*(N+1/2)) and (t/w <= T*(N+1)), t/w >= T*(N+1)]
    funcs = [lambda t: 0, lambda t: 1/np.pi * t, lambda t: 1, lambda t: 1 - w/np.pi * (t/w - T*(N+1/2)), lambda t: 0]
    return np.piecewise(t, conditions, funcs)
#Coefficient functions
nu = lambda t: -1j/hbar *e*A(w*t,linearFenster2(w*t)) *np.exp(2*1j/hbar * E*t) *(px*py*c**2 /(E*(E+m*c**2)) + 1j*(1- c**2 *py**2/(E*(E+m*c**2))))
kappa = lambda t: 1j*e*A(t,linearFenster2(w*t))* c*py/(E * hbar)
#System to solve
def System(t, y, nu, kappa):
    df = kappa(t)*y[0] + nu(t)*y[1]
    dg = -np.conjugate(nu(t))*y[0] + np.conjugate(kappa(t))*y[1]
    return np.array([df, dg], dtype=np.cdouble)
def solver(tmin, tmax, teval=None, f0=0, g0=1):
    '''solves the system.
    #tmin: start time
    #tmax: end time
    #f0: starting percentage of already present electrons of positive energy, usually 0
    #g0: starting percentage of already present electrons of negative energy, usually 1, therefore full vacuum
    '''
    y0 = [f0, g0]
    tspan = np.array([tmin, tmax])
    koeff = np.array([nu, kappa])
    sol = solve_ivp(System, tspan, y0, t_eval=teval, args=koeff)
    return sol
#Plotting of windowfunction
amount = 10**2
t = np.arange(0, T*(N+1), 1/amount)
vlinearFenster2 = np.array([linearFenster2(w*a) for a in t ], dtype = float)
fig3, ax3 = plt.subplots(1,1,figsize=[24,8])
ax3.plot(t,E_0/w * vlinearFenster2)
ax3.plot(t,A(w*t,vlinearFenster2))
ax3.plot(t,-E_0 /w * vlinearFenster2)
ax3.xaxis.set_minor_locator(ticker.AutoMinorLocator())
ax3.set_xlabel("t in s")
ax3.grid(which = 'both')
plt.show()
sol = solver(0, 70,teval = t)
ts= sol.t
f=sol.y[0]
fsquared = 2* np.absolute(f)**2
plt.plot(ts,fsquared)
plt.show()
The plot for the window function looks like this (and is correct): [window function plot]
However, the plot for the solution looks like this: [plot of pair production probability]
This is not correct, based on the paper's graphs (and further testing using Mathematica).
When running the line 'sol = solver(..)' it says:
\numpy\core\_asarray.py:102: ComplexWarning: Casting complex values to real discards the imaginary part
return array(a, dtype, copy=False, order=order)
I simply do not know why solve_ivp discards the imaginary part. It's absolutely necessary.
Can someone enlighten me who knows more or sees the mistake?
According to the documentation, the y0 passed to solve_ivp must be of type complex in order for the integration to be over the complex domain. A robust way of ensuring this is to add the following to your code:
def solver(tmin, tmax, teval=None, f0=0, g0=1):
    '''solves the system.
    #tmin: start time
    #tmax: end time
    #f0: starting percentage of already present electrons of positive energy, usually 0
    #g0: starting percentage of already present electrons of negative energy, usually 1, therefore full vacuum
    '''
    f0 = complex(f0)  # <-- added
    g0 = complex(g0)  # <-- added
    y0 = [f0, g0]
    tspan = np.array([tmin, tmax])
    koeff = np.array([nu, kappa])
    sol = solve_ivp(System, tspan, y0, t_eval=teval, args=koeff)
    return sol
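An equivalent one-liner (a sketch with the same effect as the two added lines) is to build y0 as a complex array directly:

y0 = np.array([f0, g0], dtype=complex)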
I tried the above, and it indeed made the warning disappear. However, the result of the integration seems to be the same regardless.
Here is my code.
import numpy as np
from scipy.integrate import odeint
import math  # needed for math.pi below
#Constant
R0=1.475
gamma=2.
ScaleMeVfm3toEskm3 = 8.92*np.power(10.,-7.)
def EOSe(p):
    return np.power((p/450.785), (1./gamma))

def M(m, r):
    return (4./3.)*np.pi*np.power(r,3.)*p

# function that returns dz/dr
def model(z, r):
    p, m = z
    dpdr = -((R0*EOSe(p)*m)/(np.power(r,2.)))*(1+(p/EOSe(p)))*(1+((4*math.pi*(np.power(r,3))*p)/(m)))*((1-((2*R0)*m)/(r))**(-1.))
    dmdr = 4.*math.pi*(r**2.)*EOSe(p)
    dzdr = [dpdr, dmdr]
    return dzdr
# initial condition
r0=10.**-12.
p0=10**-6.
z0 = [p0, M(r0, p0)]
# radius
r = np.linspace(r0, 15, 100000)
# solve ODE
z = odeint(model,z0,r)
The result of z[:,0] keeps decreasing, as I expected. But what I want is only positive values. One may run the code and try print(z[69306]) and it will show [2.89636405e-11 5.46983202e-01]. That is the point at which I want odeint to stop integrating.
Of course, the provided code shows
RuntimeWarning: invalid value encountered in power
return np.power((p/450.785),(1./gamma))
because the result of p becomes negative. For any further points, odeint yields [nan nan].
However, I could use np.nanmin() to find the minimum of z[:,0] that is not nan. But I have a set of p0 values for my work. I will need to call odeint in a loop like
P = np.linspace(10**-8., 10**-2., 10000)
for p0 in P:
    # the code for solving the ODE provided above
which takes more time.
I think it would reduce the execution time if I could just stop before z[:,0] becomes negative.
Here is the modified code using solve_ivp:
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pylab as plt
# Constants
R0 = 1.475
gamma = 2.
def EOSe(p):
    return np.power(np.abs(p)/450.785, 1./gamma)

def M(m, r):
    return (4./3.)*np.pi*np.power(r,3.)*p

# function that returns dz/dr
# note: the argument order is reversed compared to `odeint`
def model(r, z):
    p, m = z
    dpdr = -R0*EOSe(p)*m/r**2*(1 + p/EOSe(p))*(1 + 4*np.pi*r**3*p/m)*(1 - 2*R0*m/r)**(-1)
    dmdr = 4*np.pi * r**2 * EOSe(p)
    dzdr = [dpdr, dmdr]
    return dzdr
# initial condition
r0 = 1e-3
r_max = 50
p0 = 1e-6
z0 = [p0, M(r0, p0)]
# Define the event function
# from the doc: "The solver will find an accurate value
# of t at which event(t, y(t)) = 0 using a root-finding algorithm. "
def stop_condition(r, z):
    return z[0]
stop_condition.terminal = True
# solve ODE
r_span = (r0, r_max)
sol = solve_ivp(model, r_span, z0,
                events=stop_condition)
print(sol.message)
print('last p, m = ', sol.y[:, -1], 'for r_event=', sol.t_events[0][0])
r_sol = sol.t
p_sol = sol.y[0, :]
m_sol = sol.y[1, :]
# Graph
plt.subplot(2, 1, 1);
plt.plot(r_sol, p_sol, '.-b')
plt.xlabel('r'); plt.ylabel('p');
plt.subplot(2, 1, 2);
plt.plot(r_sol, m_sol, '.-r')
plt.xlabel('r'); plt.ylabel('m');
Actually, using events in this case does not prevent the warning, because the solver is going to evaluate the model for p<0 anyway. A solution is to take the absolute value of p in the square root (as in the code above). Using np.sign(p)*np.power(np.abs(p)/450.785, 1./gamma) gives an interesting result too.
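For reference, a sketch of that sign-preserving variant:

import numpy as np

def EOSe_signed(p, gamma=2.):
    # keeps the sign of p instead of folding it to |p|
    return np.sign(p) * np.power(np.abs(p)/450.785, 1./gamma)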
I have written this code to model the motion of a spring pendulum
import numpy as np
from scipy.integrate import odeint
from numpy import sin, cos, pi, array
import matplotlib.pyplot as plt
def deriv(z, t):
    x, y, dxdt, dydt = z
    dx2dt2 = (0.415+x)*(dydt)**2 - 50/1.006*x + 9.81*cos(y)
    dy2dt2 = (-9.81*1.006*sin(y) - 2*(dxdt)*(dydt))/(0.415+x)
    return np.array([x, y, dx2dt2, dy2dt2])
init = array([0,pi/18,0,0])
time = np.linspace(0.0,10.0,1000)
sol = odeint(deriv,init,time)
def plot(h, t):
    n, u, x, y = h
    n = (0.4+x)*sin(y)
    u = (0.4+x)*cos(y)
    return np.array([n, u, x, y])
init2 = array([0.069459271,0.393923101,0,pi/18])
time2 = np.linspace(0.0,10.0,1000)
sol2 = odeint(plot,init2,time2)
plt.xlabel("x")
plt.ylabel("y")
plt.plot(sol2[:,0], sol2[:, 1], label = 'hi')
plt.legend()
plt.show()
where x and y are two variables, and I'm trying to convert x and y to the coordinates n (x-axis) and u (y-axis) and then graph n and u, with n on the x-axis and u on the y-axis. However, when I graph the code above it gives me: [resulting plot]
Instead, I should be getting an image somewhat similar to this: [expected plot]
The first part of the code, from "def deriv(z,t):" to "sol = odeint(deriv, ...)", is where the values of x and y are generated; using those, I can then turn them into rectangular coordinates and graph them. How do I change my code to do this? I'm new to Python, so I might not understand some of the terminology. Thank you!
The first solution should give you the expected result, but there is a mistake in the implementation of the ODE.
The function you pass to odeint should return an array containing the derivatives (the right-hand side) of a 1st-order differential equation system.
In your case, what you are solving is
x' = x, y' = y, (dx/dt)' = dx2dt2, (dy/dt)' = dy2dt2
while instead you should be solving
x' = dx/dt, y' = dy/dt, (dx/dt)' = dx2dt2, (dy/dt)' = dy2dt2
In order to do so, change your code to this:
import numpy as np
from scipy.integrate import odeint
from numpy import sin, cos, pi, array
import matplotlib.pyplot as plt
def deriv(z, t):
    x, y, dxdt, dydt = z
    dx2dt2 = (0.415 + x) * (dydt)**2 - 50 / 1.006 * x + 9.81 * cos(y)
    dy2dt2 = (-9.81 * 1.006 * sin(y) - 2 * (dxdt) * (dydt)) / (0.415 + x)
    return np.array([dxdt, dydt, dx2dt2, dy2dt2])
init = array([0, pi / 18, 0, 0])
time = np.linspace(0.0, 10.0, 1000)
sol = odeint(deriv, init, time)
plt.plot(sol[:, 0], sol[:, 1], label='hi')
plt.show()
The second part of the code looks like you are trying to do a change of coordinate.
I'm not sure why you try to solve the ode again instead of just doing this.
x = sol[:, 0]
y = sol[:, 1]

def plot(h):
    x, y = h
    n = (0.4 + x) * sin(y)
    u = (0.4 + x) * cos(y)
    return np.array([n, u])

n, u = plot((x, y))
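and then plot n against u directly (a short sketch to complete the transformation):

plt.plot(n, u)
plt.xlabel("n")
plt.ylabel("u")
plt.show()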
As of now, what you are doing in the second odeint call is solving this system:
n' = (0.4 + x)*sin(y), u' = (0.4 + x)*cos(y), x' = x, y' = y
which leads to x = e^t and y = e^t, and hence n' = (0.4 + e^t)*sin(e^t) and u' = (0.4 + e^t)*cos(e^t).
Without going too much into the details, with some intuition you can see that this leads to an attractor: the derivatives of n and u switch sign faster and with greater magnitude at an exponential rate, so n and u collapse onto an attractor, as shown by your plot.
If you are actually trying to solve another differential equation, I would need to see it in order to help you further.
This is what happens if you do the transformation and set the time to 1000: [resulting plot]
I have a system of ODEs that depend on a matrix of data. Each ODE should reference a different column of data in its evaluation.
import numpy as np
n_eqns = 20
coeffs = np.random.normal(0, 1, (n_eqns, 20))
def dfdt(_, f, idx):
    return (f ** 2) * coeffs[idx, :].sum() - 2 * f * coeffs.sum()
from scipy.integrate import ode
f0 = np.random.uniform(-1, 1, n_eqns)
t0 = 0
tf = 1
dt = 0.001
r = ode(dfdt)
r.set_initial_value(f0, t0).set_f_params(range(n_eqns))
while r.successful() and r.t < tf:
    print(r.t+dt, r.integrate(r.t+dt))
How can I specify that each ODE should use the idx value associated with its index in the system of ODEs? The first equation should be passed idx=0, the second idx=1, and so on.
The function dfdt takes and returns the state and derivative, respectively, as arrays (or other iterables). Thus, all you have to do is loop over all indices and apply your operations accordingly. For example:
def dfdt(t, f):
    output = np.empty_like(f)
    for i, entry in enumerate(f):
        output[i] = f[i]**2 * coeffs[i, :].sum() - 2*f[i]*coeffs.sum()
    return output
You can also write this using NumPy’s component-wise operations (which is quicker):
def dfdt(t, f):
    return f**2 * coeffs.sum(axis=1) - 2*f*coeffs.sum()
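With this vectorised version, idx is no longer needed, so the driver from the question simplifies to (a sketch, reusing f0, t0, tf, and dt from the question):

r = ode(dfdt)
r.set_initial_value(f0, t0)  # no set_f_params required any more
while r.successful() and r.t < tf:
    print(r.t + dt, r.integrate(r.t + dt))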
Finally note that using f for your state may be somewhat confusing since this is how ode denotes the derivative (which you call dfdt).
I am having some trouble translating my MATLAB code into Python via SciPy and NumPy. I am stuck on how to find optimal parameter values (k0 and k1) for my system of ODEs to fit to my ten observed data points. I currently have an initial guess for k0 and k1. In MATLAB, I can use something called 'fminsearch', which is a function that takes the system of ODEs, the observed data points, and the initial values of the system of ODEs. It will then calculate a new pair of parameters k0 and k1 that fit the observed data. I have included my code to see if you can help me implement some kind of 'fminsearch' to find the optimal parameter values k0 and k1 that will fit my data. I want to add whatever code does this to my lsqtest.py file.
I have three .py files - ode.py, lsq.py, and lsqtest.py
ode.py:
def f(y, t, k):
    return (-k[0]*y[0],
            k[0]*y[0]-k[1]*y[1],
            k[1]*y[1])
lsq.py:
import pylab as py
import numpy as np
from scipy import integrate
from scipy import optimize
import ode
def lsq(teta, y0, data):
    #INPUT teta, the unknowns k0, k1
    #      data, observed
    #      y0, initial values needed by the ODE
    #OUTPUT lsq value
    t = np.linspace(0, 9, 10)
    y_obs = data  #data points
    k = [0, 0]
    k[0] = teta[0]
    k[1] = teta[1]
    #call the ODE solver to get the states:
    r = integrate.odeint(ode.f, y0, t, args=(k,))
    #the ODE system in ode.py
    #at each row (time point), y_cal has
    #the values of the components [A,B,C]
    y_cal = r[:,1]  #separate the measured B
    #compute the expression to be minimized:
    return sum((y_obs-y_cal)**2)
lsqtest.py:
import pylab as py
import numpy as np
from scipy import integrate
from scipy import optimize
import lsq
if __name__ == '__main__':
    teta = [0.2, 0.3]  #guess for parameter values k0 and k1
    y0 = [1, 0, 0]  #initial conditions for system
    y = [0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309]  #observed data points
    data = y
    resid = lsq.lsq(teta, y0, data)
    print resid
For this kind of fitting task you could use the package lmfit. The outcome of the fit would look like this; as you can see, the data are reproduced very well: [plot of measured data and fit]
For now, I fixed the initial concentrations, you could also set them as variables if you like (just remove the vary=False in the code below). The parameters you obtain are:
[[Variables]]
x10: 5 (fixed)
x20: 0 (fixed)
x30: 0 (fixed)
k0: 0.12183301 +/- 0.005909 (4.85%) (init= 0.2)
k1: 0.77583946 +/- 0.026639 (3.43%) (init= 0.3)
[[Correlations]] (unreported correlations are < 0.100)
C(k0, k1) = 0.809
The code that reproduces the plot looks like this (some explanation can be found in the inline comments):
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from lmfit import minimize, Parameters, Parameter, report_fit
def f(y, t, paras):
    """
    Your system of differential equations
    """
    x1 = y[0]
    x2 = y[1]
    x3 = y[2]

    try:
        k0 = paras['k0'].value
        k1 = paras['k1'].value
    except KeyError:
        k0, k1 = paras

    # the model equations
    f0 = -k0 * x1
    f1 = k0 * x1 - k1 * x2
    f2 = k1 * x2
    return [f0, f1, f2]
def g(t, x0, paras):
    """
    Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0
    """
    x = odeint(f, x0, t, args=(paras,))
    return x
def residual(paras, t, data):
    """
    compute the residual between actual data and fitted data
    """
    x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
    model = g(t, x0, paras)
    # you only have data for one of your variables
    x2_model = model[:, 1]
    return (x2_model - data).ravel()
# initial conditions
x10 = 5.
x20 = 0
x30 = 0
y0 = [x10, x20, x30]
# measured data
t_measured = np.linspace(0, 9, 10)
x2_measured = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])
plt.figure()
plt.scatter(t_measured, x2_measured, marker='o', color='b', label='measured data', s=75)
# set parameters including bounds; you can also fix parameters (use vary=False)
params = Parameters()
params.add('x10', value=x10, vary=False)
params.add('x20', value=x20, vary=False)
params.add('x30', value=x30, vary=False)
params.add('k0', value=0.2, min=0.0001, max=2.)
params.add('k1', value=0.3, min=0.0001, max=2.)
# fit model
result = minimize(residual, params, args=(t_measured, x2_measured), method='leastsq') # leastsq nelder
# check results of the fit
data_fitted = g(np.linspace(0., 9., 100), y0, result.params)
# plot fitted data
plt.plot(np.linspace(0., 9., 100), data_fitted[:, 1], '-', linewidth=2, color='red', label='fitted data')
plt.legend()
plt.xlim([0, max(t_measured)])
plt.ylim([0, 1.1 * max(data_fitted[:, 1])])
# display fitted statistics
report_fit(result)
plt.show()
If you have data for additional variables, you can simply update the function residual.
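For instance, a sketch of such an update, assuming hypothetical measured arrays data_x1 and data_x3 for the other two variables:

def residual(paras, t, data_x1, data_x2, data_x3):
    x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
    model = g(t, x0, paras)
    # stack the residuals of all measured variables into one flat array
    return np.concatenate((model[:, 0] - data_x1,
                           model[:, 1] - data_x2,
                           model[:, 2] - data_x3)).ravel()

The call to minimize would then pass the extra arrays via args accordingly.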
The following worked for me:
import pylab as pp
import numpy as np
from scipy import integrate, interpolate
from scipy import optimize
##initialize the data
x_data = np.linspace(0,9,10)
y_data = np.array([0.000,0.416,0.489,0.595,0.506,0.493,0.458,0.394,0.335,0.309])
def f(y, t, k):
    """define the ODE system in terms of
    dependent variable y,
    independent variable t, and
    optional parameters, in this case a single variable k"""
    return (-k[0]*y[0],
            k[0]*y[0]-k[1]*y[1],
            k[1]*y[1])
def my_ls_func(x, teta):
    """definition of function for LS fit
    x gives evaluation points,
    teta is an array of parameters to be varied for fit"""
    # create an alias to f which passes the optional params
    f2 = lambda y, t: f(y, t, teta)
    # calculate ode solution, return values for each entry of "x"
    r = integrate.odeint(f2, y0, x)
    # in this case, we only need one of the dependent variable values
    return r[:,1]
def f_resid(p):
    """function to pass to optimize.leastsq
    The routine will square and sum the values returned by
    this function"""
    return y_data - my_ls_func(x_data, p)
# solve the system - the solution is in variable c
guess = [0.2, 0.3]  # initial guess for params
y0 = [1, 0, 0]  # initial conditions for ODEs
(c, kvg) = optimize.leastsq(f_resid, guess)  # get params
print("parameter values are ", c)
# fit ODE results to interpolating spline just for fun
xeval=np.linspace(min(x_data), max(x_data),30)
gls = interpolate.UnivariateSpline(xeval, my_ls_func(xeval,c), k=3, s=0)
#pick a few more points for a very smooth curve, then plot
# data and curve fit
xeval=np.linspace(min(x_data), max(x_data),200)
#Plot of the data as red dots and fit as blue line
pp.plot(x_data, y_data,'.r',xeval,gls(xeval),'-b')
pp.xlabel('xlabel',{"fontsize":16})
pp.ylabel("ylabel",{"fontsize":16})
pp.legend(('data','fit'),loc=0)
pp.show()
Look at the scipy.optimize module. The minimize function looks fairly similar to fminsearch; with method='Nelder-Mead' it uses the same kind of simplex algorithm for optimization.
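A minimal sketch of that route, reusing f_resid and the guess from the answer above (minimize needs a scalar objective, so the residual vector is squared and summed):

from scipy import optimize

def cost(p):
    res = f_resid(p)        # residual vector from the answer above
    return (res**2).sum()   # scalar objective for minimize()

result = optimize.minimize(cost, [0.2, 0.3], method='Nelder-Mead')
print(result.x)  # fitted k0, k1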
# cleaned up a bit to get my head around it - thanks for sharing
import pylab as pp
import numpy as np
from scipy import integrate, optimize
class Parameterize_ODE():
    def __init__(self):
        self.X = np.linspace(0, 9, 10)
        self.y = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])
        self.y0 = [1, 0, 0]  # initial conditions ODEs
    def ode(self, y, X, p):
        return (-p[0]*y[0],
                p[0]*y[0]-p[1]*y[1],
                p[1]*y[1])
    def model(self, X, p):
        return integrate.odeint(self.ode, self.y0, X, args=(p,))
    def f_resid(self, p):
        return self.y - self.model(self.X, p)[:,1]
    def optim(self, p_guess):
        return optimize.leastsq(self.f_resid, p_guess)  # fit params

po = Parameterize_ODE(); p_guess = [0.2, 0.3]
c, kvg = po.optim(p_guess)
# --- show ---
print("parameter values are ", c, kvg)
x = np.linspace(min(po.X), max(po.X), 2000)
pp.plot(po.X, po.y,'.r',x, po.model(x, c)[:,1],'-b')
pp.xlabel('X',{"fontsize":16}); pp.ylabel("y",{"fontsize":16}); pp.legend(('data','fit'),loc=0); pp.show()