Lotka-Volterra with Runge-Kutta: not the desired precision - Python

Population over time (should be the same height at every peak)
I've written a program to simulate a mice and fox population using the classical Runge-Kutta method (4th order).
But the result is not as expected: every peak should be at nearly the same height.
I don't think it is a problem of step size.
Do you have an idea?
import matplotlib.pyplot as plt
import numpy as np

# function definitions
def mice(f_0, m_0):
    km = 2.     # birth rate mice
    kmf = 0.02  # death rate mice
    return km*m_0 - kmf*m_0*f_0

def fox(m_0, f_0):
    kf = 1.06   # death rate foxes
    kfm = 0.01  # birth rate foxes
    return kfm*m_0*f_0 - kf*f_0

def Runge_kutta4(h, f, xn, yn):
    k1 = h*f(xn, yn)
    k2 = h*f(xn + h/2, yn + k1/2)
    k3 = h*f(xn + h/2, yn + k2/2)
    k4 = h*f(xn + h, yn + k3)
    return yn + k1/6 + k2/3 + k3/3 + k4/6

h = 0.01
f = 15.
m = 100.
f_list = [f]
m_list = [m]

for i in range(10000):
    fox_new = Runge_kutta4(h, fox, m, f)
    mice_new = Runge_kutta4(h, mice, f, m)
    f_list.append(fox_new)
    m_list.append(mice_new)
    f = fox_new
    m = mice_new

time = np.linspace(0, 100, 10001)

# Phase plot LV
fig = plt.figure(figsize=(10, 10))
fig.suptitle("Runge Kutta 4")
plt.grid()
plt.xlabel('Mice', fontsize=10)
plt.ylabel('Foxes', fontsize=10)
plt.plot(m_list, f_list, '-')
plt.axis('equal')
plt.show()
fig.savefig("Faceplott_Runge_Kutta4.jpg", dpi=fig.dpi)

fig1 = plt.figure(figsize=(12, 10))
fig1.suptitle("Runge Kutta 4")
plt.grid()
plt.xlabel('Time [d]', fontsize=10)
plt.ylabel('Population size', fontsize=10)
plt.plot(time, m_list, label='mice')
plt.plot(time, f_list, label='fox')
plt.legend()
plt.show()
fig1.savefig("Fox_Miceplot_Runge_Kutta4.jpg", dpi=fig.dpi)

In the Runge-Kutta implementation, xn is the time variable and yn the scalar state variable; f is the right-hand side of the scalar ODE y'(x) = f(x, y(x)). However, that is not how you apply the RK4 procedure here: your ODE functions are autonomous, they contain no time variable but instead two coupled state variables. As implemented, the result is some convoluted first-order method of no specific type.
You need to solve the coupled system as a coupled system, that is, the stages for both variables have to be calculated simultaneously with the same increments.
kf1 = h*fox(mn, fn)
km1 = h*mice(fn, mn)
kf2 = h*fox(mn+0.5*km1, fn+0.5*kf1)
km2 = h*mice(fn+0.5*kf1, mn+0.5*km1)
kf3 = h*fox(mn+0.5*km2, fn+0.5*kf2)
km3 = h*mice(fn+0.5*kf2, mn+0.5*km2)
kf4 = h*fox(mn+km3, fn+kf3)
km4 = h*mice(fn+kf3, mn+km3)
etc.
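For completeness, the final combination step would then use the same weights as in the scalar Runge_kutta4 above (a sketch, filling in the elided part):
fn = fn + (kf1 + 2*kf2 + 2*kf3 + kf4)/6
mn = mn + (km1 + 2*km2 + 2*km3 + km4)/6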
See also Runge Kutta problems in JS for the same problem in JavaScript
The other way is to vectorize the system so that the Runge-Kutta procedure can remain the same, but in the integration loop the state vector has to be constructed and unpacked,
def VL(x, y):
    f, m = y
    return np.array([fox(m, f), mice(f, m)])

y = np.array([f, m])
time = np.arange(x0, xf + 0.1*h, h)
for x in time[1:]:
    y = Runge_kutta4(h, VL, x, y)
    f, m = y
    f_list.append(f)
    m_list.append(m)
everything else remaining the same.
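Put together, a self-contained sketch of the vectorized version, reusing the question's constants and h = 0.01 (x0 = 0 and xf = 100 are assumed for the time range, matching the 10000 steps above):

import matplotlib.pyplot as plt
import numpy as np

def mice(f_0, m_0):
    return 2.*m_0 - 0.02*m_0*f_0    # birth rate 2, death rate 0.02

def fox(m_0, f_0):
    return 0.01*m_0*f_0 - 1.06*f_0  # birth rate 0.01, death rate 1.06

def VL(x, y):
    f, m = y
    return np.array([fox(m, f), mice(f, m)])

def Runge_kutta4(h, f, xn, yn):
    k1 = h*f(xn, yn)
    k2 = h*f(xn + h/2, yn + k1/2)
    k3 = h*f(xn + h/2, yn + k2/2)
    k4 = h*f(xn + h, yn + k3)
    return yn + k1/6 + k2/3 + k3/3 + k4/6

h, x0, xf = 0.01, 0.0, 100.0   # assumed time range
y = np.array([15., 100.])      # state vector [foxes, mice]
f_list, m_list = [y[0]], [y[1]]
time = np.arange(x0, xf + 0.1*h, h)
for x in time[1:]:
    y = Runge_kutta4(h, VL, x, y)
    f_list.append(y[0])
    m_list.append(y[1])

plt.plot(time, m_list, label='mice')
plt.plot(time, f_list, label='fox')
plt.legend()
plt.show()

With the coupled stages, the peaks stay at the same height over many periods instead of drifting.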

scipy curve_fit returns initial parameter estimates

I am trying to use scipy's curve_fit function to fit a Gaussian function to my data, to estimate a theoretical power spectral density. While doing so, curve_fit always returns the initial parameters (p0=[1,1,1]), telling me that the fitting didn't work.
I don't know where the issue is. I am using Python 3.9 (Spyder 5.1.5) from the Anaconda distribution on Windows 11.
Here is a WeTransfer link to the data file:
https://wetransfer.com/downloads/6097ebe81ee0c29ee95a497128c1c2e420220704110130/86bf2d
Here is my code below. Can someone tell me what the issue is and how I can solve it?
In the picture of the plot, the blue curve is my experimental PSD and the orange one is the result of the fit.
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import scipy.constants as cst

File = np.loadtxt('test5.dat')
X = File[:, 1]
Y = File[:, 2]

f_sample = 50000
time = []
for i in range(1, len(X)+1):
    t = i*(1/f_sample)
    time = np.append(time, t)

N = X.shape[0]  # number of observations
N1 = int(N/2)
delta_t = time[2] - time[1]
T_mes = N * delta_t
freq = np.arange(1/T_mes, (N+1)/T_mes, 1/T_mes)
freq = freq[0:N1]
fNyq = f_sample/2  # Nyquist frequency
nb = 350
freq_block = []

# discrete Fourier transform
X_ft = delta_t*np.fft.fft(X, n=N)
X_ft = X_ft[0:N1]

plt.figure()
plt.plot(time, X)
plt.xlabel('t [s]')
plt.ylabel('x [micro m]')

# Experimental power spectrum on both raw and blocked data
PSD_X_exp = (np.abs(X_ft)**2/T_mes)
PSD_X_exp_b = []
STD_PSD_X_exp_b = []
for i in range(0, N1+2, nb):
    freq_b = np.array(freq[i:i+nb])  # i-nb:i
    psd_b = np.array(PSD_X_exp[i:i+nb])
    freq_block = np.append(freq_block, (1/nb)*np.sum(freq_b))
    PSD_X_exp_b = np.append(PSD_X_exp_b, (1/nb)*np.sum(psd_b))
    STD_PSD_X_exp_b = np.append(STD_PSD_X_exp_b, PSD_X_exp_b/np.sqrt(nb))

plt.figure()
plt.loglog(freq, PSD_X_exp)
plt.legend(['Raw Experimental PSD'])
plt.xlabel('f [Hz]')
plt.ylabel('PSD')

plt.figure()
plt.loglog(freq_block, PSD_X_exp_b)
plt.legend(['Experimental PSD after blocking'])
plt.xlabel('f [Hz]')
plt.ylabel('PSD')

kB = cst.k  # Boltzmann constant [m^2 kg/(s^2 K)]
T = 273.15 + 25  # Temperature [K]
r = (2.8 / 2) * 1e-6  # Particle radius [m]
v = 0.00002414 * 10 ** (247.8 / (-140 + T))  # Water viscosity [Pa*s]
gamma = np.pi * 6 * r * v  # [m*Pa*s]
Do = kB*T/gamma  # expected value for D
f3db_o = 50000  # expected value for f3db
fc_o = 300  # expected value for fc
n = np.arange(-10, 11)

def theo_spectrum_lorentzian_filter(x, D_, fc_, f3db_):
    PSD_theo = []
    for i in range(0, len(x)):
        psd_theo = np.sum((((D_*Do)/2*math.pi**2)/((fc_*fc_o)**2 + (x[i] + n*f_sample)**2))
                          * (1/(1 + ((x[i] + n*f_sample)/(f3db_*f3db_o))**2)))
        PSD_theo = np.append(PSD_theo, psd_theo)
    return PSD_theo

popt, pcov = curve_fit(theo_spectrum_lorentzian_filter, freq_block, PSD_X_exp_b,
                       p0=[1, 1, 1], sigma=STD_PSD_X_exp_b, absolute_sigma=True,
                       check_finite=True, bounds=(0.1, 10), method='trf', jac=None)
D_, fc_, f3db_ = popt
D1 = D_*Do
fc1 = fc_*fc_o
f3db1 = f3db_*f3db_o
print('Diffusion constant D = ', D1, ' Corner frequency fc = ', fc1, 'f3db(diode,eff) = ', f3db1)
I believe I've successfully fitted your data. Here's the approach I took.
First, I plotted your model (with popt=[1, 1, 1]) and the data you had. I noticed your data was significantly lower than the model. Then I started fiddling with the parameters. I wanted to push the model upwards. I did that by multiplying popt[0] by increasingly large values. I ended up with 1E13 as a ballpark value. Note that I have no idea if this is physically possible for your model. Then I jury-rigged your fitting function to multiply D_ by 1E13 and ran your code. I got this fit:
So I believe it's a problem of 1) inappropriate starting values and 2) inappropriate bounds. In your position, I would revise this model and check whether there are any problems with units and so on.
Here's what I used to try to fit your model:
plt.figure()
plt.loglog(freq_block[:170], PSD_X_exp_b[:170], label='Exp')
plt.loglog(freq_block[:170],
           theo_spectrum_lorentzian_filter(
               freq_block[:170],
               1E13*popt[0], popt[1], popt[2]),
           label='model')
plt.xlabel('f [Hz]')
plt.ylabel('PSD')
plt.legend()
I limited the data to point 170 because there were some weird backwards values that made me uncomfortable. I would recheck them if I were you.
Here's the model code I used. I didn't change the curve_fit call (except to limit x to [:170]).
def theo_spectrum_lorentzian_filter(x, D_, fc_, f3db_):
    PSD_theo = []
    D_ = 1E13*D_  # I only changed here
    for i in range(0, len(x)):
        psd_theo = np.sum((((D_*Do)/2*math.pi**2)/((fc_*fc_o)**2 + (x[i] + n*f_sample)**2))
                          * (1/(1 + ((x[i] + n*f_sample)/(f3db_*f3db_o))**2)))
        PSD_theo = np.append(PSD_theo, psd_theo)
    return PSD_theo
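An equivalent route, sketched here rather than tested: keep the model exactly as in the question (no 1E13 factor inside) and move the scale into the starting value and per-parameter bounds of the curve_fit call. The bound ranges below are assumptions to adapt:

# hypothetical alternative: scale p0 and bounds instead of the model body
popt, pcov = curve_fit(theo_spectrum_lorentzian_filter,
                       freq_block[:170], PSD_X_exp_b[:170],
                       p0=[1E13, 1, 1],  # start D_ at the ballpark scale found above
                       sigma=STD_PSD_X_exp_b[:170], absolute_sigma=True,
                       bounds=([1E12, 0.1, 0.1], [1E14, 10, 10]),  # per-parameter bounds
                       method='trf')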

Solving the energy of the harmonic oscillator using solve_ivp

I was given the harmonic oscillator equation in the form y'' = -w^2 * y with initial conditions y(0) = 1 and y'(0) = 0, where w = 2*pi, and I had to compare the numerical solution with the exact solution y(t) = cos(w*t).
The first part went smoothly, and I saw that my result using solve_ivp gives me exactly the same curve as the exact solution.
But then I had to compare the evolution of the energy for RK45, RK23, and DOP853.
The energy is E = 0.5 * k * y^2 + 0.5 * v^2 with k = w^2 and v = y' the velocity.
I was expecting to get a straight line, since my harmonic oscillator isn't damped, but I got a decreasing curve for each integrator, and I do not know why. If anyone has any idea, my code is posted here. Thanks in advance for the help!
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt

# Exercise 1
# We first write the initial conditions
omega = 2*np.pi
omega_sq = omega**2
y_0 = [1., 0.]
t = np.linspace(0, 4, 100)  # Time interval between 0 and 4 as asked in the problem

def dY_dt(t, y):  # Definition of the system
    return [y[1], -omega_sq*y[0]]

result = solve_ivp(dY_dt, (0, 100), y0=y_0, method='RK45', t_eval=t)
z = np.cos(omega*t)  # Exact solution, for comparison
plt.plot(t, z, 'r', lw=5)  # Plot of the exact solution
plt.plot(t, result.y[0], 'b')  # Plot of the numerical solution

# Exercise 2
def Energie(Y):  # Energy
    k = omega_sq
    E = 0.5*(Y.y[1])**2 + 0.5*k*(Y.y[0])**2
    return E

E = Energie(result)

# Legends
plt.ylabel("Position")
plt.xlabel("Time in seconds")
plt.title('Comparison between numerical and exact solution')
plt.legend(['Exact solution', 'solve_ivp solution'], loc='lower left')
plt.figure()
plt.plot(t, E)
plt.show()
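One thing worth checking, offered as a guess rather than a confirmed diagnosis: solve_ivp's default tolerances (rtol=1e-3, atol=1e-6) are loose, and at that accuracy every explicit Runge-Kutta method loses a little energy per step. A quick sketch of the same comparison with tighter tolerances:

# assumption: the energy drift comes from the default tolerances,
# not from the energy formula itself
for method in ['RK45', 'RK23', 'DOP853']:
    res = solve_ivp(dY_dt, (0, 4), y0=y_0, method=method,
                    t_eval=t, rtol=1e-9, atol=1e-12)
    E = 0.5*res.y[1]**2 + 0.5*omega_sq*res.y[0]**2
    plt.plot(t, E, label=method)
plt.legend()
plt.show()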

Implementation of piecewise function in python with numerical ODE solution. TypeError: float() argument must be a string or a number, not 'OdeResult'

I have a reaction system whose mechanism is defined in the form of a nonlinear first-order ODE as:
dx(t)/dt = -g*a*f*(1 - rho)*(1 - exp(-k*t))*(x(t)^2 + b*x(t))/(c*x(t) + p)
where g, a, f, b, c, rho, k and p are all constants.
I'm trying to write code in Python to obtain the time trajectory of the variable x(t) from time t1 to time tf, where the kinetic range of the profile is always preceded by a waiting period of 2 min before the reaction starts, with a constant x0 value in that period, starting at time t0 = 0. My aim is to export the whole profile to Excel (as an .xlsx file) at regularly spaced intervals of 2.5 s, as shown in the figure linked below.
Overall Trajectory Profile - Plotted Curve
I would like the system to be defined as a piecewise function, but when I run the code below Python gives me the error: "float() argument must be a string or a number, not 'OdeResult'". I'm stuck here! Any suggestions on how to remedy this problem, or alternatives?
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt

f = 1
rho = 0.1
r = 0.5
a = 0.08
x0 = 2
b = x0*(1/r - 1)
k = 1.0e-2
g = 0.138
b = 2
c = 1.11
p = 2.174
tau = (1/k)*np.log(f/(f - rho))  # Inhibition time (induction period)
t0 = 0
ti = 120
t1 = ti + tau
tf = 1000
delta_t = 2.5
t_end = tf + delta_t
dt = 1.0e-5
t_eval = np.arange(t0, t_end, dt)

def ode(t, x):
    return -g*a*f*(1 - rho)*(1 - np.exp(-k*t))*(x**2 + b*x)/(c*x + p)

sol = solve_ivp(ode, [t0, t_end], [x0], t_eval=t_eval, method='RK45')

plt.figure(figsize=(12, 4))
plt.subplot(121)
curve, = plt.plot(sol.t, sol.y[0])
plt.xlabel("t")
plt.ylabel("x(t)")
plt.show()

t = np.arange(t0, tf, delta_t)

def x(t):
    if t0 <= t <= ti: return x0  # Waiting period
    elif ti < t < t1: return x0  # Lag period associated with 'tau' (inhibition period)
    else: return sol  # Dynamic range (problem here!!) TypeError: float() argument must be a string or a number, not 'OdeResult'

y = []
for i in range(len(t)):
    y.append(x(t[i]))

plt.plot(t, y, c='red', ls='', ms=5, marker='.')
ax = plt.gca()
ax.set_ylim([0, x0])
plt.show()
Replace t_eval=t_eval with dense_output=True. Then the returned sol object contains an interpolation function sol.sol, so that you can complete your x function as
return sol.sol(t)[0]
You do not need an else branch if the branch before is concluded with a return statement.
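Putting those pieces together, a sketch of the completed x(t) (assuming sol came from a solve_ivp call with dense_output=True):

# assumes: sol = solve_ivp(ode, [t0, t_end], [x0], dense_output=True, method='RK45')
def x(t):
    if t0 <= t <= ti:
        return x0             # waiting period
    if ti < t < t1:
        return x0             # inhibition lag
    return sol.sol(t)[0]      # evaluate the dense-output interpolant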
You might want to include the zero derivative on the constant segment inside the ODE function, so that the x(t) function is not needed. Then you would also get the desired values directly with the instant evaluation using t_eval, as done originally. A sketch of that variant follows.
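A minimal sketch of that variant, reusing the constants from the question (whether the exponential factor should be shifted to start at t1 is a modeling choice, left here as in the original ode):

def ode_piecewise(t, x):
    if t <= t1:  # waiting period plus inhibition lag: x stays at x0
        return [0.0]
    return [-g*a*f*(1 - rho)*(1 - np.exp(-k*t))*(x[0]**2 + b*x[0])/(c*x[0] + p)]

t_grid = np.arange(t0, tf + delta_t, delta_t)  # the 2.5 s output grid
sol = solve_ivp(ode_piecewise, [t0, tf + delta_t], [x0], t_eval=t_grid,
                max_step=delta_t,  # keep steps small across the kink at t1
                method='RK45')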

How to calculate "relative error in the sum of squares" and "relative error in the approximate solution" from least squares method?

I have implemented a 3D Gaussian fit using scipy.optimize.leastsq and now I would like to tweak the arguments ftol and xtol to optimize the performance. However, I don't understand the "units" of these two parameters, which makes a proper choice difficult. Is it possible to calculate these two parameters from the results? That would give me an understanding of how to choose them. My data is numpy arrays of np.uint8. I tried to read the FORTRAN source code of MINPACK, but my FORTRAN knowledge is zero. I also checked the Levenberg-Marquardt algorithm, but I could not really get a number that was below ftol, for example.
Here is a minimal example of what I do:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq

class gaussian_model:
    def __init__(self):
        self.prev_iter_model = None
        self.f_vals = []

    def gaussian_1D(self, coeffs, xx):
        A, sigma, mu = coeffs
        # Center rotation around peak center
        x0 = xx - mu
        model = A*np.exp(-(x0**2)/(2*(sigma**2)))
        return model

    def residuals(self, coeffs, I_obs, xx, model_func):
        model = model_func(coeffs, xx)
        residuals = I_obs - model
        if self.prev_iter_model is not None:
            self.f = np.sum(((model - self.prev_iter_model)/model)**2)
            self.f_vals.append(self.f)
        self.prev_iter_model = model
        return residuals

# x data
x_start = 1
x_stop = 10
num = 100
xx, dx = np.linspace(x_start, x_stop, num, retstep=True)

# Simulated data with some noise
A, s_x, mu = 10, 0.5, 3
coeffs = [A, s_x, mu]
model = gaussian_model()
yy = model.gaussian_1D(coeffs, xx)
noise_ampl = 0.5
noise = np.random.normal(0, noise_ampl, size=num)
yy += noise

# LM least squares
initial_guess = [1, 1, 1]
pred_coeffs, cov_x, info, mesg, ier = leastsq(model.residuals, initial_guess,
                                              args=(yy, xx, model.gaussian_1D),
                                              ftol=1E-6, full_output=True)
yy_fit = model.gaussian_1D(pred_coeffs, xx)

rel_SSD = np.sum(((yy - yy_fit)/yy)**2)
RMS_SSD = np.sqrt(rel_SSD/num)

print(RMS_SSD)
print(model.f)
print(model.f_vals)

fig, ax = plt.subplots(1, 2)
# Plot results
ax[0].scatter(xx, yy)
ax[0].plot(xx, yy_fit, c='r')
ax[1].scatter(range(len(model.f_vals)), model.f_vals, c='r')
# ax[1].set_ylim(0, 1E-6)
plt.show()
rel_SSD is around 1 and definitely not something below ftol = 1E-6.
EDIT: Based on @user12750353's answer below, I updated my minimal example to try to recreate how lmdif determines termination with ftol. The problem is that my f_vals are too small, so they are not the right values. The reason I would like to recreate this is that I would like to see what kind of numbers I am getting in my main code, to decide on an ftol that would terminate the fitting process earlier.
Since you are giving a function without the gradient, the method called is lmdif. Instead of gradients it will use a forward-difference gradient estimate, f(x + delta) - f(x) ~ delta * df(x)/dx (writing as if the parameter were scalar). In the lmdif source you find the following description:
c ftol is a nonnegative input variable. termination
c occurs when both the actual and predicted relative
c reductions in the sum of squares are at most ftol.
c therefore, ftol measures the relative error desired
c in the sum of squares.
c
c xtol is a nonnegative input variable. termination
c occurs when the relative error between two consecutive
c iterates is at most xtol. therefore, xtol measures the
c relative error desired in the approximate solution.
Looking at the code, the actual reduction actred = 1 - (fnorm1/fnorm)**2 is what you calculated for rel_SSD, but between the two last iterations, not between the fitted function and the target points.
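In Python terms, a small sketch of that quantity (a helper for illustration, not part of the original code; res_prev and res_curr are the residual vectors of two consecutive internal iterates):

import numpy as np

def actual_reduction(res_prev, res_curr):
    # actred as in lmdif: relative reduction of the residual 2-norm
    # between consecutive iterates, compared against ftol
    fnorm = np.sqrt(np.sum(res_prev**2))
    fnorm1 = np.sqrt(np.sum(res_curr**2))
    return 1 - (fnorm1/fnorm)**2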
Example
The problem here is that we need to discover what values the internal variables assume. One attempt to do so is to save the coefficients and the residual norm every time the function is called, as follows.
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq

class gaussian_model:
    def __init__(self):
        self.prev_iter_model = None
        self.fnorm = []
        self.x = []

    def gaussian_1D(self, coeffs, xx):
        A, sigma, mu = coeffs
        # Center rotation around peak center
        x0 = xx - mu
        model = A*np.exp(-(x0**2)/(2*(sigma**2)))
        grad = np.array([
            model / A,
            model * x0**2 / (sigma**3),
            model * 2 * x0 / (2*(sigma**2))
        ]).transpose()
        return model, grad

    def residuals(self, coeffs, I_obs, xx, model_func):
        model, grad = model_func(coeffs, xx)
        residuals = I_obs - model
        self.x.append(np.copy(coeffs))
        self.fnorm.append(np.sqrt(np.sum(residuals**2)))
        return residuals

    def grad(self, coeffs, I_obs, xx, model_func):
        model, grad = model_func(coeffs, xx)
        residuals = I_obs - model
        return -grad

    def plot_progress(self):
        x = np.array(self.x)
        dx = np.sqrt(np.sum(np.diff(x, axis=0)**2, axis=1))
        plt.plot(dx / np.sqrt(np.sum(x[1:, :]**2, axis=1)))
        fnorm = np.array(self.fnorm)
        plt.plot(1 - (fnorm[1:]/fnorm[:-1])**2)
        # legend order matches the plot order above
        plt.legend([r'$||\Delta x||$', r'$||\Delta f||$'], loc='upper left')
# x data
x_start = 1
x_stop = 10
num = 100
xx, dx = np.linspace(x_start, x_stop, num, retstep=True)
# Simulated data with some noise
A, s_x, mu = 10, 0.5, 3
coeffs = [A, s_x, mu]
model = gaussian_model()
yy, _ = model.gaussian_1D(coeffs, xx)
noise_ampl = 0.5
noise = np.random.normal(0, noise_ampl, size=num)
yy += noise
Then we can see the relative variation of $x$ and $f$
initial_guess = [1, 1, 1]
pred_coeffs, cov_x, info, mesg, ier = leastsq(model.residuals, initial_guess,
                                              args=(yy, xx, model.gaussian_1D),
                                              xtol=1e-6,
                                              ftol=1e-6, full_output=True)
plt.figure(figsize=(14, 6))
plt.subplot(121)
model.plot_progress()
plt.yscale('log')
plt.grid()
plt.subplot(122)
yy_fit,_ = model.gaussian_1D(pred_coeffs, xx)
# Plot results
plt.scatter(xx, yy)
plt.plot(xx, yy_fit, c='r')
plt.show()
The problem with this is that the function is evaluated both to compute f and to compute the gradient of f. To produce a cleaner plot, you can pass Dfun so that func is evaluated only once per iteration.
# x data
x_start = 1
x_stop = 10
num = 100
xx, dx = np.linspace(x_start, x_stop, num, retstep=True)
# Simulated data with some noise
A, s_x, mu = 10, 0.5, 3
coeffs = [A, s_x, mu]
model = gaussian_model()
yy, _ = model.gaussian_1D(coeffs, xx)
noise_ampl = 0.5
noise = np.random.normal(0, noise_ampl, size=num)
yy += noise
# LM Least squares
initial_guess = [1, 1, 1]
pred_coeffs, cov_x, info, mesg, ier = leastsq(model.residuals, initial_guess,
                                              args=(yy, xx, model.gaussian_1D),
                                              Dfun=model.grad,
                                              xtol=1e-6,
                                              ftol=1e-6, full_output=True)
plt.figure(figsize=(14, 6))
plt.subplot(121)
model.plot_progress()
plt.yscale('log')
plt.grid()
plt.subplot(122)
yy_fit,_ = model.gaussian_1D(pred_coeffs, xx)
# Plot results
plt.scatter(xx, yy)
plt.plot(xx, yy_fit, c='r')
plt.show()
Well, the value I am obtaining for xtol is not exactly what is in the lmdif implementation.

Runge Kutta constants diverging for Lorenz system?

I'm trying to solve the Lorenz system using the 4th-order Runge-Kutta method, where
dx/dt = a*(y - x)
dy/dt = x*(b - z) - y
dz/dt = x*y - c*z
Since this system doesn't depend explicitly on time, it's possible to ignore that part in the iteration, so I just have
dX = F(x, y, z)
def func(x0):
    a = 10
    b = 38.63
    c = 8/3
    fx = a*(x0[1] - x0[0])
    fy = x0[0]*(b - x0[2]) - x0[1]
    fz = x0[0]*x0[1] - c*x0[2]
    return np.array([fx, fy, fz])

def kcontants(f, h, x0):
    k0 = h*f(x0)
    k1 = h*f(f(x0) + k0/2)
    k2 = h*f(f(x0) + k1/2)
    k3 = h*f(f(x0) + k2)
    # note: the returned K is a matrix
    return np.array([k0, k1, k2, k3])

x0 = np.array([-8, 8, 27])
h = 0.001
t = np.arange(0, 50, h)
result = np.zeros([len(t), 3])

for time in range(len(t)):
    if time == 0:
        k = kcontants(func, h, x0)
        result[time] = func(x0) + (1/6)*(k[0] + 2*k[1] + 2*k[2] + k[3])
    else:
        k = kcontants(func, h, result[time-1])
        result[time] = result[time-1] + (1/6)*(k[0] + 2*k[1] + 2*k[2] + k[3])
The result should be the Lorenz attractor; however, my code diverges around the fifth iteration, and it's because the constants I create in kcontants do. However, I checked and I'm pretty sure the Runge-Kutta implementation is not at fault... (at least I think so).
Edit:
Found a similar post, yet I can't figure out what I'm doing wrong.
You have an extra call of f(x0) in the calculation of k1, k2 and k3. Change the function kcontants to
def kcontants(f, h, x0):
    k0 = h*f(x0)
    k1 = h*f(x0 + k0/2)
    k2 = h*f(x0 + k1/2)
    k3 = h*f(x0 + k2)
    # note: the returned K is a matrix
    return np.array([k0, k1, k2, k3])
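As a side note beyond the fix above: the very first loop iteration in the question also adds the increments to func(x0) rather than to the state x0 itself. A minimal sketch of the loop with both corrections applied:

x0 = np.array([-8., 8., 27.])
h = 0.001
t = np.arange(0, 50, h)
result = np.zeros([len(t), 3])
result[0] = x0  # start from the state itself, not func(x0)
for n in range(1, len(t)):
    k = kcontants(func, h, result[n-1])
    result[n] = result[n-1] + (1/6)*(k[0] + 2*k[1] + 2*k[2] + k[3])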
Have you looked at different initial values for your calculation? Do the ones you've chosen make sense? I.e. are they physical? From past experience with rk you can sometimes get very confusing results if you pick silly starting parameters.
Good night. This is a version I made using the SciPy ODE integrator, scipy.integrate.odeint.
# Author : Carlos Eduardo da Silva Lima
# Theme : Movement of a planet around a fixed star
# Language : Python
# Date : 11/19/2022
# Environment : Google Colab

import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.optimize import root
from scipy.linalg import eig
from mpl_toolkits.mplot3d import Axes3D

#####################################
# Initial conditions and parameters #
#####################################
t_inicial = 0
t_final = 100
N = 10000
h = 1e-3
x_0 = 1.0
y_0 = 1.0
z_0 = 1.0

####################
# Lorenz equations #
####################
def Lorenz(r, t, sigma, rho, beta):
    x = r[0]; y = r[1]; z = r[2]
    edo1 = sigma*(y - x)
    edo2 = x*(rho - z) - y
    edo3 = x*y - beta*z
    return np.array([edo1, edo2, edo3])

t = np.linspace(t_inicial, t_final, N)
r_0 = np.array([x_0, y_0, z_0])

# sol = odeint(Lorenz, r_0, t, rtol=1e-6, args=(10, 28, 8/3))
sol = odeint(Lorenz, r_0, t, args=(10, 28, 8/3), Dfun=None, col_deriv=0, full_output=0,
             ml=None, mu=None, rtol=1e-9, atol=1e-9, tcrit=None, h0=0.0, hmax=0.0, hmin=0.0,
             ixpr=0, mxstep=0, mxhnil=0, mxordn=12, mxords=5, printmessg=0, tfirst=False)

# equivalently: x = sol[:, 0]; y = sol[:, 1]; z = sol[:, 2]
x, y, z = sol.T

# Plot
plt.style.use('dark_background')
ax = plt.figure(figsize=(10, 10)).add_subplot(projection='3d')
ax.plot(x, y, z, 'm-', lw=1.5)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.set_title("Lorenz attractor")
plt.show()
In this second part, I simulate two Lorenz systems to verify the system's sensitive dependence on the initial conditions. In the second system, I add a small perturbation eps = 1e-3 to the initial conditions x(t0), y(t0) and z(t0).
# Dependence on the initial conditions
eps = 1e-3
r_0_eps = np.array([x_0 + eps, y_0 + eps, z_0 + eps])
sol_eps = odeint(Lorenz, r_0_eps, t, args=(10, 28, 8/3), Dfun=None, col_deriv=0, full_output=0,
                 ml=None, mu=None, rtol=1e-9, atol=1e-9, tcrit=None, h0=0.0, hmax=0.0, hmin=0.0,
                 ixpr=0, mxstep=0, mxhnil=0, mxordn=12, mxords=5, printmessg=0, tfirst=False)

# equivalently: x_eps = sol_eps[:, 0]; y_eps = sol_eps[:, 1]; z_eps = sol_eps[:, 2]
x_eps, y_eps, z_eps = sol_eps.T

# Plot
plt.style.use('dark_background')
ax = plt.figure(figsize=(10, 10)).add_subplot(projection='3d')
ax.plot(x, y, z, 'r-', lw=1.5)
ax.plot(x_eps, y_eps, z_eps, 'b-.', lw=1.1)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.set_title("Lorenz Attractor")
plt.show()
Hope I helped, see you :).
