Converting and solving a stiff ODE system in Python

I have a stiff system of differential equations written as a first-order ODE system. The system was set up in Maple, whose default solver for it is a Rosenbrock method. My task now is to solve these equations with Python tools.
1) I do not know how to write the equations in Python code.
2) I do not know how to solve the equations with numpy, scipy, matplotlib, or PyDSTool. For PyDSTool I did not find any examples at all, although I have read that it is well suited for stiff systems.
Code:
import math
import numpy
import scipy
import matplotlib
varepsilon = pow(10, -2); j = -2.5*pow(10, -2); e = 3.0; tau = 0.3; delta = 2.0
u0 = -math.sqrt(-1 + math.sqrt(varepsilon ** 2 + 12) / varepsilon) * math.sqrt(2) / 6
u = -math.sqrt(-1 + math.sqrt(varepsilon ** 2 + 12) / varepsilon) * math.sqrt(2) * (1 + delta) / 6
v = 1 / (1 - 2 / e) * math.sqrt(j ** 2 + (1 - 2 / e) * (e ** 2 * u ** 2 + 1))
y8 = lambda y1,y5,y7: 1 / (1 - 2 / y1) * math.sqrt(y5 ** 2 + (1 - 2 / y1) * (1 + y1 ** 2 * y7 ** 2))
E0 = lambda y1,y8: (1 - 2 / y1) * y8
Phi0 = lambda y1,y7: y1 ** 2 * y7
y08 = y8(y1=e, y5=j, y7=u0);
E = E0(y1=e, y8=y08); Phi = Phi0(y1=e, y7=u0)
# initial values
z01 = e; z03 = 0; z04 = 0; z05 = j; z07 = u0; z08 = y08;
# (the remainder of this block is still in the original Maple notation)
p1 = -z1(x)*z5(x)/(z1(x)-2);
p3 = -z1(x)^2*z7(x);
p4 = z8(x)*(1-2/z1(x));
Q1 = -z5(x)^2/(z1(x)*(z1(x)-2))+(z8(x)^2/z1(x)^3-z7(x)^2)*(z1(x)-2);
Q3 = 2*z5(x)*z7(x)/z1(x);
Q4 = 2*z5(x)*z8(x)/(z1(x)*(z1(x)-2));
c1 = z1(x)*z7(x)*varepsilon;
c3 = -z1(x)*z5(x)*varepsilon;
C = z7(x)*varepsilon/z1(x)-z8(x)*(1-2/z1(x));
d1 = -z1(x)*z8(x)*varepsilon;
d3 = z1(x)*z5(x)*varepsilon;
B = z1(x)^2*z7(x)-z8(x)*varepsilon*(1-2/z1(x));
Omega = 1/(c1*d3*p3+c3*d1*p4-c3*d3*p1);
# differential equations
diff(z1(x), x) = z5(x);
diff(z3(x), x) = z7(x);
diff(z4(x), x) = z8(x);
diff(z5(x), x) = Omega*(-Q1*c1*d3*p3 - Q1*c3*d1*p4 + Q1*c3*d3*p1 + B*c3*p4 + C*d3*p3 + E*d3*p3 - Phi*c3*p4);
diff(z7(x), x) = -Omega*(Q3*c1*d3*p3 + Q3*c3*d1*p4 - Q3*c3*d3*p1 + B*c1*p4 - C*d1*p4 + C*d3*p1 - E*d1*p4 + E*d3*p1 - Phi*c1*p4);
diff(z8(x), x) = Omega*(-Q4*c1*d3*p3 - Q4*c3*d1*p4 + Q4*c3*d3*p1 + B*c1*p3 - B*c3*p1 - C*d1*p3 - E*d1*p3 - Phi*c1*p3 + Phi*c3*p1);
#features to be found and built curve
{z1(x), z3(x), z4(x), z5(x), z7(x), z8(x)}

After drifting on the Internet, I found something in principle:
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy import integrate
from scipy.signal import argrelextrema
from mpmath import mp, mpf
mp.dps = 50
varepsilon = pow(10, -2); j = 2.5*pow(10, -4); e = 3.0; tau = 0.5; delta = 2.0
u0 = -math.sqrt(-1 + math.sqrt(varepsilon ** 2 + 12) / varepsilon) * math.sqrt(2) / 6
u = -math.sqrt(-1 + math.sqrt(varepsilon ** 2 + 12) / varepsilon) * math.sqrt(2) * (1 + delta) / 6
v = 1 / (1 - 2 / e) * math.sqrt(j ** 2 + (1 - 2 / e) * (e ** 2 * u ** 2 + 1))
y8 = lambda y1,y5,y7: 1 / (1 - 2 / y1) * math.sqrt(y5 ** 2 + (1 - 2 / y1) * (1 + y1 ** 2 * y7 ** 2))
E0 = lambda y1,y8: (1 - 2 / y1) * y8
Phi0 = lambda y1,y7: y1 ** 2 * y7
y08 = y8(y1=e, y5=j, y7=u0);
E = E0(y1=e, y8=y08); Phi = Phi0(y1=e, y7=u0)
# initial values
z01 = e; z03 = 0.0; z04 = 0.0; z05 = j; z07 = u0; z08 = y08;
def model(x, z, varepsilon, E, Phi):
    z1, z3, z4, z5, z7, z8 = z[0], z[1], z[2], z[3], z[4], z[5]
    p1 = -z1*z5/(z1 - 2)
    p3 = -pow(z1, 2)*z7
    p4 = z8*(1 - 2/z1)
    Q1 = -pow(z5, 2)/(z1*(z1 - 2)) + (pow(z8, 2)/pow(z1, 3) - pow(z7, 2))*(z1 - 2)
    Q3 = 2*z5*z7/z1
    Q4 = 2*z5*z8/(z1*(z1 - 2))
    c1 = z1*z7*varepsilon
    c3 = -z1*z5*varepsilon
    C = z7*varepsilon/z1 - z8*(1 - 2/z1)
    d1 = -z1*z8*varepsilon
    d3 = z1*z5*varepsilon
    B = pow(z1, 2)*z7 - z8*varepsilon*(1 - 2/z1)
    Omega = 1/(c1*d3*p3 + c3*d1*p4 - c3*d3*p1)
    # differential equations
    dz1dx = z5
    dz3dx = z7
    dz4dx = z8
    dz5dx = Omega*(-Q1*c1*d3*p3 - Q1*c3*d1*p4 + Q1*c3*d3*p1 + B*c3*p4 + C*d3*p3 + E*d3*p3 - Phi*c3*p4)
    dz7dx = -Omega*(Q3*c1*d3*p3 + Q3*c3*d1*p4 - Q3*c3*d3*p1 + B*c1*p4 - C*d1*p4 + C*d3*p1 - E*d1*p4 + E*d3*p1 - Phi*c1*p4)
    dz8dx = Omega*(-Q4*c1*d3*p3 - Q4*c3*d1*p4 + Q4*c3*d3*p1 + B*c1*p3 - B*c3*p1 - C*d1*p3 - E*d1*p3 - Phi*c1*p3 + Phi*c3*p1)
    dzdx = [dz1dx, dz3dx, dz4dx, dz5dx, dz7dx, dz8dx]
    return dzdx
z0 = [z01, z03, z04, z05, z07, z08]
if __name__ == '__main__':
    # Start by specifying the integrator:
    # use ``vode`` with the backward differentiation formula (BDF)
    r = integrate.ode(model).set_integrator('vode', method='bdf')
    r.set_f_params(varepsilon, E, Phi)
    # Set the time range
    t_start = 0.0
    t_final = 0.1
    delta_t = 0.00001
    # Number of time steps: 1 extra for the initial condition
    num_steps = np.floor((t_final - t_start)/delta_t) + 1
    r.set_initial_value(z0, t_start)
    t = np.zeros((int(num_steps), 1), dtype=np.float64)
    Z = np.zeros((int(num_steps), 6), dtype=np.float64)
    t[0] = t_start
    Z[0] = z0
    k = 1
    while r.successful() and k < num_steps:
        r.integrate(r.t + delta_t)
        # Store the results to plot later
        t[k] = r.t
        Z[k] = r.y
        k += 1
    # All done! Plot the trajectories:
    Z1, Z3, Z4, Z5, Z7, Z8 = Z[:,0], Z[:,1], Z[:,2], Z[:,3], Z[:,4], Z[:,5]
    plt.plot(t, Z1, 'r-', label=r'$r(s)$')
    plt.grid('on')
    plt.ylabel(r'$r$')
    plt.xlabel('proper time s')
    plt.legend(loc='best')
    plt.show()
    plt.plot(t, Z5, 'r-', label=r'$\frac{dr}{ds}$')
    plt.grid('on')
    plt.ylabel(r'$\frac{dr}{ds}$')
    plt.xlabel('proper time s')
    plt.legend(loc='best')
    plt.show()
    plt.plot(t, Z7, 'r-', label=r'$\frac{d\phi}{ds}$')
    plt.grid('on')
    plt.xlabel('proper time s')
    plt.ylabel(r'$\frac{d\phi}{ds}$')
    plt.legend(loc='upper center')
    plt.show()
However, when reviewing the solutions obtained with scipy, I ran into an inconsistency between the scipy and Maple results. The essence of the problem is that the solutions oscillate rapidly: Maple captures these oscillations with high precision using Rosenbrock's method, while Python struggles with them using backward differentiation formulas:
r = integrate.ode(model).set_integrator('vode', method='bdf')
http://www.scholarpedia.org/article/Backward_differentiation_formulas
I tried all of the available integrators: "vode", "zvode", "lsoda", "dopri5", and "dop853". The best suited one is "vode"; however, it still does not meet my needs...
https://docs.scipy.org/doc/scipy/reference/generated/scipy.integrate.ode.html
This method captures the oscillations for j in the range ~10^{-5} to 10^{-3}, while Maple gives good results for any j.
I present the results obtained by scipy for j ~ 10^{-2}:
[plots omitted]
and the results obtained by Maple for j ~ 10^{-2}:
[plots omitted]
It is important that the oscillations are physical solutions! That is, Python captures the oscillations poorly for j ~ 10^{-2}. Can anyone tell me what I am doing wrong? And how can I look at the absolute error of the integration?
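For reference, here is a minimal sketch (untested against the Maple results, and assuming SciPy 1.4+ for the args keyword) of how the same model could be fed to scipy's newer solve_ivp interface with the implicit Radau method and explicit error tolerances; Radau is the closest scipy analogue to Maple's Rosenbrock-type stiff solvers, and rtol/atol are the knobs that directly control the local integration error:
from scipy.integrate import solve_ivp

# model has signature model(x, z, varepsilon, E, Phi); solve_ivp expects f(t, y),
# so the extra parameters are passed through the args keyword.
sol = solve_ivp(model, (0.0, 0.1), z0, method='Radau',
                args=(varepsilon, E, Phi),
                rtol=1e-10, atol=1e-12,   # tighten these to control the error
                dense_output=True)

print(sol.status, sol.message)            # check the solver actually reached t_final
xs = np.linspace(0.0, 0.1, 10001)
Z1 = sol.sol(xs)[0]                       # r(s), evaluated on a fine grid
plt.plot(xs, Z1, 'r-', label=r'$r(s)$')
plt.legend(loc='best')
plt.show()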

Related

How to use sliders to change how region varies with varying parameters (sympy)

I have a problem using sympy.
I have to plot regions that satisfy a few inequalities.
There are 3 parameters u, c, and q, which are all between (0, 1), and I want to see how the region changes as one parameter varies.
In the code below, I fixed one parameter, q, to see which values of u and c satisfy the inequalities.
Currently, I have to manually change the fixed value of q to a different float to see how the region changes.
Is there any way I can use sliders to see how the region changes continuously?
Thank you.
I'm new to Python and plotting.
It doesn't have to be SymPy, matplotlib, or plotly, as long as my needs are satisfied.
Here is the code I wrote:
import sympy
from sympy import And, symbols, plot_implicit
u, c = symbols('u c')
q = 0.5
m2 = 2 * sympy.sqrt(c*q*u)
k2 = (m2 - 2 * c) / m2
a2 = (m2 * (m2 - 2 * c)) / (2 * c * q)
b2 = (m2 ** 2 - c * ( 1 + q ) * m2) / (2 * c * q)
t3 = u - c + 1
k3 = (q * t3 - sympy.sqrt(c * q * t3 + 1 - c))/(q * t3 + 1 - c)
a3 = (2 * c * k3 ) / (q * (1 - k3)**2)
b3 = (u - c - ((c * (k3 **2)) / (q * ((1 - k3)**2)))) / ( 1- k3)
d3 = (- 1 + ((1 - k3) / c) - ((1 - k3)*(u - c - (u - c - ((c * (k3 **2)) / (q * ((1 - k3)**2)))))) / c ) * (1 - k3)
m3 = (2 * c * d3) / ((1 - k3)**2)
p1 = plot_implicit(And( 2 * (1 - u) < 1, u > (c + 4 + sympy.sqrt(c**2 + 8*c)) / 8 , u >= (4*q + c + sympy.sqrt(c**2 + 8 * q * c)) / (8 * q)), x_var = (u, 0, 1), y_var = (c, 0, 1), line_color = 'red', show=False)
p2 = plot_implicit(And( b2 + m2 <=1 , a2 > 0, u>c ), x_var = (u,0,1), y_var = (c, 0, 1), line_color = 'green', show=False)
p3 = plot_implicit(And(u>c, k3>0, d3>0, b3>a3, b3+m3 <=1, k3+d3 < 1) , x_var = (u, 0, 1), y_var = (c, 0, 1), show = False)
p1.append(p2[0])
p1.append(p3[0])
p1.show()
Any references or skeleton code will help.
Thank you
I'm going to use the following libraries:
SymPy Plot Backend for data generation. This package is capable of creating interactive plots with sliders. However, this functionality is not yet implemented for plot_implicit. Nonetheless, I'll use it to create the numerical data to be plotted.
Matplotlib; in particular, I'm going to follow its slider demo.
Note that you are plotting boolean expressions (created with sympy's And). Consequently, the ImplicitSeries used to generate the numerical data will use an adaptive algorithm, which is slow! So whenever you move the slider, you will have to wait a few seconds for the update to be rendered on screen.
from sympy import *
from spb import *
from spb.series import ImplicitSeries
from spb.backends.matplotlib import _matplotlib_list
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
import numpy as np
u, c, q = symbols('u c q')
init_q = 0.5
m2 = 2 * sqrt(c*q*u)
k2 = (m2 - 2 * c) / m2
a2 = (m2 * (m2 - 2 * c)) / (2 * c * q)
b2 = (m2 ** 2 - c * ( 1 + q ) * m2) / (2 * c * q)
t3 = u - c + 1
k3 = (q * t3 - sqrt(c * q * t3 + 1 - c))/(q * t3 + 1 - c)
a3 = (2 * c * k3 ) / (q * (1 - k3)**2)
b3 = (u - c - ((c * (k3 **2)) / (q * ((1 - k3)**2)))) / ( 1- k3)
d3 = (- 1 + ((1 - k3) / c) - ((1 - k3)*(u - c - (u - c - ((c * (k3 **2)) / (q * ((1 - k3)**2)))))) / c ) * (1 - k3)
m3 = (2 * c * d3) / ((1 - k3)**2)
i1 = And( 2 * (1 - u) < 1, u > (c + 4 + sqrt(c**2 + 8*c)) / 8 , u >= (4*q + c + sqrt(c**2 + 8 * q * c)) / (8 * q))
i2 = And( b2 + m2 <=1 , a2 > 0, u>c )
i3 = And(u>c, k3>0, d3>0, b3>a3, b3+m3 <=1, k3+d3 < 1)
def compute_inequality(i, val):
    # generate numerical data for matplotlib's fill method
    i = i.subs(q, val)
    s = ImplicitSeries(i, (u, 0, 1), (c, 0, 1))
    return _matplotlib_list(s.get_data()[0])
# create the figure and initialize the regions
fig, ax = plt.subplots()
region1, = ax.fill(*compute_inequality(i1, init_q))
region2, = ax.fill(*compute_inequality(i2, init_q))
region3, = ax.fill(*compute_inequality(i3, init_q))
# adjust the main plot to make room for the slider
fig.subplots_adjust(left=0.25, bottom=0.25)
# Make a horizontal slider to control the parameter q.
ax_q = fig.add_axes([0.25, 0.1, 0.65, 0.03])
q_slider = Slider(
    ax=ax_q,
    label='q',
    valmin=0,
    valmax=5,
    valinit=init_q,
)
# The function to be called anytime the slider's value changes
def update(val):
    region1.set_xy(np.c_[compute_inequality(i1, val)])
    region2.set_xy(np.c_[compute_inequality(i2, val)])
    region3.set_xy(np.c_[compute_inequality(i3, val)])
    fig.canvas.draw_idle()
# register the update function with the slider
q_slider.on_changed(update)
plt.show()

Solver Issue No Algorithm Found

I am trying to evaluate the function U2(x, y, z) at specified values of x, y, z. I am not sure how to do that with sympy, because the values are given as arrays, e.g. "x = np.arange(-h, h, 0.001)", as seen in the code below.
Below you will find my implementation with sympy. Additionally I am using PyCharm.
This implementation is based on Dr. Annabestani and Dr. Naghavis' paper: A 3D analytical ion transport model for ionic polymer metal composite actuators in large bending deformations
import numpy as np
import sympy as sp
h = 0.1 # [mm] half of thickness
W: float = 6 # [mm] width
L: float = 28 # [mm] length
F: float = 96458 # [C/mol] Faraday's constant
k_e = 1.34E-6 # [F/m]
Y = 5.71E8 # [Pa]
d = 1.03E-11 # [m^2/s] diffusivity coefficient
T = 293 # [K]
C_minus = 1200 # [mol/m^3] Cation concentration
C_plus = 1200 # [mol/m^3] anion concentration
R = 8.3143 # [J/mol*K] Gas constant
Vol = 2*h*W*L
#dVol = diff(Vol,x) + diff(Vol, y) + diff(Vol, z) # change in Volume
theta = 1 / W
x, y, z, m, n, p, t = sp.symbols('x y z m n p t')
V_1 = 0.5 * sp.sin(2 * sp.pi * t) # Voltage as a function of time
k_f = 0.5
t_f = 44
k_g = 4.5
t_g = 0.07
B_mnp = 0.003
b_mnp: float = B_mnp
gamma_hat_2 = 0.04
gamma_hat_5 = 0.03
gamma_hat_6 = 5E-3
r_M = 0.15 # membrane resistance
r_ew = 0.175 # transverse resistance of electrode
r_el = 0.11 # longitudinal resistance of electrode
mu = 2.4
sigma_not = 0.1
a_L: float = 1.0 # distributed surface attenuation
r_hat = sp.sqrt(r_M ** 2 + r_ew ** 2 + r_el ** 2)
lambda_1 = 0.0001
dVol = 1
K = (F ** 2 * C_minus * d * (1 - C_minus * dVol)) / (R * T * k_e) # also K = a
K_hat = (K-lambda_1)/d
gamma_1 = 1.0
gamma_2 = 1.0
gamma_3 = 1.0
gamma_4 = 1.0
gamma_5 = 1.0
gamma_6 = 1.0
gamma_7 = 1.0
small_gamma_1 = 1.0
small_gamma_2 = 1.0
small_gamma_3 = 1.0
psi = gamma_1*x + gamma_2*y + gamma_3*z + gamma_4*x*y + gamma_5*x*z + gamma_6*y*z + gamma_7*x*y*z + (small_gamma_1/2)*x**2 + (small_gamma_2/2)*y**2 + (small_gamma_3/2)*x*z**2
psi_hat_part = ((sp.sin(((m + 1) * sp.pi) / 2 * h)) * x) * ((sp.sin(((n + 1) * sp.pi) / W)) * y) * ((sp.sin(((p + 1) * sp.pi) / L)) * z)
psi_hat = psi * psi_hat_part # Eqn. 19
print(psi_hat)
x1: float = -h
x2: float = h
y1: float = 0
y2: float = W
z1: float = 0
z2: float = L
I = psi_hat.integrate((x, x1, x2), (y, y1, y2), (z, z1, z2)) # Integration for a_mnp Eqn. 18
A_mnp = ((8 * K_hat) / (2 * h * W * L)) * I
Partial = A_mnp * ((sp.sin(((m + 1) * sp.pi) / 2 * h)) * x) * ((sp.sin(((n + 1) * sp.pi) / W)) * y) * ((sp.sin(((p + 1) * sp.pi) / L)) * z)
start = Partial.integrate((p, 0 , 10E9), (n, 0, 10E9), (m, 0, 10E9)) #when using infinity it goes weird, also integrating leads to higher thresholds than summation
a_mnp_denom = (((sp.sin(((m + 1) * sp.pi) / 2 * h)) ** 2) * ((sp.sin(((n + 1) * sp.pi) / W)) ** 2) * (
(sp.sin(((p + 1) * sp.pi) / L)) ** 2) + K_hat)
a_mnp = A_mnp / a_mnp_denom # Eqn. 18
U2 = sp.Function("U2")
U2 = a_mnp * ((sp.sin(((m + 1) * sp.pi) / 2 * h)) * x) * ((sp.sin(((n + 1) * sp.pi) / W)) * y) * (
(sp.sin(((p + 1) * sp.pi) / L)) * z) # Eqn. 13
x = np.arange(-h, h, 0.001)
y = np.arange(-h, h, 0.001)
z = np.arange(-h, h, 0.001)
f= sp.subs((U2), (x ,y ,z))
I currently get the error message: ValueError: subs accepts either 1 or 2 arguments. So it seems I can't use the subs() method this way, and replace() doesn't work well either. Are there any other methods one can use?
Any help will be grateful, thank you!
Oscar is right: you are trying to deal with too much of the problem at once. That aside, Numpy and SymPy do not work like you think they do. What were you hoping to see when you replaced 3 variables, each with a range?
You cannot replace a SymPy variable/Symbol with a Numpy arange object, but you can replace a Symbol with a single value:
>>> from sympy.abc import x, y
>>> a = 1.0
>>> u = x + y + a
>>> u.subs(x, 1)
y + 2.0
>>> u.subs([(x,1), (y,2)])
4.0
You might iterate over the arange values, creating values of f and then doing something with each value:
f = lambda v: u.subs(dict(zip((x, y), v)))
for xi in range(1, 3):      # replace range with your arange call
    for yi in range(-4, -2):
        fi = f((xi, yi))
        print(xi, yi, fi)
Be careful about iterating and using x or y as your loop variable, however, since that will lose the binding of the Symbol to that variable name:
for x in range(2):
    print(u.subs(x, x))  # no change, and x is no longer a Symbol; it is now an int
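If the goal is to evaluate the expression over whole NumPy arrays rather than point by point, a common alternative (a sketch, assuming the expression contains only the free symbols being substituted) is sympy.lambdify, which converts the SymPy expression into a vectorized numeric function:
import numpy as np
from sympy import lambdify
from sympy.abc import x, y

a = 1.0
u = x + y + a
f = lambdify((x, y), u, modules="numpy")   # plain function backed by numpy

xv = np.arange(-0.1, 0.1, 0.001)
yv = np.arange(-0.1, 0.1, 0.001)
print(f(xv, yv)[:5])                       # evaluates elementwise over the arrays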

Trouble with Runge-Kutta Method for Coulomb's Law

I am trying to simulate a two-dimensional two charge situation using Euler's Method and the Runge-Kutta 4th Order Method. I have gotten relatively expected answers using both methods. But I had never tried using my RK4 method with different initial conditions.
Starting with a positive initial velocity vx0 and a negative initial x-position x0, the code seems to work just fine. But when I flip them so that vx0 is negative and x0 is positive, I get different answers, when they should be symmetric.
I made sure that my Euler method worked for both variations of initial conditions to confirm that it was my RK4 function that was the problem. I struggled initially to apply the RK4 method for two dimensional motion, so it doesn't surprise me that this error has come about. Below is an image that might make the problem a bit clearer.
Here is where I calculate the constants:
# Initial conditions
time[0] = t = t0
vx1[0] = vx = vx0
vy1[0] = vy = vy0
x1[0] = x = x0
y1[0] = y = y0
for i in range(1, n + 1):
    # Calculate our constants
    k1vx = step * ax(x, y, q1, q2, m)
    k1vy = step * ay(x, y, q1, q2, m)
    k1x = step * vx
    k1y = step * vy
    k2vx = step * ax(x + 0.5 * k1x, y + 0.5 * k1y, q1, q2, m)
    k2vy = step * ay(x + 0.5 * k1x, y + 0.5 * k1y, q1, q2, m)
    k2x = step * (vx + (k1vx / 2))
    k2y = step * (vy + (k1vy / 2))
    k3vx = step * ax(x + 0.5 * k2x, y + 0.5 * k2y, q1, q2, m)
    k3vy = step * ay(x + 0.5 * k2x, y + 0.5 * k2y, q1, q2, m)
    k3x = step * (vx + (k2vx / 2))
    k3y = step * (vy + (k2vy / 2))
    k4vx = step * ax(x + k3x, y + k3y, q1, q2, m)
    k4vy = step * ay(x + k3x, y + k3y, q1, q2, m)
    k4x = step * (vx + k3vx)
    k4y = step * (vy + k3vy)
    # Update the values based on our calculated constants
    vx1[i] = vx = vx + (k1vx + k2vx + k2vx + k3vx + k3vx + k4vx) / 6
    vy1[i] = vy = vy + (k1vx + k2vy + k2vy + k3vy + k3vy + k4vy) / 6
    x1[i] = x = x + ((k1x + 2 * k2x + 2 * k3x + k4x) / 6)
    y1[i] = y = y + ((k1y + 2 * k2y + 2 * k3y + k4y) / 6)
    # Update the time
    time[i] = t = t0 + i * step
Here are the functions that I use for ax and ay in the previous code
def accel_rk4_x(x, y, q1, q2, m):
    const = (q1 * q2) / (4 * math.pi * 8.854e-12 * m)
    return const * (x / ((x ** 2 + y ** 2) ** 1.5))

def accel_rk4_y(x, y, q1, q2, m):
    const = (q1 * q2) / (4 * math.pi * 8.854e-12 * m)
    return const * (y / ((x ** 2 + y ** 2) ** 1.5))
I appreciate any help with this problem! I might just need a second pair of eyes.
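For reference, one way to make this kind of componentwise slip hard to commit (a generic sketch with hypothetical charge and mass values, not the actual setup above) is to keep the state in a single NumPy vector [x, y, vx, vy] and let one generic RK4 step handle all components at once:
import numpy as np

K_E = 8.9875517873681764e9          # Coulomb constant [N m^2 / C^2]

def deriv(state, q1, q2, m):
    """Return d/dt of state = [x, y, vx, vy] for one mobile charge."""
    x, y, vx, vy = state
    r3 = (x**2 + y**2) ** 1.5
    a = K_E * q1 * q2 / m
    return np.array([vx, vy, a * x / r3, a * y / r3])

def rk4_step(state, h, *args):
    k1 = deriv(state, *args)
    k2 = deriv(state + 0.5 * h * k1, *args)
    k3 = deriv(state + 0.5 * h * k2, *args)
    k4 = deriv(state + h * k3, *args)
    return state + h * (k1 + 2*k2 + 2*k3 + k4) / 6

# hypothetical parameters, just to show the call pattern
state = np.array([-1.0, 0.1, 1.0, 0.0])   # x0, y0, vx0, vy0
for _ in range(1000):
    state = rk4_step(state, 1e-4, 1e-6, 1e-6, 1e-3)
print(state)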

Odeint Error - Excess work done on this call

I am writing code to solve coupled harmonic oscillator equations using odeint from scipy. I want to add a random number to one of the equations at every time step of the ODE solver. To do this, I have written two time-dependent coefficient functions and used them. However, this gives me the following warning.
ODEintWarning: Excess work done on this call (perhaps wrong Dfun type). Run
with full_output = 1 to get quantitative information.
warnings.warn(warning_msg, ODEintWarning)
My code is given below.
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
import scipy.stats as stats
from scipy.stats import beta
m1 = 1.1
m2 = 1.0
k1 = 1000.0
k2 = 1000.0
k12 = 100
g = 0.0
global Steps
Steps = 0
x10 = 1
x20 = 0
alpha = 1
a = 2
b = 3
v10 = 0
v20 = 0
#A = np.random.beta(a,b, 10) * alpha
#B = np.random.beta(a,b, 10) * alpha
def c(t):
    return np.random.beta(a, b) * alpha
def d(t):
    return np.random.beta(a, b) * alpha
def f(x, t, c, d):
    y = []
    y.append(x[1] - c(t) * x[0])
    #print(c(t))
    y.append(-(k1 + k12) / m1 * x[0] + k12 / m1 * x[2] - 2 * g * x[1] - c(t) * x[1])
    y.append(x[3] - d(t) * x[2])
    y.append(-(k2 + k12) / m2 * x[2] + k12 / m2 * x[0] - 2 * g * x[3] - d(t) * x[3])
    return y
b0 = [x10, v10, x20, v20]
b0 = np.array(b0)
args = (c, d)
t = np.linspace(0, 1, 1000 )
t = np.array(t)
X1, infodict = odeint(f, b0, t, args, full_output = 1)
X1 = X1.T
Q1 = X1[0]
Q2 = X1[2]
plt.plot(t, Q1, 'g-')
plt.plot(t, Q2, 'b-')
plt.show()
a = m1*m2
b = -(m1*(k2 + k12) + m2*(k1 + k12))
c = k1*k2 + k12*(k1 + k2)
wp = np.sqrt((-b + np.sqrt(b**2 - 4*a*c))/(2*a))
wm = np.sqrt((-b - np.sqrt(b**2 - 4*a*c))/(2*a))
print(wp)
print(wm)
f = open('simdata.csv', mode='w')
for i in range(len(t)):
    p = str(t[i]) + ',' + str(Q1[i]) + ',' + str(Q2[i]) + '\n'
    f.write(p)
f.close()
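For reference, a commonly suggested workaround (a sketch, assuming that noise sampled on a fixed grid and interpolated between the output points is acceptable for the model) is to pre-sample the random coefficients once and interpolate inside c(t) and d(t), so the right-hand side odeint sees is deterministic and its adaptive step-size control can converge:
import numpy as np

# pre-sample the random coefficients on the output grid (a=2, b=3, alpha=1 as above)
t = np.linspace(0, 1, 1000)
c_samples = np.random.beta(2, 3, t.size)
d_samples = np.random.beta(2, 3, t.size)

def c(ti):
    # piecewise-linear, repeatable value at any query time ti
    return np.interp(ti, t, c_samples)

def d(ti):
    return np.interp(ti, t, d_samples)
With f unchanged, odeint(f, b0, t, args=(c, d)) then evaluates the same c(ti) every time it revisits a given ti.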

Cardano's formula not working with numpy?

--- using python 3 ---
Following the equations here, I tried to find all real roots of an arbitrary third-order polynomial. Unfortunately, my implementation does not yield the correct result and I cannot find the error. Maybe you are able to spot it within a blink of an eye and tell me.
(As you notice, only the roots of the green curve are wrong.)
With best regards
import numpy as np
def find_cubic_roots(a, b, c, d):
    # with ax³ + bx² + cx + d = 0
    a, b, c, d = a+0j, b+0j, c+0j, d+0j
    all_ = (a != np.pi)
    Q = (3*a*c - b**2) / (9*a**2)
    R = (9*a*b*c - 27*a**2*d - 2*b**3) / (54 * a**3)
    D = Q**3 + R**2
    S = (R + np.sqrt(D))**(1/3)
    T = (R - np.sqrt(D))**(1/3)
    result = np.zeros(tuple(list(a.shape) + [3])) + 0j
    result[all_,0] = - b / (3*a) + (S+T)
    result[all_,1] = - b / (3*a) - (S+T) / 2 + 0.5j * np.sqrt(3) * (S - T)
    result[all_,2] = - b / (3*a) - (S+T) / 2 - 0.5j * np.sqrt(3) * (S - T)
    return result
The example where you see it does not work:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
a = np.array([2.5])
b = np.array([-5])
c = np.array([0])
x = np.linspace(-2,3,100)
colors = ['blue', 'orange', 'green']  # one color per curve (was undefined in the snippet)
for i, d in enumerate([-8, 0, 8]):
    d = np.array(d)
    roots = find_cubic_roots(a, b, c, d)
    ax.plot(x, a*x**3 + b*x**2 + c*x + d, label="a = %.3f, b = %.3f, c = %.3f, d = %.3f" % (a, b, c, d), color=colors[i])
    print(roots)
ax.plot(x, x*0)
ax.scatter(roots,roots*0, s = 80)
ax.legend(loc = 0)
ax.set_xlim(-2,3)
plt.show()
Output:
[[ 2.50852567+0.j -0.25426283+1.1004545j -0.25426283-1.1004545j]]
[[ 2.+0.j 0.+0.j 0.-0.j]]
[[ 1.51400399+1.46763129j 1.02750817-1.1867528j -0.54151216-0.28087849j]]
Here is my stab at the solution. Your code fails for the case where R + np.sqrt(D) or R - np.sqrt(D) is negative. The reason is explained in this post. Basically, if you do a**(1/3) where a is negative, numpy returns a complex number. However, we in fact want S and T to be real, since the cube root of a negative real number is simply a negative real number (let's ignore De Moivre's theorem for now and focus on the code rather than the math). The way to work around it is to check whether R + np.sqrt(D) is real and, if so, cast it to real and pass it to cbrt from scipy.special. Similarly for T.
Example code:
import numpy as np
import pdb
import math
from scipy.special import cbrt
def find_cubic_roots(a, b, c, d, bp=False):
    a, b, c, d = a+0j, b+0j, c+0j, d+0j
    all_ = (a != np.pi)
    Q = (3*a*c - b**2) / (9*a**2)
    R = (9*a*b*c - 27*a**2*d - 2*b**3) / (54 * a**3)
    D = Q**3 + R**2
    S = 0  # NEW CALCULATION FOR S STARTS HERE
    if np.isreal(R + np.sqrt(D)):
        S = cbrt(np.real(R + np.sqrt(D)))
    else:
        S = (R + np.sqrt(D))**(1/3)
    T = 0  # NEW CALCULATION FOR T STARTS HERE
    if np.isreal(R - np.sqrt(D)):
        T = cbrt(np.real(R - np.sqrt(D)))
    else:
        T = (R - np.sqrt(D))**(1/3)
    result = np.zeros(tuple(list(a.shape) + [3])) + 0j
    result[all_,0] = - b / (3*a) + (S+T)
    result[all_,1] = - b / (3*a) - (S+T) / 2 + 0.5j * np.sqrt(3) * (S - T)
    result[all_,2] = - b / (3*a) - (S+T) / 2 - 0.5j * np.sqrt(3) * (S - T)
    #if bp:
    #    pdb.set_trace()
    return result
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
a = np.array([2.5])
b = np.array([-5])
c = np.array([0])
x = np.linspace(-2,3,100)
for i, d in enumerate([-8, 0, 8]):
    d = np.array(d)
    if d == 8:
        roots = find_cubic_roots(a, b, c, d, True)
    else:
        roots = find_cubic_roots(a, b, c, d)
    ax.plot(x, a*x**3 + b*x**2 + c*x + d, label="a = %.3f, b = %.3f, c = %.3f, d = %.3f" % (a, b, c, d))
    print(roots)
ax.plot(x, x*0)
ax.scatter(roots, roots*0, s=80)
ax.legend(loc=0)
ax.set_xlim(-2, 3)
plt.show()
DISCLAIMER: The code emits some warnings, which you can probably ignore. The output is correct. However, the plot shows an extra root for some reason; this is likely due to your plotting code. The printed roots look fine, though.
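As a quick sanity check on any cubic-root implementation (a side note, not part of the original answer), numpy's built-in polynomial root finder can be used to compare results for the same coefficients:
import numpy as np

# np.roots takes the coefficients from highest to lowest degree
for d in (-8.0, 0.0, 8.0):
    print(d, np.roots([2.5, -5.0, 0.0, d]))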
