I was trying to run a SymPy integration with variables I created myself; the setup is:
import numpy as np
import seaborn
import math
import pandas as pd
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import scipy.special as special
from sympy import init_session
init_session()
%matplotlib inline
dP, dT = symbols('dP dT', real = True, positive = True)
dr = symbols('dr', real = True, positive = True)
T_9, T = symbols('T_9 T', real = True, positive = True)
rho, mu, m_u, k, P, gamma = symbols('rho mu m_u k P gamma', real = True, positive = True)
r, R_solar, R = symbols('r R_solar R', real = True, positive = True)
G, M_solar, M = symbols('G M_solar M', real = True, positive = True)
a, c, kappa = symbols('a c kappa', real = True, positive = True)
F = symbols('F', real = True, positive = True)
cc = (T / (rho * k * T / (mu * m_u)) * (1 - (1 / gamma)) * ((-rho * G * M / r**2)))
r_int = integrate(((cc), (0, r, R)))
I tried changing the limits to specify what I wanted, but I kept getting the same error; the error is: ValueError: specify dummy variables for (T / (rho * k * T / (mu * m_u)) * (1 - (1 / gamma)) * ((-rho * G * M / r**2))).
You've used the wrong syntax for integrate. A definite integral is written integrate(expr, (symbol, lower, upper)). Assuming r is the integration variable, your example becomes:
r_int = integrate(cc, (r, 0, R))
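For reference, a minimal, self-contained sketch of the definite-integral syntax (the symbols here are just for illustration, not the ones from your model):
from sympy import symbols, integrate
x, R = symbols('x R', positive=True)
print(integrate(x**2, (x, 0, R)))  # definite integral over x from 0 to R -> R**3/3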
I'm trying to integrate an equation using sympy, but the evaluation keeps erroring out with:
TypeError: cannot add <class 'sympy.matrices.immutable.ImmutableDenseMatrix'> and <class 'sympy.core.numbers.Zero'>
However, when I integrate the same equation in Mathematica, I get the result I'm looking for. I'm hoping someone can explain what is happening under the hood with sympy and how its integration differs from mathematica's.
Because there are a lot of equations involved, I am only going to post the Python representation of the equations which are used to make up the variables called in the integration. To know for certain that the integration is an issue, I have independently checked each equation involved and made sure the results match between Python and Mathematica. I can confirm that only the integration fails.
Integration variable setup in Python:
import numpy as np
import sympy as sym
from sympy import I, Matrix, integrate
from sympy.integrals import Integral
from sympy.functions import exp
from sympy.physics.vector import cross, dot
c_speed = 299792458
lambda_u = 0.01
k_u = (2 * np.pi)/lambda_u
K_0 = 0.2
gamma_0 = 2.0
beta_0 = sym.sqrt(1 - 1 / gamma_0**2)
t = sym.symbols('t')
t_start = (-2 * lambda_u) / c_speed
t_end = (3 * lambda_u) / c_speed
beta_x_of_t = sym.Piecewise( (0.0, sym.Or(t < t_start, t > t_end)),
((-1 * K_0)/gamma_0 * sym.sin(k_u * c_speed * t), sym.And(t_start <= t, t <= t_end)) )
beta_z_of_t = sym.Piecewise( (beta_0, sym.Or(t < t_start, t > t_end)),
(beta_0 * (1 - K_0**2/ (4 * beta_0 * gamma_0**2)) + K_0**2 / (4 * beta_0 * gamma_0**2) * sym.sin(2 * k_u * c_speed * t), sym.And(t_start <= t, t <= t_end)) )
beta_xp_of_t = sym.diff(beta_x_of_t, t)
beta_zp_of_t = sym.diff(beta_z_of_t, t)
Python Integration:
n = Matrix([(0, 0, 1)])
beta = Matrix([(beta_x_of_t, 0, beta_z_of_t)])
betap = Matrix([(beta_xp_of_t, 0, beta_zp_of_t)])
def rad(n, beta, betap):
return integrate(n.cross((n-beta).cross(betap)), (t, t_start, t_end))
rad(n, beta, betap)
# Output is the error above
Mathematica integration:
rad[n_, beta_, betap_] :=
NIntegrate[
Cross[n, Cross[n - beta, betap]]
, {t, tstart, tend}, AccuracyGoal -> 3]
rad[{0, 0, 1}, {betax[t], 0, betaz[t]}, {betaxp[t], 0, betazp[t]}]
# Output is {0.00150421, 0., 0.}
I did see similar questions such as this one and this question, but I'm not entirely sure whether they are relevant here (the second link seems to be a closer match, but not quite a fit).
Because you're working with imprecise floats (and even use np.pi instead of sym.pi) while sympy tries to find exact symbolic solutions, sympy's expressions get rather wild, with large constants like 299792458 mixed with much smaller values.
Here is an attempt to use more symbolic expressions, and only integrate the x-coordinate of the function.
import sympy as sym
from sympy import I, Matrix, integrate, S
from sympy.integrals import Integral
from sympy.functions import exp
from sympy.physics.vector import cross, dot
c_speed = 299792458
lambda_u = S(1) / 100
k_u = (2 * sym.pi) / lambda_u
K_0 = S(2) / 100
gamma_0 = 2
beta_0 = sym.sqrt(1 - S(1) / gamma_0 ** 2)
t = sym.symbols('t')
t_start = (-2 * lambda_u) / c_speed
t_end = (3 * lambda_u) / c_speed
beta_x_of_t = sym.Piecewise((0, sym.Or(t < t_start, t > t_end)),
((-1 * K_0) / gamma_0 * sym.sin(k_u * c_speed * t), sym.And(t_start <= t, t <= t_end)))
beta_z_of_t = sym.Piecewise((beta_0, sym.Or(t < t_start, t > t_end)),
(beta_0 * (1 - K_0 ** 2 / (4 * beta_0 * gamma_0 ** 2)) + K_0 ** 2 / (
4 * beta_0 * gamma_0 ** 2) * sym.sin(2 * k_u * c_speed * t),
sym.And(t_start <= t, t <= t_end)))
beta_xp_of_t = sym.diff(beta_x_of_t, t)
beta_zp_of_t = sym.diff(beta_z_of_t, t)
n = Matrix([(0, 0, 1)])
beta = Matrix([(beta_x_of_t, 0, beta_z_of_t)])
betap = Matrix([(beta_xp_of_t, 0, beta_zp_of_t)])
func = n.cross((n - beta).cross(betap))[0]
print(integrate(func, (t, t_start, t_end)))
This doesn't give an error, and outputs just zero.
Lambdify can be used to convert the function to numpy, and plot it.
func_np = sym.lambdify(t, func)
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
t_start_np = float(t_start.evalf())
t_end_np = float(t_end.evalf())
ts = np.linspace(t_start_np, t_end_np, 100000)
plt.plot(ts, func_np(ts))
print("approximate integral (np.trapz):", np.trapz(ts, func_np(ts)))
print("approximate integral (scipy's quad):", quad(func_np, t_start_np, t_end_np))
This outputs:
approximate integral (np.trapz): 0.04209721470548062
approximate integral (scipy's quad): (-2.3852447794681098e-18, 5.516333374450447e-10)
Note the huge values on the y-axis and the small values on the x-axis. These values, as well as Mathematica's, could well be rounding errors.
Code:
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
# parameters
S = 0.0001
M = 30.03
K = 113.6561
Vr = 58
R = 8.3145
T = 298.15
Q = 0.000133
Vp = 0.000022
Mr = 36
Pvap = 1400
wf = 0.001
tr = 1200
mass = 40000
# define t
time = 14400
t = np.arange(0, time + 1, 1)
# define initial state
Cv0 = (mass / Vp) * wf # Cv(0)
Cr0 = (mass / Vp) * (1 - wf)
Cair0 = 0 # Cair(0)
# define function and solve ode
def model(x, t):
C = x[0] # C is Cair(t)
c = x[1] # c is Cv(t)
a = Q + (K * S / Vr)
b = (K * S * M) / (Vr * R * T)
s = (K * S * M) / (Vp * R * T)
w = (1 - wf) * 1000
Peq = (c * Pvap) / (c + w * c * M / Mr)
Pair = (C * R * T) / M
dcdt = -s * (Peq - Pair)
if t <= tr:
dCdt = -a * C + b * Peq
else:
dCdt = -a * C
return [dCdt, dcdt]
x = odeint(model, [Cair0, Cv0], t)
C = x[:, 0]
c = x[:, 1]
Now I want to figure out the value of wf when I know C(0) (at t = 0) and C(tr) (at t = tr), i.e. I know two pairs of t and C(t).
I found some links (Curve Fit Parameters in Multiple ODE Function, Solving ODE with Python reversely, https://medium.com/analytics-vidhya/coronavirus-in-italy-ode-model-an-parameter-optimization-forecast-with-python-c1769cf7a511, https://kitchingroup.cheme.cmu.edu/blog/2013/02/18/Fitting-a-numerical-ODE-solution-to-data/) related to this, but I can't quite get the hang of the subject.
Can I find the parameter wf from the two data points (0, C(0)) and (tr, C(tr)) and the ODE?
First, ODE solvers assume a smooth right-hand-side function, so the if t <= tr: ... statement in your code isn't going to work. Two separate integrations must be done to deal with the discontinuity: integrate to tr, then use the solution at tr as the initial condition for integrating beyond tr with the new right-hand side, as sketched below.
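A minimal sketch of that two-stage integration, reusing the parameter definitions, wf, time, Cair0 and Cv0 from your first code block (model_before and model_after are just illustrative names):
def model_before(x, t):  # right-hand side valid for t <= tr
    C, c = x             # C is Cair(t), c is Cv(t)
    a = Q + (K * S / Vr)
    b = (K * S * M) / (Vr * R * T)
    s = (K * S * M) / (Vp * R * T)
    w = (1 - wf) * 1000
    Peq = (c * Pvap) / (c + w * c * M / Mr)
    Pair = (C * R * T) / M
    return [-a * C + b * Peq, -s * (Peq - Pair)]
def model_after(x, t):   # right-hand side valid for t > tr (source term switched off)
    dCdt, dcdt = model_before(x, t)
    a = Q + (K * S / Vr)
    return [-a * x[0], dcdt]
t1 = np.arange(0, tr + 1, 1)          # first leg: 0 .. tr
t2 = np.arange(tr, time + 1, 1)       # second leg: tr .. end
x1 = odeint(model_before, [Cair0, Cv0], t1)
x2 = odeint(model_after, x1[-1], t2)  # restart from the state at tr
x_full = np.vstack([x1, x2[1:]])      # drop the duplicated point at tr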
But it seems like your main problem (solving for wf) only involves integrating to tr (not beyond), so we can ignore that issue when solving for wf.
Now I want to figure out the value of wf when I know C(0) (at t = 0) and C(tr) (at t = tr), i.e. I know two pairs of t and C(t).
You can do a non-linear solve for wf:
from scipy.integrate import odeint
import numpy as np
import matplotlib.pyplot as plt
# parameters
S = 0.0001
M = 30.03
K = 113.6561
Vr = 58
R = 8.3145
T = 298.15
Q = 0.000133
Vp = 0.000022
Mr = 36
Pvap = 1400
mass = 40000
# initial condition for wf
wf_initial = 0.02
# define t
tr = 1200
t_eval = np.array([0, tr], float)
# define initial state. This is C(t = 0)
Cv0 = (mass / Vp) * wf_initial # Cv(0)
Cair0 = 0 # Cair(0)
init_cond = np.array([Cair0, Cv0], float)
# Define the final state. This is C(t = tr)
final_state = 3.94926615e-03
# define function and solve ode
def model(x, t, wf):
C = x[0] # C is Cair(t)
c = x[1] # c is Cv(t)
a = Q + (K * S / Vr)
b = (K * S * M) / (Vr * R * T)
s = (K * S * M) / (Vp * R * T)
w = (1 - wf) * 1000
Peq = (c * Pvap) / (c + w * c * M / Mr)
Pair = (C * R * T) / M
dcdt = -s * (Peq - Pair)
dCdt = -a * C + b * Peq
return [dCdt, dcdt]
# define non-linear system to solve
def function(x):
wf = x[0]
x = odeint(model, init_cond, t_eval, args = (wf,), rtol = 1e-10, atol = 1e-10)
return x[-1,0] - final_state
from scipy.optimize import root
sol = root(function, np.array([wf_initial]), method='lm')
print(sol.success)
wf_solution = sol.x[0]
x = odeint(model, init_cond, t_eval, args = (wf_solution,), rtol = 1e-10, atol = 1e-10)
print(wf_solution)
print(x[-1])
print(final_state)
Now I have the following two functions:
rho(u) = np.exp( (-2.0 / 0.2) * (u**0.2-1.0) )
psi( w(x-u) ) = (1/(4.0 * math.sqrt(np.pi))) * np.exp(- ((w * (x-u))**2) / 4.0) * (2.0 - (w * (x-u))**2)
I then want to integrate rho(u) * psi( w(x-u) ) with respect to u, so that the result is a single function of w and x.
Here's my Python code snippet as I try to solve this integral.
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy import integrate
x = np.linspace(0,10,1000)
w = np.linspace(0,10,500)
u = np.linspace(0,10,1000)
rho = np.exp((-2.0/0.2)*(u**0.2-1.0))
value = np.zeros((500,1000),dtype="float32")
# Integrate the products of rho with
# (1/(4.0*math.sqrt(np.pi)))*np.exp(- ((w[i]*(x[j]-u))**2) / 4.0)*(2.0 - (w[i]*(x[j]-u))**2)
for i in range(len(w)):
for j in range(len(x)):
value[i,j] =value[i,j]+ integrate.simps(rho*(1/(4.0*math.sqrt(np.pi)))*np.exp(- ((w[i]*(x[j]-u))**2) / 4.0)*(2.0 - (w[i]*(x[j]-u))**2),u)
plt.imshow(value,origin='lower')
plt.colorbar()
As illustrated above, I used nested for loops to do the integration, which we all know is inefficient. So I want to ask whether there is a way to do this without the for loops.
Here is a possibility using scipy.integrate.quad_vec. It executes in about 6 seconds on my machine, which I believe is acceptable. Note, however, that I used a step of only 0.1 for both x and w; that resolution seems to be a good compromise on a single core.
from functools import partial
import matplotlib.pyplot as plt
from numpy import empty, exp, linspace, pi, sqrt
from scipy.integrate import quad_vec
from time import perf_counter
def func(u, x):
rho = exp(-10 * (u ** 0.2 - 1))
var = w * (x - u)
psi = exp(-var ** 2 / 4) * (2 - var ** 2) / 4 / sqrt(pi)
return rho * psi
begin = perf_counter()
x = linspace(0, 10, 101)
w = linspace(0, 10, 101)
res = empty((x.size, w.size))
for i, xVal in enumerate(x):
res[i], err = quad_vec(partial(func, x=xVal), 0, 10)
print(f'{perf_counter() - begin} s')
plt.contourf(w, x, res)
plt.colorbar()
plt.xlabel('w')
plt.ylabel('x')
plt.show()
UPDATE
I had not realised it, but one can also work with a multi-dimensional array in quad_vec. The updated approach below doubles the resolution for both x and w while keeping the execution time around 7 seconds, and there are no longer any explicit for loops.
import matplotlib.pyplot as plt
from numpy import exp, mgrid, pi, sqrt
from scipy.integrate import quad_vec
from time import perf_counter
def func(u):
rho = exp(-10 * (u ** 0.2 - 1))
var = w * (x - u)
psi = exp(-var ** 2 / 4) * (2 - var ** 2) / 4 / sqrt(pi)
return rho * psi
begin = perf_counter()
x, w = mgrid[0:10:201j, 0:10:201j]
res, err = quad_vec(func, 0, 10)
print(f'{perf_counter() - begin} s')
plt.contourf(w, x, res)
plt.colorbar()
plt.xlabel('w')
plt.ylabel('x')
plt.show()
Addressing the comment
Just add the following lines before plt.show() to have both axes scale logarithmically.
plt.gca().set_xlim(0.05, 10)
plt.gca().set_ylim(0.05, 10)
plt.gca().set_xscale('log')
plt.gca().set_yscale('log')
I am doing a simple integration; the only thing is that I want to keep n as a variable. How can I do this while still integrating over t?
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
import scipy.integrate as integrate
from scipy.integrate import quad
import math as m
y = lambda t: 3*t
T = 4 #period
n = 1
w = 2*np.pi*n/T
#for odd functions
def integrand(t):
#return y*(2/T)*np.sin(w*t)
return y(t)*np.sin(n*w*t)
Bn = (2/T)*quad(integrand,-T/2,T/2)[0]
print(Bn)
Using quad, you cannot. Perhaps you're looking for symbolic integration like you would do with pen and paper; sympy can help you here:
import sympy
x = sympy.Symbol("x")
t = sympy.Symbol("t")
T = sympy.Symbol("T")
n = sympy.Symbol("n", positive=True)
w = 2 * sympy.pi * n / T
y = 3 * t
out = 2 / T * sympy.integrate(y * sympy.sin(n * w * t), (t, -T/2, T/2))
print(out)
2*(-3*T**2*cos(pi*n**2)/(2*pi*n**2) + 3*T**2*sin(pi*n**2)/(2*pi**2*n**4))/T
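If a numeric value is needed afterwards, the symbolic result can be evaluated for concrete T and n; a small sketch using the out expression from above:
print(out.subs({T: 4, n: 1}).evalf())  # numeric value of the coefficient for T = 4, n = 1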
If you want to evaluate the integral for many n, quadpy can help:
import numpy as np
from quadpy import quad
y = lambda t: 3 * t
T = 4
n = np.linspace(0.5, 4.5, 20)
w = 2 * np.pi * n / T
# for odd functions
def integrand(t):
return y(t) * np.sin(np.multiply.outer(n * w, t))
Bn = (2 / T) * quad(integrand, -T / 2, T / 2)[0]
print(Bn)
[ 2.95202424 4.88513496 4.77595051 1.32599514 -1.93954768 -0.23784853
1.11558278 -0.95397681 0.63709387 -0.4752673 0.45818227 -0.4740128
0.35943759 -0.01510463 -0.30348511 0.09861289 0.25428048 0.10030723
-0.06099483 -0.13128359]
I have a function like the following:
q = 1 / sqrt( ((1+z)**2 * (1+0.01*o_m*z) - z*(2+z)*(1-o_m)) )
h = 5 * log10( (1+z)*q ) + 43.1601
I have experimental values corresponding to the equation above, and I have to plug data into the function and then evaluate the quantity below:
chi = (q_exp - q_theo)**2 / err**2 # these chi values are summed over z = 0 to z = 1.4 (the range in the data file)
z, err and q_exp are in the data file (2.txt). Now I have to choose a range for o_m (0.2 to 0.4) and find for which o_m the chi function is minimized.
my code is:
from math import *
from scipy.integrate import quad
min = None
l = None
a = None
b = None
c = 0
def ant(z,om,od):
return 1/sqrt( (1+z)**2 * (1+0.01*o_m*z) - z*(2+z)*o_d )
for o_m in range(20,40,1):
o_d=1-0.01*o_m
with open('2.txt') as fp:
for line in fp:
n = list( map(float, line.split()) )
q = quad(ant,n[0],n[1],args=(o_m,o_d))[0]
h = 5.0 * log10( (1+n[1])*q ) + 43.1601
chi = (n[2]-h)**2 / n[3]**2
c = c + chi
if min is None or min>c:
min = c
l = o_m
print('chi=',q,'o_m=',0.01*l)
n[0], n[1], n[2] and n[3] in the code are z1, z2, q_exp and err, respectively (the columns of the data file); z1 and z2 are the integration limits.
I need your help and I appreciate your time and your attention.
Here is my understanding of the problem.
First, I generate some data with the following code:
import numpy as np
from scipy.integrate import quad
from random import random
def boxmuller(x0,sigma):
u1=random()
u2=random()
ll=np.sqrt(-2*np.log(u1))
z0=ll*np.cos(2*np.pi*u2)
z1=ll*np.sin(2*np.pi*u2)
return sigma*z0+x0, sigma*z1+x0
def q_func(z, oM, oD):
den= np.sqrt( (1.0 + z)**2 * (1+0.01 * oM * z) - z * (2+z) * (1-oD) )
return 1.0/den
def h_func(z,q):
out = 5 * np.log10( (1.0 + z) * q ) + .25#43.1601
return out
def q_Int(z1,z2,oM,oD):
out=quad(q_func, z1,z2,args=(oM,oD))
return out
ooMM=0.3
ooDD=1.0-ooMM
dataList=[]
for z in np.linspace(.3,20,60):
z1=.1+.1*z*.01*z**2
z2=z1+3.0+.08+z**2
q=q_Int(z1,z2,ooMM,ooDD)[0]
h=h_func(z,q)
sigma=np.fabs(.01*h)
h=boxmuller(h,sigma)[0]
dataList+=[[z,z1,z2,h,sigma]]
dataList=np.array(dataList)
np.savetxt("data.txt",dataList)
which I would then fit in the following way
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib import pyplot as plt
import numpy as np
from scipy.integrate import quad
from scipy.optimize import leastsq
def q_func(z, oM, oD):
den= np.sqrt( (1.0 + z)**2 * (1+0.01 * oM * z) - z * (2+z) * (1-oD) )
return 1.0/den
def h_func(z,q):
out = 5 * np.log10( (1.0 + z) * q ) + .25#43.1601
return out
def q_Int(z1,z2,oM,oD):
out=quad(q_func, z1,z2,args=(oM,oD))
return out
def residuals(parameters,data):
om,od=parameters
zList=data[:,0]
yList=data[:,3]
errList=data[:,4]
qList=np.fromiter( (q_Int(z1,z2, om,od)[0] for z1,z2 in data[ :,[1,2] ]), float)
hList=np.fromiter( (h_func(z,q) for z,q in zip(zList,qList)), float)
diffList=np.fromiter( ( (y-h)/e for y,h,e in zip(yList,hList,errList) ), float)
return diffList
dataList=np.loadtxt("data.txt")
###fitting
startGuess=[.4,.8]
bestFitValues, cov,info,mesg, ier = leastsq(residuals, startGuess , args=( dataList,),full_output=1)
print(bestFitValues, cov)
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
ax.plot(dataList[:,0],dataList[:,3],marker='x')
###fitresult
fqList=[q_Int(z1,z2,bestFitValues[0], bestFitValues[1])[0] for z1,z2 in zip(dataList[:,1],dataList[:,2])]
fhList=[h_func(z,q) for z,q in zip(dataList[:,0],fqList)]
ax.plot(dataList[:,0],fhList,marker='+')
plt.show()
giving output
>>[ 0.31703574 0.69572673]
>>[[ 1.38135263e-03 -2.06088258e-04]
>> [ -2.06088258e-04 7.33485166e-05]]
and the graph
Note that for leastsq the covariance matrix is in reduced form and needs to be rescaled.
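A small sketch of that rescaling, reusing residuals, bestFitValues, cov and dataList from the code above (this assumes the usual reduced chi-square scaling):
res = residuals(bestFitValues, dataList)
dof = len(res) - len(bestFitValues)      # degrees of freedom
cov_scaled = cov * (res**2).sum() / dof  # rescaled covariance matrix
print(cov_scaled)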
Unintentionally, this question overlaps with my other question. The correct answer is:
from math import *
import numpy as np
from scipy.integrate import quad
min=l=a=b=chi=None
c=0
z,mo,err=np.genfromtxt('Union2.1_z_dm_err.txt',unpack=True)
def ant(z,o_m): #0.01*o_m is steps of o_m
return 1/sqrt(((1+z)**2*(1+0.01*o_m*z)-z*(2+z)*(1-0.01*o_m)))
for o_m in range(20,40):
c=0
for i in range(len(z)):
q=quad(ant,0,z[i],args=(o_m,))[0] #Integration o to z
h=5*log10((1+z[i])*(299000/70)*q)+25 #function of dL
chi=(mo[i]-h)**2/err[i]**2 #chi^2 test function
c=c+chi
l=o_m
print('chi^2=',c,'Om=',0.01*l,'OD=',1-0.01*l)
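If the best o_m should be picked out automatically instead of being read off the printed list, a small variation along these lines (same variable names as above) would do it:
best_c, best_om = None, None
for o_m in range(20, 40):
    c = 0
    for i in range(len(z)):
        q = quad(ant, 0, z[i], args=(o_m,))[0]
        h = 5 * log10((1 + z[i]) * (299000 / 70) * q) + 25
        c += (mo[i] - h)**2 / err[i]**2
    if best_c is None or c < best_c:  # keep the o_m with the lowest chi^2
        best_c, best_om = c, o_m
print('best chi^2 =', best_c, 'Om =', 0.01 * best_om, 'OD =', 1 - 0.01 * best_om)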