How to simulate coupled PDEs in Python

I'm trying to simulate the following system of partial differential equations in time and space, using Python 3.
Here is a link to the set of equations with their boundary conditions.
My idea was to transform all the equations into discrete form (forward Euler as the simplest starting point) and then run the code.
Forward Euler implies an update of the form u_i^(n+1) = u_i^n + dt * f(u^n)_i, with the second space derivative replaced by the stencil (u_(i-1)^n - 2*u_i^n + u_(i+1)^n)/dx^2, where i = 0,...,Nx indexes the spatial mesh and n = 0,1,...,Nt the time steps.
Here is what I have (using NumPy):
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np

# Define exponents for the PDE
m = 0
n = 2
# Define constants for the PDE
a = 0.2
b = -0.4
av = 5.0
c = 0.6
d = -0.8
Du = 1
Dv = 20
Dz = 1000
u0 = 0.5
v0 = 0.5
kz = 0.001
L = 10
Nx = 100
T = 5
Nt = 100

x = np.linspace(0, L, Nx+1)   # mesh points in space
dx = x[1] - x[0]
#print(dx)
t = np.linspace(0, T, Nt+1)   # mesh points in time
dt = t[1] - t[0]
#print(dt)

if dt <= 0.5*dx**2:
    print("Ok!")
else:
    print("Alert! dt is not smaller than dx^2/2")

u = np.zeros(Nx+1)
v = np.zeros(Nx+1)
z = np.zeros(Nx+1)
u_1 = np.zeros(Nx+1)
v_1 = np.zeros(Nx+1)
z_1 = np.zeros(Nx+1)

# Set initial condition u(x,0) = I(x)
for i in range(0, Nx+1):
    u_1[i] = np.random.random_sample()
    v_1[i] = np.random.random_sample()
    z_1[i] = np.random.random_sample()

for n in range(0, Nt):
    # Compute u, v, z at inner mesh points
    for i in range(1, Nx):
        u[i] = u_1[i] + dt*(a*(u_1[i]-u0) + b*(v_1[i]-v0) + av*(u_1[i]-u0)**3
                            + (Du/dx**2)*(u_1[i-1] - 2*u_1[i] + u_1[i+1]))*z_1[i]**n
        v[i] = v_1[i] + dt*(c*(u_1[i]-u0) + d*(v_1[i]-v0)
                            + (Dv/dx**2)*(v_1[i-1] - 2*v_1[i] + v_1[i+1]))*z_1[i]**n
        z[i] = (Dz/dx**2)*((z_1[i-1] - 2*z_1[i] + z_1[i+1]) - kz*z[i])
    # Insert boundary conditions
    u[0] = 0; u[Nx] = 1/Dz
    v[0] = 0; v[Nx] = 1
    z[0] = 0; z[Nx] = 1
    # Update u_1, v_1, z_1 before the next step
    u_1[:] = u
    v_1[:] = v
    z_1[:] = z
The first problem I'm encountering is a series of runtime warnings:
/miniconda3/lib/python3.6/site-packages/ipykernel_launcher.py:31: RuntimeWarning: overflow encountered in double_scalars
/miniconda3/lib/python3.6/site-packages/ipykernel_launcher.py:31: RuntimeWarning: invalid value encountered in double_scalars
/miniconda3/lib/python3.6/site-packages/ipykernel_launcher.py:32: RuntimeWarning: invalid value encountered in double_scalars
/miniconda3/lib/python3.6/site-packages/ipykernel_launcher.py:32: RuntimeWarning: overflow encountered in double_scalars
/miniconda3/lib/python3.6/site-packages/ipykernel_launcher.py:33: RuntimeWarning: overflow encountered in double_scalars
/miniconda3/lib/python3.6/site-packages/ipykernel_launcher.py:33: RuntimeWarning: invalid value encountered in double_scalars
My main question is: is it possible to solve this set of equations with the forward Euler method I am trying at the moment?
Thank you everyone in advance!

The answer is "yes", but your code needs more work. For example, you will need to work on the algorithm's stability (to avoid it blowing up). Also, the boundary conditions do not reflect your system; I think you are looking for zero-flux conditions, and if so, you are not coding them right. Finally, you could also consider using FiPy, which might make your life easier. Please take a look here: https://www.ctcms.nist.gov/fipy/ I also wrote a basic example here: http://biological-complexity.blogspot.pe/
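For reference on the zero-flux point: a no-flux (Neumann) condition is usually imposed with a ghost-point reflection (u_(-1) = u_1 and u_(Nx+1) = u_(Nx-1)) rather than by pinning the endpoint values. Below is a minimal sketch of what the endpoint updates in the loop above could look like under that assumption; variable names follow the question's code, and zero flux is the answer's suggestion, not something the post confirms.
# Hedged sketch, not the poster's method: zero-flux ends via ghost points,
# so the diffusion stencil at each end becomes 2*(neighbor - endpoint).
u[0]  = u_1[0]  + dt*(a*(u_1[0]-u0) + b*(v_1[0]-v0) + av*(u_1[0]-u0)**3
                      + (Du/dx**2)*2*(u_1[1] - u_1[0]))*z_1[0]**n
u[Nx] = u_1[Nx] + dt*(a*(u_1[Nx]-u0) + b*(v_1[Nx]-v0) + av*(u_1[Nx]-u0)**3
                      + (Du/dx**2)*2*(u_1[Nx-1] - u_1[Nx]))*z_1[Nx]**n
# v and z would be treated analogously at both ends.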

Related

FiPy error "The Factor is exactly singular" when applying Neumann boundary conditions

We're trying to solve a one-dimensional coupled continuity-Poisson problem in FiPy. When applying Dirichlet conditions, it gives the correct results, but when we change the boundary conditions to Neumann conditions, which are closer to our problem, it gives a "The Factor is exactly singular" error.
Any help is highly appreciated. The code is as follows (0 < x < 2.5):
from fipy import *
from fipy import Grid1D, CellVariable, TransientTerm, DiffusionTerm, Viewer
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib import cm
from cachetools import cached, TTLCache  # caching to increase the speed of python
cache = TTLCache(maxsize=100, ttl=86400)  # creating the cache object: the
# first argument = the number of objects we store in the cache.
#____________________________________________________
nx = 50
dx = 0.05
L = nx*dx
e = math.e
m = Grid1D(nx=nx, dx=dx)
print(np.log(e))
#____________________________________________________
phi = CellVariable(mesh=m, hasOld=True, value=0.)
ne = CellVariable(mesh=m, hasOld=True, value=0.)
phi_face = phi.faceValue
ne_face = ne.faceValue
x = m.cellCenters[0]
t0 = Variable()
phi.setValue((x-1)**3)
ne.setValue(-6*(x-1))
#____________________________________________________
#@cached(cache)
def S(x, t):
    f = 6*(x-1)*e**(-t) + 54*((x-1)**2)*e**(-2.*t)
    return f
#____________________________________________________
# Boundary conditions:
valueleft_phi = 3*e**(-t0)
valueright_phi = 6.75*e**(-t0)
valueleft_ne = -6*e**(-t0)
valueright_ne = -6*e**(-t0)
phi.faceGrad.constrain([valueleft_phi], m.facesLeft)
phi.faceGrad.constrain([valueright_phi], m.facesRight)
ne.faceGrad.constrain([valueleft_ne], m.facesLeft)
ne.faceGrad.constrain([valueright_ne], m.facesRight)
#____________________________________________________
eqn0 = DiffusionTerm(1., var=phi) == ImplicitSourceTerm(-1., var=ne)
eqn1 = (TransientTerm(1., var=ne)
        == VanLeerConvectionTerm(phi.faceGrad, var=ne) + S(x, t0))
eqn = eqn0 & eqn1
#____________________________________________________
steps = 1.e4
dt = 1.e-4
T = dt*steps
F = dt/(dx**2)
print('F=', F)
#____________________________________________________
vi = Viewer(phi)
with open('out2.txt', 'w') as output:
    while t0() < T:
        print(t0)
        phi.updateOld()
        ne.updateOld()
        res = 1.e30
        #for sweep in range(steps):
        while res > 1.e-4:
            res = eqn.sweep(dt=dt)
        t0.setValue(t0() + dt)
        for j in range(nx):
            output.write(str(phi[j]) + ' ')  #+ os.linesep
        output.write('\n')
        if __name__ == '__main__':
            vi.plot()
#____________________________________________________
data = np.loadtxt('out2.txt')
X, T = np.meshgrid(np.linspace(0, L, len(data[0,:])),
                   np.linspace(0, T, len(data[:,0])))
fig = plt.figure(3)
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, T, Z=data)
plt.show(block=True)
The issue with these equations, particularly eqn0, is that they admit an infinite number of solutions when Neumann boundary conditions are applied on both boundaries. You can fix this by pinning the solution somewhere with an internal fixed value. E.g., based on the analytical solution given in the comments, phi = (x-1)**3 * exp(-t), we can pin phi = 0 at x = 1 with
mask = (m.x > 1-dx/2) & (m.x < 1+dx/2)
largeValue = 1e6
value = 0.
#____________________________________________________
eqn0 = (DiffusionTerm(1., var=phi) == ImplicitSourceTerm(-1., var=ne)
        + ImplicitSourceTerm(mask * largeValue, var=phi)
        - mask * largeValue * value)
At this point, the solutions still do not agree with the expected solutions. This is because, while you have called ne.faceGrad.constrain() for the left and right boundaries, the gradient of ne does not appear in the discretized equations. You can see this if you plot ne: the gradient is zero at both boundaries despite the constraint, because FiPy never "sees" the constraint.
What does appear is the flux ne * phi.faceGrad. By applying fixed-flux boundary conditions, I obtain the expected solutions:
ne_left = 6 * numerix.exp(-t0)
ne_right = -9 * numerix.exp(-t0)
eqn1 = (TransientTerm(1., var=ne)
        == VanLeerConvectionTerm(phi.faceGrad * m.interiorFaces, var=ne)
        + S(x, t0)
        + (m.facesLeft * ne_left * phi.faceGrad).divergence
        + (m.facesRight * ne_right * phi.faceGrad).divergence)
You can probably get better convergence properties with
eqn1 = (TransientTerm(1., var=ne)
        == DiffusionTerm(coeff=ne.faceValue * m.interiorFaces, var=phi)
        + S(x, t0)
        + (m.facesLeft * ne_left * phi.faceGrad).divergence
        + (m.facesRight * ne_right * phi.faceGrad).divergence)
but either formulation seems to work.
Note: phi.faceGrad.constrain() is fine, because the flux of phi does appear in DiffusionTerm(coeff=1., var=phi).
Separately, it appears (based on "The Factor is exactly singular") that you are solving with the SciPy LinearLUSolver. The PETSc LinearLUSolver does better, but the baseline value of the solution wanders all over the place. Calling
res = eqn.sweep(dt=dt, solver=LinearGMRESSolver())
also seems to produce stable results (without pinning an internal value). This behavior probably shouldn't be relied on; pinning a value is the right thing to do.

How to simulate a reaction with an order < 1 in Pyomo?

I am simulating a chemical reaction of the form A --> B --> C using a chemical batch reactor model. The corresponding ODE system is as follows:
dcA/dt = - kA * cA(t) ** nA1
dcB/dt = kA * cA(t) ** nA1 - kB * cB(t) ** nB2
dcC/dt = kB * cB(t) ** nB2
Pyomo solves the ODE system fine if the exponents nA1 and nB2 are 1 or higher. But in my case they are below 1, and as the component concentrations approach zero the ODE integration fails, returning only NaNs. The reason is that once the concentrations approach zero they numerically take on values such as cA(t) = -10e-20, and then the expression cA(t)**nA1 can no longer be evaluated.
I tried to implement a workaround of the form:
if cA < 0:
    R1 = 0
else:
    R1 = kA * cA(t) ** nA1
but I wasn't able to do it properly, as I had a hard time with the Pyomo syntax.
This is the minimal working example:
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from pyomo.environ import *
from pyomo.dae import *

V = 40      # l
kA = 0.5    # 1/min
kB = 0.1    # 1/min
nA1 = 0.5
nB2 = 0.5
cAf = 2.0   # mol/l

def batch_plot(t, y):
    plt.plot(t, y[:, 0], label="cA")
    plt.plot(t, y[:, 1], label="cB")
    plt.plot(t, y[:, 2], label="cC")
    plt.legend()

def batch():
    m = ConcreteModel()
    m.t = ContinuousSet(bounds=(0, 500))
    m.cA = Var(m.t, domain=NonNegativeReals)
    m.cB = Var(m.t, domain=NonNegativeReals)
    m.cC = Var(m.t, domain=NonNegativeReals)
    m.dcA = DerivativeVar(m.cA, wrt=m.t)
    m.dcB = DerivativeVar(m.cB, wrt=m.t)
    m.dcC = DerivativeVar(m.cC, wrt=m.t)
    m.cA[0] = cAf
    m.cB[0] = 0
    m.cC[0] = 0
    R1 = lambda m, t: kA * m.cA[t] ** nA1
    R2 = lambda m, t: kB * m.cB[t] ** nB2
    m.odeA = Constraint(m.t, rule=lambda m, t: m.dcA[t] == -R1(m, t))
    m.odeB = Constraint(m.t, rule=lambda m, t: m.dcB[t] == R1(m, t) - R2(m, t))
    m.odeC = Constraint(m.t, rule=lambda m, t: m.dcC[t] == R2(m, t))
    return m

tsim, profiles = Simulator(batch(), package="scipy").simulate(numpoints=100)
batch_plot(tsim, profiles)
I expect the ode integration to work even with reaction orders below 1.
Does anybody have an idea on how to achieve this?
There are two aims in modifying the power function x^n:
- extend it to negative x in a smooth way, so that the numerical method does not hiccup close to x = 0, and
- give it a small slope for small x, so that the numerical integration for very small x has a greater chance of being stable.
The first condition is satisfied by constructs like
x*max(eps,abs(x))^(n-1),
x*(eps+abs(x-eps))^(n-1), or
x*(eps^2+abs(x-eps)^2)^(0.5*(n-1)),
which all take exactly the value x^n for x > eps and are continuous and piecewise smooth. But the slope at x = 0 is of size eps^(n-1), which will require very small step sizes even after the system stabilizes.
The solution is to extract even more integer power from the rational power, in the form
x*abs(x) * max(eps,abs(x))^(n-2)
or one of the other variants for the last factor. For 0 < x < eps and n = 0.5 this results in the value r(x) = x^2 * eps^(-1.5), so that the equation x' = -k*r(x) has the solution x(t) = x1/(1 + x1*k*eps^(-1.5)*(t-t1)) after it has fallen to a point 0 < x1 < eps at t = t1. The slope of r is smaller than 2, which is nice for numerical integrators.
This was implemented for scipy.integrate.solve_ivp, using the method LSODA and rather strict tolerances, with the ODE right-hand-side function
# your original function, stabilizes at negative values
power0 = lambda x, n: max(0, x) ** n
# linear at x=0, small step sizes
def power1(x, n): eps = 1e-4; return x * max(eps, abs(x)) ** (n-1)
def power2(x, n): eps = 1e-4; return x * (eps**2 + (x-eps)**2) ** (0.5*(n-1))
# quadratic at x=0, large step sizes on the tail
eps = 1e-8
power3 = lambda x, n: x * abs(x) * max(eps, abs(x)) ** (n-2)
power4 = lambda x, n: x * abs(x) * (eps**2 + (x-eps)**2) ** (0.5*n - 1)
# select the power approximation used
power = power3

def model(t, u):
    cA, cB, cC = u
    R1 = kA * power(cA, nA1)
    R2 = kB * power(cB, nB2)
    return [-R1, R1 - R2, R2]
The integration runs successfully, using step sizes of 20-30 toward the tail end. The resulting plot looks qualitatively correct, and when zoomed in on small values it is smooth and remains positive.
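For completeness, here is a minimal sketch of how the integration call itself might look; the time span and constants come from the question's example, but the tolerances are assumptions rather than the answer's exact values:
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt

kA, kB = 0.5, 0.1   # rate constants from the question (1/min)
nA1 = nB2 = 0.5     # reaction orders from the question
cAf = 2.0           # feed concentration from the question (mol/l)
eps = 1e-8
power = lambda x, n: x * abs(x) * max(eps, abs(x)) ** (n-2)  # power3 above

def model(t, u):
    cA, cB, cC = u
    R1 = kA * power(cA, nA1)
    R2 = kB * power(cB, nB2)
    return [-R1, R1 - R2, R2]

# LSODA with rather strict tolerances, as described above
sol = solve_ivp(model, (0, 500), [cAf, 0.0, 0.0], method="LSODA",
                rtol=1e-10, atol=1e-12)
plt.plot(sol.t, sol.y.T)
plt.legend(["cA", "cB", "cC"])
plt.show()
Since power2 above is purely algebraic (no max or abs), it could in principle also be substituted directly into the question's Pyomo rate expressions R1 and R2.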

fmin_slsqp returns the initial guess when finding the minimum of a cubic spline

I am trying to find the minimum of a natural cubic spline. I have written the following code to find the natural cubic spline. (I have been given test data and have confirmed this method is correct.) Now I cannot figure out how to find the minimum of this function.
This is the data
xdata = np.linspace(0.25, 2, 8)
ydata = 10**(-12) * np.array([1,2,1,2,3,1,1,2])
This is the function
import scipy as sp
import numpy as np
import math
from numpy.linalg import inv
from scipy.optimize import fmin_slsqp
from scipy.optimize import minimize, rosen, rosen_der

def phi(x, xd, yd):
    n = len(xd)
    h = np.array(xd[1:n] - xd[0:n-1])
    f = np.divide(yd[1:n] - yd[0:(n-1)], h)
    q = [0]*(n-2)
    for i in range(n-2):
        q[i] = 3*(f[i+1] - f[i])
    A = np.zeros(((n-2), (n-2)))
    # define A for j = 0
    A[0,0] = 2*(h[0] + h[1])
    A[0,1] = h[1]
    # define A for j = n-2
    A[-1,-2] = h[-2]
    A[-1,-1] = 2*(h[-2] + h[-1])
    # define A in the middle
    for j in range(1, (n-3)):
        A[j,j-1] = h[j]
        A[j,j] = 2*(h[j] + h[j+1])
        A[j,j+1] = h[j+1]
    Ainv = inv(A)
    B = Ainv.dot(q)
    b = n*[0]
    b[1:(n-1)] = B
    # now we find a, b, c and d
    a = [0]*(n-1)
    c = [0]*(n-1)
    d = [0]*(n-1)
    s = [0]*(n-1)
    for r in range(n-1):
        a[r] = 1/(3*h[r]) * (b[r+1] - b[r])
        c[r] = f[r] - h[r]*((2*b[r] + b[r+1])/3)
        d[r] = yd[r]
    # solution 1 start
    for m in range(n-1):
        if xd[m] <= x <= xd[m+1]:
            s = a[m]*(x - xd[m])**3 + b[m]*(x-xd[m])**2 + c[m]*(x-xd[m]) + d[m]
    return s
    # solution 1 end
I want to find the minimum on the domain of my xdata, so fmin didn't work, as you cannot define bounds there. I tried both fmin_slsqp and minimize. They are not compatible with the phi function I wrote, so I rewrote phi(x, xd, yd) with an extra variable, giving phi(x, xd, yd, m). Here m indicates in which piece of the spline we are calculating a solution (from x_m to x_(m+1)). In the code we replaced #solution 1 with the following:
# solution 2 start
return(a[m]*(x - xd[m])**3 + b[m]*(x-xd[m])**2 + c[m]*(x-xd[m]) + d[m])
# solution 2 end
To find the minimum on a subdomain x_m to x_(m+1) we use the following code (here m = 0, so x runs from 0.25 to 0.5; the initial guess is 0.3):
fmin_slsqp(phi, x0 = 0.3, bounds=([(0.25,0.5)]), args=(xdata, ydata, 0))
What I would then do (I know it's crude) is iterate this with a for loop to find the minimum on all subdomains and then take the overall minimum. However, fmin_slsqp always returns the initial guess as the minimum, so something is wrong that I do not know how to fix. If you could help me, that would be greatly appreciated. Thanks for reading this far.
When I plot your function phi and the data you feed in, I see that its range is of the order of 1e-12. However, fmin_slsqp is unable to handle that level of precision and fails to detect any change in your objective.
The solution I propose is to scale the return value of your objective up by the same order of magnitude, like so:
return(s*1e12)
Then you get good results.
>>> sol = fmin_slsqp(phi, x0=0.3, bounds=([(0.25, 0.5)]), args=(xdata, ydata))
>>> print(sol)
Optimization terminated successfully. (Exit mode 0)
Current function value: 1.0
Iterations: 2
Function evaluations: 6
Gradient evaluations: 2
[ 0.25]
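As a side note, the segment-by-segment loop the question describes can also be written with scipy.optimize.minimize_scalar, which handles bounded one-dimensional problems directly. This is only a sketch of that idea, assuming the four-argument phi(x, xd, yd, m) variant and keeping the 1e12 scaling of its return value:
from scipy.optimize import minimize_scalar

# Minimize the (scaled) spline piece by piece and keep the best result.
candidates = []
for seg in range(len(xdata) - 1):
    res = minimize_scalar(phi, bounds=(xdata[seg], xdata[seg+1]),
                          args=(xdata, ydata, seg), method='bounded')
    candidates.append((res.fun, res.x))
best_fun, best_x = min(candidates)   # overall minimum over all segments
print(best_x, best_fun / 1e12)       # undo the scaling when reporting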

How to solve a system of 9 nonlinear differential equations with Python?

I'm desperately trying to solve (and display the graph of) a system of nine nonlinear differential equations which models the path of a boomerang. The system is the following:
All the letters on the left side are variables; the others are either constants or known functions depending on v_G and w_z.
I have tried scipy's odeint with no conclusive results (I ran into this issue, but the workaround did not work).
I am beginning to think that the problem is linked to the fact that these equations are nonlinear, or that the functions in the denominators might cause a singularity that the scipy solver is simply unable to handle. However, I am not familiar with that sort of mathematics.
What possibilities do I have, Python-wise, to solve this set of equations?
EDIT: Sorry if I was not clear enough. Since it models the path of a boomerang, my goal is not to solve this system analytically (i.e. I don't care about the mathematical expression of each function), but rather to get the values of each function for a specific time range (say, from t1 = 0 s to t2 = 15 s with an interval of 0.01 s between values) in order to display the graph of each function and the path of the boomerang's center of mass (X, Y, Z are its coordinates).
Here is the code I tried:
import scipy.integrate as spi
import numpy as np

# Constants
I3 = 10**-3
lamb = 1
L = 5*10**-1
mu = I3
m = 0.1
Cz = 0.5
rho = 1.2
S = 0.03*0.4
Kz = 1/2*rho*S*Cz
g = 9.81

# Initial conditions
omega0 = 20*np.pi
V0 = 25
Psi0 = 0
theta0 = np.pi/2
phi0 = 0
psi0 = -np.pi/9
X0 = 0
Y0 = 0
Z0 = 1.8
INPUT = (omega0, V0, Psi0, theta0, phi0, psi0, X0, Y0, Z0)  # initial conditions

def diff_eqs(t, INP):
    '''The main set of equations'''
    Y = np.zeros((9))
    Y[0] = (1/I3) * (Kz*L*(INP[1]**2 + (L*INP[0])**2))
    Y[1] = -(lamb/m)*INP[1]
    Y[2] = -(1/(m*INP[1])) * (Kz*L*(INP[1]**2 + (L*INP[0])**2) + m*g) + (mu/I3)/INP[0]
    Y[3] = (1/(I3*INP[0]))*(-mu*INP[0]*np.sin(INP[6]))
    Y[4] = (1/(I3*INP[0]*np.sin(INP[3]))) * (mu*INP[0]*np.cos(INP[5]))
    Y[5] = -np.cos(INP[3])*Y[4]
    Y[6] = INP[1]*(-np.cos(INP[5])*np.cos(INP[4]) + np.sin(INP[5])*np.sin(INP[4])*np.cos(INP[3]))
    Y[7] = INP[1]*(-np.cos(INP[5])*np.sin(INP[4]) - np.sin(INP[5])*np.cos(INP[4])*np.cos(INP[3]))
    Y[8] = INP[1]*(-np.sin(INP[5])*np.sin(INP[3]))
    return Y  # for odeint

t_start = 0.0
t_end = 20
t_step = 0.01
t_range = np.arange(t_start, t_end, t_step)
RES = spi.odeint(diff_eqs, INPUT, t_range)
However, I keep getting the same problem as shown here, and in particular the error message:
Excess work done on this call (perhaps wrong Dfun type)
I am not quite sure what it means, but it looks like the solver has trouble solving the system. In any case, when I try to display the 3D path from the X, Y, Z coordinates, I only get 3 or 4 points where there should be something like 2000.
So my questions are:
- Am I doing something wrong in my code?
- If not, is there another, maybe more sophisticated, tool to solve this system?
- If not, is it even possible to get what I want from this system of ODEs?
Thanks in advance
There are several issues:
- if I copy the code, it does not run
- the workaround you mention does not work with odeint; the given solution uses ode
- the scipy reference for odeint says: "For new code, use scipy.integrate.solve_ivp to solve a differential equation."
- the call RES = spi.odeint(diff_eqs, INPUT, t_range) must be consistent with the function head def diff_eqs(t, INP). By default odeint expects a function of the form func(y, t), i.e. the opposite argument order, so mind the order (see the sketch after this list).
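One minimal way to reconcile the call with the (t, y) signature, assuming SciPy >= 1.1 (where odeint accepts a tfirst flag), would be:
# keep def diff_eqs(t, INP) unchanged and tell odeint that t comes first
RES = spi.odeint(diff_eqs, INPUT, t_range, tfirst=True)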
There are some issues with the mathematical formulas too:
- have a look at the 3rd formula in your picture: it has no tendency term, it starts with a zero. What does that mean?
- it's hard to check whether you have translated the formulas correctly into code, since the code does not follow the formulas strictly.
Below I tried a solution with scipy's solve_ivp. In case A I'm able to run a pendulum, but in case B no meaningful solution for the boomerang can be found. So check the maths; I suspect some error in the mathematical expressions.
For the graphics, use pandas to plot all variables together (see the code below).
import scipy.integrate as spi
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt  # needed for the graphics section below

def diff_eqs_boomerang(t, Y):
    INP = Y
    dY = np.zeros((9))
    dY[0] = (1/I3) * (Kz*L*(INP[1]**2 + (L*INP[0])**2))
    dY[1] = -(lamb/m)*INP[1]
    dY[2] = -(1/(m*INP[1])) * (Kz*L*(INP[1]**2 + (L*INP[0])**2) + m*g) + (mu/I3)/INP[0]
    dY[3] = (1/(I3*INP[0]))*(-mu*INP[0]*np.sin(INP[6]))
    dY[4] = (1/(I3*INP[0]*np.sin(INP[3]))) * (mu*INP[0]*np.cos(INP[5]))
    dY[5] = -np.cos(INP[3])*INP[4]
    dY[6] = INP[1]*(-np.cos(INP[5])*np.cos(INP[4]) + np.sin(INP[5])*np.sin(INP[4])*np.cos(INP[3]))
    dY[7] = INP[1]*(-np.cos(INP[5])*np.sin(INP[4]) - np.sin(INP[5])*np.cos(INP[4])*np.cos(INP[3]))
    dY[8] = INP[1]*(-np.sin(INP[5])*np.sin(INP[3]))
    return dY

def diff_eqs_pendulum(t, Y):
    dY = np.zeros((3))
    dY[0] = Y[1]
    dY[1] = -Y[0]
    dY[2] = Y[0]*Y[1]
    return dY

t_start, t_end = 0.0, 12.0

case = 'A'
if case == 'A':  # pendulum
    Y = np.array([0.1, 1.0, 0.0])
    Yres = spi.solve_ivp(diff_eqs_pendulum, [t_start, t_end], Y,
                         method='RK45', max_step=0.01)
if case == 'B':  # boomerang
    Y = np.array([omega0, V0, Psi0, theta0, phi0, psi0, X0, Y0, Z0])
    print('Y initial:'); print(Y); print()
    Yres = spi.solve_ivp(diff_eqs_boomerang, [t_start, t_end], Y,
                         method='RK45', max_step=0.01)

#---- graphics ---------------------
yy = pd.DataFrame(Yres.y).T
tt = np.linspace(t_start, t_end, yy.shape[0])
with plt.style.context('fivethirtyeight'):
    plt.figure(1, figsize=(20,5))
    plt.plot(tt, yy, lw=8, alpha=0.5)
    plt.grid(axis='y')
    for j in range(3):
        plt.fill_between(tt, yy[j], 0, alpha=0.2, label='y['+str(j)+']')
    plt.legend(prop={'size': 20})

Solving the heat equation with Python (NumPy)

I solve the heat equation for a metal rod with one end kept at 100 °C and the other at 0 °C, as follows:
import numpy as np
import matplotlib.pyplot as plt

dt = 0.0005
dy = 0.0005
k = 10**(-4)
y_max = 0.04
t_max = 1
T0 = 100

def FTCS(dt, dy, t_max, y_max, k, T0):
    s = k*dt/dy**2
    y = np.arange(0, y_max+dy, dy)
    t = np.arange(0, t_max+dt, dt)
    r = len(t)
    c = len(y)
    T = np.zeros([r, c])
    T[:, 0] = T0
    for n in range(0, r-1):
        for j in range(1, c-1):
            T[n+1, j] = T[n, j] + s*(T[n, j-1] - 2*T[n, j] + T[n, j+1])
    return y, T, r, s

y, T, r, s = FTCS(dt, dy, t_max, y_max, k, T0)

plot_times = np.arange(0.01, 1.0, 0.01)
for t in plot_times:
    plt.plot(y, T[int(t/dt), :])
If the boundary condition is changed so that one end is insulated (zero flux, a Neumann condition), how should the update term
T[n+1,j] = T[n,j] + s*(T[n,j-1] - 2*T[n,j] + T[n,j+1])
be modified?
A typical approach to Neumann boundary condition is to imagine a "ghost point" one step beyond the domain, and calculate the value for it using the boundary condition; then proceed normally (using the PDE) for the points that are inside the grid, including the Neumann boundary.
The ghost point allows us to use the symmetric finite-difference approximation to the derivative at the boundary, that is, (T[n, j+1] - T[n, j-1]) / (2*dy) if y is the space variable. The non-symmetric approximation (T[n, j] - T[n, j-1]) / dy, which does not involve a ghost point, is much less accurate: the error it introduces is an order of magnitude worse than the error involved in the discretization of the PDE itself.
So, when j is the maximal possible index for T, the boundary condition says that "T[n, j+1]" should be understood as T[n, j-1] and this is what is done below.
for j in range(1, c-1):
    T[n+1,j] = T[n,j] + s*(T[n,j-1] - 2*T[n,j] + T[n,j+1])  # as before
j = c-1
T[n+1, j] = T[n,j] + s*(T[n,j-1] - 2*T[n,j] + T[n,j-1])  # note the last term here
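Putting it together, here is a minimal sketch of the question's FTCS function with the right end insulated (the left end keeps its Dirichlet value T0; the function name is ad hoc, not from the original post):
import numpy as np

def FTCS_insulated_right(dt, dy, t_max, y_max, k, T0):
    s = k*dt/dy**2
    y = np.arange(0, y_max+dy, dy)
    t = np.arange(0, t_max+dt, dt)
    r, c = len(t), len(y)
    T = np.zeros([r, c])
    T[:, 0] = T0                     # Dirichlet condition at the left end
    for n in range(0, r-1):
        for j in range(1, c-1):      # interior points, as before
            T[n+1, j] = T[n, j] + s*(T[n, j-1] - 2*T[n, j] + T[n, j+1])
        # insulated right end: ghost point T[n, c] mirrors T[n, c-2]
        T[n+1, c-1] = T[n, c-1] + 2*s*(T[n, c-2] - T[n, c-1])
    return y, T, r, s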
