I am trying to assign an infinite value to a Gekko variable. I have tried NumPy's infinity and Python's built-in infinity, but neither works because Gekko does not recognize them.
The goal is to force a variable to be strictly equal to 0, at least on the first iteration of the solver.
from gekko import GEKKO
from numpy import Inf
model=GEKKO()
R=model.FV(value=Inf)
T=model.Array(model.Var,2)
Q=model.FV()
model.Equation(Q==(T[1]-T[0])/R)
model.solve()
And the error I am getting:
Exception: #error: Model Expression
*** Error in syntax of function string: Invalid element: inf
Moreover, other variables sometimes also need to be infinite, again variables that appear in the denominator of a model equation. This is quite useful for trying different scenarios of the simulation I am working on and checking the system's behavior.
Hope you can help me, thank you.
The large-scale NLP and MINLP solvers don't know how to compute gradients with a np.nan value, so initializing with NaN generally doesn't help. Please post example code that demonstrates the improved performance you are observing from NaN initialization.
Below are four unconstrained optimization methods compared on the same sample problem. The algorithms do not benefit from NaN initialization; some solvers substitute NaN with 0 or with a very high or low number. I suggest that you try giving np.nan as an initial condition to these solution methods to see how it affects the search for the minimum.
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
# define objective function
def f(x):
    x1 = x[0]
    x2 = x[1]
    obj = x1**2 - 2.0 * x1 * x2 + 4 * x2**2
    return obj

# define objective gradient
def dfdx(x):
    x1 = x[0]
    x2 = x[1]
    grad = []
    grad.append(2.0 * x1 - 2.0 * x2)
    grad.append(-2.0 * x1 + 8.0 * x2)
    return grad
# Exact 2nd derivatives (hessian)
H = [[2.0, -2.0],[-2.0, 8.0]]
# Start location
x_start = [-3.0, 2.0]
# Design variables at mesh points
i1 = np.arange(-4.0, 4.0, 0.1)
i2 = np.arange(-4.0, 4.0, 0.1)
x1_mesh, x2_mesh = np.meshgrid(i1, i2)
f_mesh = x1_mesh**2 - 2.0 * x1_mesh * x2_mesh + 4 * x2_mesh**2
# Create a contour plot
plt.figure()
# Specify contour lines
lines = range(2,52,2)
# Plot contours
CS = plt.contour(x1_mesh, x2_mesh, f_mesh,lines)
# Label contours
plt.clabel(CS, inline=1, fontsize=10)
# Add some text to the plot
plt.title(r'$f(x)=x_1^2 - 2x_1x_2 + 4x_2^2$')
plt.xlabel(r'$x_1$')
plt.ylabel(r'$x_2$')
##################################################
# Newton's method
##################################################
xn = np.zeros((2,2))
xn[0] = x_start
# Get gradient at start location (df/dx or grad(f))
gn = dfdx(xn[0])
# Compute search direction and magnitude (dx)
# with dx = -inv(H) * grad
delta_xn = np.empty((1,2))
delta_xn = -np.linalg.solve(H,gn)
xn[1] = xn[0]+delta_xn
plt.plot(xn[:,0],xn[:,1],'k-o')
##################################################
# Steepest descent method
##################################################
# Number of iterations
n = 8
# Use this alpha for every line search
alpha = 0.15
# Initialize xs
xs = np.zeros((n+1,2))
xs[0] = x_start
# Get gradient at start location (df/dx or grad(f))
for i in range(n):
    gs = dfdx(xs[i])
    # Compute search direction and magnitude (dx)
    # with dx = - grad but no line searching
    xs[i+1] = xs[i] - np.dot(alpha,dfdx(xs[i]))
plt.plot(xs[:,0],xs[:,1],'g-o')
##################################################
# Conjugate gradient method
##################################################
# Number of iterations
n = 8
# Use this alpha for the first line search
alpha = 0.15
neg = [[-1.0,0.0],[0.0,-1.0]]
# Initialize xc
xc = np.zeros((n+1,2))
xc[0] = x_start
# Initialize delta_gc
delta_cg = np.zeros((n+1,2))
# Initialize gc
gc = np.zeros((n+1,2))
# Get gradient at start location (df/dx or grad(f))
for i in range(n):
    gc[i] = dfdx(xc[i])
    # Compute search direction and magnitude (dx)
    # with dx = - grad but no line searching
    if i==0:
        beta = 0
        delta_cg[i] = - np.dot(alpha,dfdx(xc[i]))
    else:
        beta = np.dot(gc[i],gc[i]) / np.dot(gc[i-1],gc[i-1])
        delta_cg[i] = alpha * np.dot(neg,dfdx(xc[i])) + beta * delta_cg[i-1]
    xc[i+1] = xc[i] + delta_cg[i]
plt.plot(xc[:,0],xc[:,1],'y-o')
##################################################
# Quasi-Newton method
##################################################
# Number of iterations
n = 8
# Use this alpha for every line search
alpha = np.linspace(0.1,1.0,n)
# Initialize delta_xq and gamma
delta_xq = np.zeros((2,1))
gamma = np.zeros((2,1))
part1 = np.zeros((2,2))
part2 = np.zeros((2,2))
part3 = np.zeros((2,2))
part4 = np.zeros((2,2))
part5 = np.zeros((2,2))
part6 = np.zeros((2,1))
part7 = np.zeros((1,1))
part8 = np.zeros((2,2))
part9 = np.zeros((2,2))
# Initialize xq
xq = np.zeros((n+1,2))
xq[0] = x_start
# Initialize gradient storage
g = np.zeros((n+1,2))
g[0] = dfdx(xq[0])
# Initialize hessian storage
h = np.zeros((n+1,2,2))
h[0] = [[1, 0.0],[0.0, 1]]
for i in range(n):
    # Compute search direction and magnitude (dx)
    # with dx = -alpha * inv(h) * grad
    delta_xq = -np.dot(alpha[i],np.linalg.solve(h[i],g[i]))
    xq[i+1] = xq[i] + delta_xq
    # Get gradient update for next step
    g[i+1] = dfdx(xq[i+1])
    # Get hessian update for next step
    gamma = g[i+1]-g[i]
    part1 = np.outer(gamma,gamma)
    part2 = np.outer(gamma,delta_xq)
    part3 = np.dot(np.linalg.pinv(part2),part1)
    part4 = np.outer(delta_xq,delta_xq)
    part5 = np.dot(h[i],part4)
    part6 = np.dot(part5,h[i])
    part7 = np.dot(delta_xq,h[i])
    part8 = np.dot(part7,delta_xq)
    part9 = np.dot(part6,1/part8)
    h[i+1] = h[i] + part3 - part9
plt.plot(xq[:,0],xq[:,1],'r-o')
plt.tight_layout()
plt.savefig('contour.png',dpi=600)
plt.show()
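For instance, seeding the steepest-descent update above with NaN shows what happens: every arithmetic operation involving NaN returns NaN, so the iterates never move. A minimal sketch (my addition, not part of the original comparison):

import numpy as np
# NaN propagates through every gradient step, so the search goes nowhere
x_nan = np.array([np.nan, np.nan])
for _ in range(8):
    grad = np.array([2.0*x_nan[0] - 2.0*x_nan[1], -2.0*x_nan[0] + 8.0*x_nan[1]])
    x_nan = x_nan - 0.15*grad
print(x_nan)  # [nan nan]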
More information is available in the design optimization course.
Response to Edit
Thanks for clarifying the question and for including a source code example. While it isn't possible to use Inf as a guess value, an equivalent form with an additional variable x can accomplish the desired behavior. This sets the term (T[1]-T[0])/R equal to zero at the first iteration.
from gekko import GEKKO
model=GEKKO()
R=model.FV(value=1e20)  # large finite value stands in for Inf
T=model.Array(model.Var,2)
x=model.Var(value=0)
Q=model.FV()
model.Equations([x==(T[1]-T[0])/R,
                 Q==x])
model.solve()
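A further option (my suggestion, not part of the original answer) is to multiply the equation through by R so that no division appears at all; with a very large R this likewise pins Q near zero initially, although very large coefficients can cause scaling problems for some solvers:

from gekko import GEKKO
model = GEKKO()
R = model.FV(value=1e20)
T = model.Array(model.Var, 2)
Q = model.FV()
# equivalent to Q == (T[1]-T[0])/R, but with no denominator
model.Equation(Q*R == T[1] - T[0])
model.solve()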
I am trying to model a system of coupled ODEs which represent a three-box ocean model of phosphorus concentration (y) in the low-latitude surface ocean (Box 1), high-latitude surface ocean (Box 2), and deep ocean (Box 3). The ODEs are given below:
dy1dt = (Q/V1)*(y3-y1) - y1/tau # dP/dt in Box 1
dy2dt = (Q/V2)*(y1-y2) + (qh/V2)*(y3-y2) - (fh/V2) # dP/dt in Box 2
dy3dt = (Q/V3)*(y2-y3) + (qh/V3)*(y2-y3) + (V1*y1)/(V3*tau) + (fh/V3) # dP/dt in Box 3
The constants and box volumes are given by:
### Define Constants
tau = 86400 # s
VT = 1.37e18 # m3
Q = 25e6 # m3/s
qh = 38e6 # m3/s
fh = 0.0022 # mol/m3
avp = 0.00215 # mol/m3
### Calculate Surface Area of Ocean
r = 6.4e6 # m
earth = 4*np.pi*(r**2) # m2
ocean = .70*earth # m2
### Calculate Volumes for Each Box
V1 = .85*100*ocean # m3
V2 = .15*250*ocean # m3
V3 = VT-V1-V2 # m3
This can be put into the matrix form dy/dt = Ay + f, where y = [y1, y2, y3]. I have provided the matrices and initial conditions below:
A = np.array([[(-Q/V1)-(1/tau),0,(Q/V1)],
[(Q/V2),(-Q/V2)-(qh/V2),(qh/V2)],
[(V1/(V3*tau)),((Q+qh)/V3),((-Q-qh)/V3)]])
f = np.array([[0],[-fh/V2],[fh/V3]])
y1 = y2 = y3 = 0.00215 # mol/m3
I am having trouble adapting the forward Euler method to apply to a system of linear ODEs, rather than just one. This is what I have come up with so far (it runs without errors but doesn't produce correct results, if that makes sense; I think it has something to do with the initial conditions?):
### Define a Function for the Euler-Forward Scheme
import numpy as np
from tqdm import trange  # provides the trange used in the loop below

def ForwardEuler(t0,y0,N,dt):
    N = 100
    dt = 0.1
    # Create empty 2D arrays for t and y
    t = np.zeros([N+1,3,3]) # steps, # variables, # solutions
    y = np.zeros([N+1,3,3])
    # Assign each ODE to a vector element
    y1 = y[0]
    y2 = y[1]
    y3 = y[2]
    # Set initial conditions for each solution
    t[0, 0] = t0[0]
    y[0, 0] = y0[0]
    t[0, 1] = t0[1]
    y[0, 1] = y0[1]
    t[0, 2] = t0[2]
    y[0, 2] = y0[2]
    for i in trange(int(N)):
        t[i+1] = t[i] + t[i]*dt
        y1[i+1] = y1[i] + ((Q/V1)*(y3[i]-y1[i]) - (y1[i]/tau))*dt
        y2[i+1] = y2[i] + ((Q/V2)*(y1[i]-y2[i]) + (qh/V2)*(y3[i]-y2[i]) - (fh/V2))*dt
        y3[i+1] = y3[i] + ((Q/V3)*(y2[i]-y3[i]) + (qh/V3)*(y2[i]-y3[i]) + (V1*y1[i])/(V3*tau) + (fh/V3))*dt
    return t, y1, y2, y3
Any help on this is greatly appreciated. I have not found any resources online that work through forward Euler for a system of 3 ODEs, and am at a loss. I am happy to explain further if there are more questions.
As Lutz Lehmann pointed out, you need to set up the ODE system as a single function. You could define the whole system inside a function as follows:
import numpy as np
def fun(t, RHS):
    # unpack the current values of the variables
    y1 = RHS[0]
    y2 = RHS[1]
    y3 = RHS[2]
    # calculate the rate of change of the respective variables
    dy1dt = (Q/V1)*(y3-y1) - y1/tau
    dy2dt = (Q/V2)*(y1-y2) + (qh/V2)*(y3-y2) - (fh/V2)
    dy3dt = (Q/V3)*(y2-y3) + (qh/V3)*(y2-y3) + (V1*y1)/(V3*tau) + (fh/V3)
    # Left-hand side of the ODE system
    LHS = np.zeros([3,])
    LHS[0] = dy1dt
    LHS[1] = dy2dt
    LHS[2] = dy3dt
    return LHS
In the above function, we receive the time t as an argument and the current values of y1, y2, and y3 as the array RHS, which is unpacked into the respective variables. Afterward, the rate equation of each variable is evaluated. In the end, the calculated rates are returned as the array LHS.
Now, we can define a simple Euler Forward method to solve this ODE system as follows:
def ForwardEuler(fun,t0,y0,N,dt):
    t = np.arange(t0,N+dt,dt)
    y = np.zeros([len(t), len(y0)])
    y[0] = y0
    for i in range(len(t)-1):
        y[i+1,:] = y[i,:] + fun(t[i],y[i,:]) * dt
    return t, y
Here, we create a time range from 0 to 100 with a step size of 0.1. Afterward, we create an array of zeros with shape (len(t), len(y0)), which in this case is (1001, 3). We need to do this because we want to solve fun over the whole range of t (1001 points), and the RHS argument of fun has shape (3,) ([y1, y2, y3]). So at each and every point in t, we solve for the three variables of RHS, which are returned as LHS.
In the end, we can solve this ODE system as follows:
dt = 0.1
N = 100
y0 = [0.00215, 0.00215, 0.00215]
t0 = 0
t,y = ForwardEuler(fun,t0,y0,N,dt)
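Since the question also set up the matrix form dy/dt = Ay + f, the right-hand-side function could equivalently be written as a single matrix product. A sketch, assuming the arrays A and f from the question are in scope:

def fun_matrix(t, y):
    # A is (3,3), f is (3,1); flatten f so the result has shape (3,)
    return A @ y + f.flatten()

t, y = ForwardEuler(fun_matrix, t0, y0, N, dt)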
Solution using scipy.integrate
As Lutz Lehmann also pointed out, you can use scipy.integrate for this purpose as well, which is far easier. Here you can reuse the fun defined above and simply solve the ODE as follows:
import numpy as np
from scipy.integrate import odeint
dt = 0.1
N = 100
t = np.linspace(0,N,int(N/dt))
y0 = [0.00215, 0.00215, 0.00215]
res = odeint(fun, y0, t, tfirst=True)
print(res)
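The newer solve_ivp interface works the same way and takes fun(t, y) directly, so no tfirst argument is needed. A sketch with the same fun, y0, and t as above:

from scipy.integrate import solve_ivp

sol = solve_ivp(fun, (0, N), y0, t_eval=t)
print(sol.y.T)  # one row per time point, matching the odeint output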
For a week I have been trying, to no avail, to solve a system of coupled differential equations and reproduce the results shown in the attached image, but I keep getting strange results, as also shown. I don't know what I might be doing wrong. The set of coupled differential equations was originally solved using Newman's BAND. Here's a link to the python implementation: python solution using BAND. And another link to the original image of the problem in case the attached is not clear enough: here you find a clearer image of the problem.
What I am trying to do now is to solve the same problem by creating a sparse array directly from the discretized equations, using a combination of sympy and numpy, and then solving it with scipy's spsolve. My code is below. I need some help to figure out what I am doing wrong.
I have represented the variables as c1 = cA, c2 = cB, c3 = cC, c4 = cD in my code. Equation 2 has been linearized and phi10 and phi20 are the trial values of the variables cC and cD.
# import modules
import numpy as np
import sympy
from sympy.core.function import _mexpand
import scipy as sp
import scipy.sparse as ss
import scipy.sparse.linalg as ssl
import matplotlib.pyplot as plt
# define functions
def flatten(t):
    """
    function to flatten lists
    """
    return [item for sublist in t for item in sublist]

def get_coeffs(coeff_dict, func_vars):
    """
    function to extract coefficients from variables
    and form the sparse symbolic array
    """
    c = coeff_dict
    for i in list(c.keys()):
        b, _ = i.as_base_exp()
        if b == i:
            continue
        if b in c:
            c[i] = 0
        if any(k.has(b) for k in c):
            c[i] = 0
    return [coeff_dict[val] for val in func_vars]
# Constants for the problem
I = 0.1 # A/cm2
L = 1.0 # distance (x) in cm
m = 100 # grid spacing
h = L / (m-1)
a = 23300 # 1/cm
io = 2e-7 # A/cm2
n = 1
F = 96500 # C/mol
R = 8.314 # J/mol-K
T = 298 # K
sigma = 20 # S/cm
kappa = 0.06 # S/cm
alpha = 0.5
beta = -(1-alpha)*n*F/R/T
phi10 , phi20 = 5, 0.5 # these are just guesses
P = a*io*np.exp(beta*(phi10-phi20))
j = sympy.symbols('j',integer = True)
cA = sympy.IndexedBase('cA')
cB = sympy.IndexedBase('cB')
cC = sympy.IndexedBase('cC')
cD = sympy.IndexedBase('cD')
# write the boundary conditions at x = 0
bc=[cA[1], cB[1],
(4/3) * cC[2] - (1/3)*cC[3], # use a three point approximation for cC_prime
cD[1]
]
# form a list of expressions from the boundary conditions and equations
expr=flatten([bc,flatten([[
-cA[j-1] - cB[j-1] + cA[j+1] + cB[j+1],
cB[j-1] - 2*h*P*beta*cC[j] + 2*h*P*beta*cD[j] - cB[j+1],
-sigma*cC[j-1] + 2*h*cA[j] + sigma * cC[j+1],
-kappa * cD[j-1] + 2*h * cB[j] + kappa * cD[j+1]] for j in range(2, m)])])
vars = [cA[j], cB[j], cC[j], cD[j]]
# flatten the list of variables
unknowns = flatten([[cA[j], cB[j], cC[j], cD[j]] for j in range(1,m)])
var_len = len(unknowns)
# # # substitute in the boundary conditions at x = L while getting the coefficients
A = sympy.SparseMatrix([get_coeffs(_mexpand(i.subs({cA[m]:I}))\
.as_coefficients_dict(), unknowns) for i in expr])
# convert to a numpy array
mat_temp = np.array(A).astype(np.float64)
# you can view the sparse array with this
fig = plt.figure(figsize=(6,6))
ax = fig.add_axes([0,0, 1,1])
cmap = plt.cm.binary
plt.spy(mat_temp, cmap = cmap, alpha = 0.8)
def solve_sparse(b0, error):
    # create the b column vector
    b = np.copy(b0)
    b[0:4] = np.array([0.0, I, 0.0, 0.0])
    b[var_len-4] = I
    b[var_len-3] = 0
    b[var_len-2] = 0
    b[var_len-1] = 0
    print(b.shape)
    old = np.copy(b0)
    mat = np.copy(mat_temp)
    b_2 = np.copy(b)
    resid = 10
    lss = 0
    while lss < 100:
        mat_2 = np.copy(mat)
        for j in range(3, var_len - 3, 4):
            # update the forcing term of equation 2
            b_2[j+2] = 2*h*(1-beta*old[j+3]+beta*old[j+4])*a*io*np.exp(beta*(old[j+3]-old[j+4]))
            # update the sparse array at every iteration for variables cC and cD in equation 2
            mat_2[j+2, j+3] += 2*h*beta*a*io*np.exp(beta*(old[j+3]-old[j+4]))
            mat_2[j+2, j+4] += 2*h*beta*a*io*np.exp(beta*(old[j+3]-old[j+4]))
        # form the compressed sparse column matrix
        A_s = ss.csc_matrix(mat_2)
        new = ssl.spsolve(A_s, b_2).flatten()
        resid = np.sum((new - old)**2)/var_len
        lss += 1
        old = np.copy(new)
    return new
val0 = np.array([[0.0, 0.0, 0.0, 0.0] for _ in range(m-1)]).flatten() # form an array of initial values
error = 1e-7
## Run the code
conc = solve_sparse(val0, error).reshape(m-1, len(vars))
conc.shape # gives (99, 4)
# Plot result for cA:
plt.plot(conc[:,0], marker = 'o', linestyle = '')
What happens seems pretty clear now, after seeing that the plotted solution indeed oscillates between the upper and lower values. You are using the central Euler method as the discretization; for u'=F(u) this reads as
u[j+1]-u[j-1] = 2*h*F(u[j])
This method is only weakly stable and allows the sub-sequences of odd and even indices to evolve rather independently. As an equation, this means the solution may approximate the system ue'=F(uo), uo'=F(ue) with independent functions ue, uo that follow the paths of the even and odd sub-sequences.
These even and odd parts are tied together only by the treatment of the boundary points, two or three points deep. So avoiding or reducing the oscillation requires very careful handling of the boundary conditions, and also of the differential equations at the boundary points.
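A minimal demonstration of this even/odd decoupling (my own sketch, not from the question) on the test equation u' = -u:

import numpy as np
h = 0.1
n = 200
u = np.zeros(n)
u[0] = 1.0
u[1] = np.exp(-h)  # start the two-step scheme from the exact value
for j in range(1, n-1):
    u[j+1] = u[j-1] - 2*h*u[j]  # u[j+1] - u[j-1] = 2*h*F(u[j]) with F(u) = -u
print(u[-4:])  # alternating signs with growing magnitude instead of decay to 0

The parasitic root of the two-step recurrence has magnitude greater than one, so the even and odd iterates drift apart exactly as described above.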
But one can avoid all this unpleasantness by using the trapezoidal method
u[j+1]-u[j] = 0.5*h*(F(u[j+1])+F(u[j]))
This also reduces the bandwidth of the system matrix.
To implement the implied Newton method correctly (linearizing via Taylor expansion and solving the linearized equation is what the Newton-Kantorovich method does), you need to replace F(u[j]) with F(u_old[j])+F'(u_old[j])*(u[j]-u_old[j]). This then gives a system of equations that is linear in u for the iteration step.
For the trapezoidal method this gives
(I-0.5*h*F'(u_old[j+1]))*u[j+1] - (I+0.5*h*F'(u_old[j]))*u[j]
= 0.5*h*(F(u_old[j+1])-F'(u_old[j+1])*u_old[j+1] + F(u_old[j])-F'(u_old[j])*u_old[j])
In general, the derivative values and thus the system matrix need not be updated every step; only the function values must be (else the iteration does not move forward).
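As an illustration only (my own sketch, with a made-up scalar F(u) = -u**3 rather than the coupled system from the question), one linearized trapezoidal sweep can be assembled and solved like this:

import numpy as np

F  = lambda u: -u**3      # example right-hand side (assumption for this sketch)
dF = lambda u: -3*u**2    # its derivative F'(u)

h, n = 0.1, 50
u0 = 1.0                              # given value at the left boundary
u_old = np.linspace(1.0, 0.1, n+1)    # current iterate on the grid

# assemble the linear system row by row from the formula above
A = np.zeros((n+1, n+1))
b = np.zeros(n+1)
A[0,0] = 1.0
b[0] = u0
for j in range(n):
    A[j+1, j+1] = 1 - 0.5*h*dF(u_old[j+1])
    A[j+1, j]   = -(1 + 0.5*h*dF(u_old[j]))
    b[j+1] = 0.5*h*(F(u_old[j+1]) - dF(u_old[j+1])*u_old[j+1]
                    + F(u_old[j]) - dF(u_old[j])*u_old[j])

u_new = np.linalg.solve(A, b)  # next iterate; repeat until the change is small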
I am trying to estimate the parameters of the models jointly using nonlinear least squares, minimizing the sum of squared differences between the actual values and the model-based estimates. However, the resulting SSE is higher than the SSE with my guessed values: the guessed values give an SSE of 2,951,687, while the optimized parameters give an SSE of 4,281,096.
versions: python 3.7.6, numpy 1.19.2, scipy 1.5.2
import numpy as np
import pandas as pd
from scipy.optimize import minimize
###################### importing the csv file ######################
df = pd.read_csv('data2.csv')
###################### Setting up variables and arrays ######################
a = df.loc[:,'C(ADD)'].values #measured added customers
l = df.loc[:,'C(Loss)'].values #measured lost customers
m = df.loc[:,'m'].values #number of months
mkt = df.loc[:,'Marketing Expense'].values #marketing dollars in each month
e = 5596 #end measured value, calculated from the cac/total marketing spend over the time period
n = len(df) #length of the dataframe
###################### Defining equations ######################
g0 = np.zeros(n) #guess values
g0[0] = 0.0001
g0[1] = 0.006
g0[2] = 96755.00
g0[3] = 1.7
g0[4] = 0.6
g0[5] = 0.1
g0[6] = 0.006
g0[7] = 1.7
g0[8] = 0.6
def addhat(g): #Add predicted values
    pNT = g[0]
    r = g[1]
    alpha = g[2]
    c = g[3]
    Bm = g[4]
    ah = np.empty(len(df)) #an empty array for the add hat values
    b = np.empty(n) #an empty array for the B(m,m') values
    b[0] = np.exp(np.log(mkt[0])*Bm)
    ah[0] = 400000*((1-pNT) * (1 - (alpha/(alpha + b[0]))**r))
    for i in range(1, n):
        b[i] = b[i-1] + (m[i]**c - m[i-1]**c)*np.exp(np.log(mkt[i])*Bm)
        ah[i] = 400000*((1-pNT) * (1 - (alpha/(alpha + (b[i])))**r))
    return ah
print('add pred values: ' + str(addhat(g0)))
def rethat(g): #Retention percentage
    rr = g[5]
    alphar = g[6]
    cr = g[7]
    Bmr = g[8]
    k = np.empty(n) #an empty array for the exponent section of the formula
    w = np.empty(n) #an empty array for the retention values
    #The value of b(t)r when i = 0
    k[0] = np.exp(np.log(mkt[0])*Bmr)
    w[0] = 1 - (alphar/(alphar + k[0]))**rr
    # the value of B(t) for all other values of q
    for i in range(1, n):
        k[i] = k[i-1] + (m[i]**cr - m[i-1]**cr)*np.exp(np.log(mkt[i])*Bmr)
        w[i] = 1 - (alphar/(alphar + (k[i])))**rr
    return w
def endpred(g): #predicting the end hat values
    eh = np.empty(n) #an empty array for the end hat values
    eh[0] = 213
    for i in range(1, n):
        eh[i] = (eh[i-1] * rethat(g)[i]) + addhat(g)[i]
    return eh
endhat = sum(endpred(g0))
def losshat(g):
    lh = np.empty(n) #an empty array for the loss hat values
    lh[0] = 0
    for i in range(1, n):
        lh[i] = endpred(g)[i-1] - (endpred(g)[i] - addhat(g)[i])
    return lh
###################### Sum of square errors ######################
def objective(g):
    sse = sum((addhat(g)-a)**2 + (losshat(g)-l)**2) + (endhat-e)**2
    return sse
print("SSE Initial: " + str(objective(g0)))
###################### Constraints ######################
def constraint1(g): #c is greater than 1
    return g[3] - 1

def constraint2(g): #cr is greater than 1
    return g[7] - 1

def constraint3(g): #pNT is greater than 0
    return g[0]
con1 = {'type': 'ineq', 'fun': constraint1}
con2 = {'type': 'ineq', 'fun': constraint2}
con3 = {'type': 'ineq', 'fun': constraint3}
cons = [con1, con2, con3]
###################### Optimize ######################
s = minimize(objective, g0, method='SLSQP', constraints = cons)
g = s.x
print(g)
print("SSE Final: " + str(objective(g)))
The resulting SSE value is 4,281,096.9, with the parameter values being:
3.48133574e+02, 6.84452015e+02, 9.67550032e+04, 2.22008198e+00, -3.28153006e+03, -1.91454144e+02, 2.20947909e+02, 1.70207912e+00, -1.24649708e+01
The initial guess values I have used are quite close to the actual result values (I'm checking my code against a problem whose result I know). The results should be 0.0001001361, 0.006035783, 96,755.64542, 1.78204741, 0.636357403, 0.152, 0.0065432195, 1.73490796, 0.62625507, which give an SSE of 912,278.
Link to the data2.csv.
Thanks again for your help
It looks like you are using the same variable l for two different purposes.
It is first initialized with fixed values from the CSV file, but it is also used as an internal variable in the function rethat, and it appears in the objective function as well. This means that every time you optimize, you also change the objective function.
That does not look good...
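As a hypothetical illustration of that pitfall (not the actual code from the question), mutating a data array in place inside a model function makes the objective compare the model against itself:

import numpy as np

l = np.array([10.0, 12.0, 11.0])  # measured data loaded from the CSV

def model(g):
    # BUG: writing into 'l' in place overwrites the measured data
    l[:] = g[0] * np.arange(len(l))
    return l

def objective(g):
    # both sides now refer to the same (modified) array
    return np.sum((model(g) - l)**2)

print(objective([2.0]))  # 0.0, regardless of the data or the parameters

Renaming the internal array (for example to lh, as in losshat) keeps the measured l intact.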
I am new here and new to programming, so excuse me if the question is not formulated clearly enough.
For a uni assignment, my lab partner and I are programming a predator-prey system.
In this predator-prey system, there is a certain load factor 'W0'.
We want to find a load factor W0, accurate to 5 significant digits, such that there are never fewer than 250 predators (wnum[1] in our code). We need the code to find this value of W0 and then carry on with further calculations using it. Here is what we've tried so far, but Python does not seem to give any response:
# Import important stuff and settings
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
print ('Results of Group 4')
def W0():
    W0 = 2.0
    while any(wnum[1])<250:
        W0 = W0-0.0001
    return W0
def W(t):
    if 0 <= t < 3/12:
        Wt = 0
    elif 3/12 <= t <= 8/12:
        Wt = W0
    elif 8/12 < t < 1:
        Wt = 0
    else:
        Wt = W(t - 1)
    return Wt
# Define the right-hand-side function
def rhsf(t,y):
    y1 = y[0]
    y2 = y[1]
    f1 = (2-2*10**-3*y2)*y1-W(t)*y1
    f2 = (-3.92+7*10**-3*y1)*y2
    return np.array([f1,f2])
# Define one step of the RK4 method
def RK4Step(tn,wn,Dt,f):
    # tn = current time
    # wn = known approximation at time tn
    # Dt = the time step to use
    # f = the right-hand-side function to use
    # wnplus1 = the new approximation at time tn+Dt
    k1 = Dt*f(tn,wn)
    k2 = Dt*f(tn+0.5*Dt,wn+0.5*k1)
    k3 = Dt*f(tn+0.5*Dt,wn+0.5*k2)
    k4 = Dt*f(tn+Dt,wn+k3)
    wnplus1 = wn + 1/6*(k1 +2*k2 +2*k3 +k4)
    return wnplus1
# Define the complete RK4 method
def RK4Method(t0,tend,Dt,f,y0):
    # t0 = initial time of simulation
    # tend = final time of simulation
    # Dt = the time step to use
    # f = the right-hand-side function to use
    # y0 = the initial values
    # calculate the number of time steps to take
    N = int(np.round((tend-t0)/Dt))
    # make the list of times t at which we want the solution
    time = np.linspace(t0,tend,num=N+1)
    # make sure Dt matches the number of time steps
    Dt = (tend-t0)/N
    # Allocate memory for the approximations
    # row i represents all values of variable i at all times
    # column j represents all values of all variables at time t_j
    w = np.zeros((y0.size,N+1))
    # store the (given) initial value
    w[:,0] = y0
    # Perform all time steps
    for n,tn in enumerate(time[:-1]):
        w[:,n+1] = RK4Step(tn,w[:,n],Dt,f)
    return time, w
# Set all known values and settings
t0 = 0.0
tend = 10.0
y0 = np.array([600.0,1000.0])
Dt = 0.5/(2**7)
# Execute the method
tnum, wnum = RK4Method(t0,tend,Dt,rhsf,y0)
# Make a nice table
alldata = np.concatenate(([tnum],wnum),axis=0).transpose()
table = pd.DataFrame(alldata,columns=['t','y1(t)','y2(t)'])
print('\nA nice table of the simulation:\n')
print(table)
# Make a nice picture
plt.close('all')
plt.figure()
plt.plot(tnum,wnum[0,:],label='$y_1$',marker='o',linestyle='-')
plt.plot(tnum,wnum[1,:],label='$y_2$',marker='o',linestyle='-')
plt.xlabel('$t$')
plt.ylabel('$y(t)$')
plt.title('Simulation')
plt.legend()
# Do an error computation
# Execute the method again with a doubled time step
tnum2, wnum2 = RK4Method(t0,tend,2.0*Dt,rhsf,y0)
# Calculate the global truncation errors at the last simulated time
errors = (wnum[:,-1] - wnum2[:,-1])/(2**4-1)
print('\nThe errors are ',errors[0],' for y1 and ',errors[1],' for y2 at time t=',tnum[-1])