I am trying to approximate the inverse of the Fourier transform in two dimensions, for which I use the definition
$$
F^{-1}[\phi](x, y) = \dfrac{1}{4\pi^2} \int_{-\infty}^{\infty}\int_{-\infty}^{\infty} e^{-i (ux + wy)} \phi(u, w) \,\text{d}u \,\text{d}w.
$$
The integral will be approximated by use of the trapezoidal integration rule. The grid structure is chosen as $x_j = x_0 + j\Delta_x$, $y_k = y_0 + k\Delta_y$, $u_n = \left(n - \frac{N}{2}\right) \Delta_u$ and $w_m = \left(m - \frac{N}{2}\right) \Delta_w$,
for $j, k, n, m, N \in \mathbb{N}$. By choosing the relations $\Delta_u \Delta_x = \dfrac{2\pi}{N}$ and $\Delta_w \Delta_y = \dfrac{2\pi}{N}$, the approximation of the two-dimensional inverse Fourier transform can be written in terms of the fast Fourier transform (FFT).
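To make the reduction to the FFT explicit (the standard argument, using the grids above), substitute the grids into the exponent:
$$
u_n x_j = u_n x_0 + \left(n - \frac{N}{2}\right) j \Delta_u \Delta_x = u_n x_0 + \frac{2\pi n j}{N} - \pi j,
$$
so that $e^{-i u_n x_j} = (-1)^j \, e^{-i u_n x_0} \, e^{-2\pi i n j / N}$. Summing $e^{-2\pi i n j / N}$ against $\phi(u_n, \cdot)\, e^{-i u_n x_0}$ over $n$ is then exactly a length-$N$ FFT, and the same holds for $w$ and $y$; this is where the powerSequence(-1, N) and np.exp(-i * x[0] * u) factors in the code below come from.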
I implemented the results in Python, and tried to recover the bivariate normal density for different parameters $\sigma$ and $\mu$. As the normal density decays to zero, the grid can be constructed in terms of the mean and the variance.
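For reference, the composite trapezoidal rule in two dimensions weights corner nodes by $\frac{1}{4}$, edge nodes by $\frac{1}{2}$ and interior nodes by $1$:
$$
\int_{-\infty}^{\infty}\int_{-\infty}^{\infty} \approx \Delta_u \Delta_w \sum_{n=0}^{N-1} \sum_{m=0}^{N-1} c_{nm}\, e^{-i(u_n x + w_m y)}\, \phi(u_n, w_m),
\qquad
c_{nm} = \begin{cases} \frac{1}{4} & \text{corners}, \\ \frac{1}{2} & \text{edges}, \\ 1 & \text{interior}. \end{cases}
$$
These weights can be assembled as the full weight-one double sum, minus one half of the four boundary rows and columns, plus one quarter of the four corners (which the boundary correction subtracts twice); this is the invFour2d - b + e combination in inverseTransform below.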
Below, I have included the approximation in code:
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
from scipy.fft import fft, fft2
from scipy.stats import multivariate_normal

i = complex(0.0, 1.0)

def chF(uu, ww, mu, sigma):
    return np.exp(
        i * (uu * mu[0] + ww * mu[1])
        - 0.5 * (
            uu**2 * sigma[0, 0]
            + ww**2 * sigma[1, 1]
            + uu * ww * (sigma[0, 1] + sigma[1, 0])
        )
    )

def pdf(xx, yy, mu, sigma):
    norm = multivariate_normal(mu, sigma)
    return norm.pdf(np.dstack([xx, yy]))

def powerSequence(a, N, offset=0):
    # Returns a sequence of a ^ (n + offset) for n in N
    return a ** (np.arange(N) + offset)

def powerSequence2d(a, N1, N2, offset=0):
    # Returns a sequence of a ^ (n + j + offset) for n,j in N
    return a ** (np.array([
        j + np.arange(N2) for j in range(0, N1)]) + offset)

def createSimple1dGrid(c1, c2, c4=0, N: int = 2**9, L: float = 10):
    x0 = c1 - L * np.sqrt(c2)
    dx = 2 * L * np.sqrt(c2 + np.sqrt(c4)) / N
    du = 2 * np.pi / (N * dx)
    x = x0 + np.arange(N) * dx
    u = (np.arange(N) - N / 2) * du
    return x, u, du

def createSimpleGrid(c11, c21, c12, c22, c14=0, c24=0,
                     N: int = 2**9, L1: float = 10, L2: float = 10):
    x, u, du = createSimple1dGrid(c11, c12, c14, N=N, L=L1)
    y, w, dw = createSimple1dGrid(c21, c22, c24, N=N, L=L2)
    return x, y, u, w, du, dw

def inverseTransform(x, y, u, w, du, dw, phinn, N):
    # Compute corner terms of the trapezoidal integration rule for 2-dimensional integrals.
    e1 = np.exp(-i * (u[0] * x + w[0] * y)) * phinn[0, 0]
    e2 = np.exp(-i * (u[0] * x + w[N-1] * y)) * phinn[0, N-1]
    e3 = np.exp(-i * (u[N-1] * x + w[0] * y)) * phinn[N-1, 0]
    e4 = np.exp(-i * (u[N-1] * x + w[N-1] * y)) * phinn[N-1, N-1]
    e = 0.25 * (e1 + e2 + e3 + e4)
    # Compute boundary (edge) terms of the trapezoidal integration rule
    # for 2-dimensional integrals. These can be written as
    # one-dimensional Fourier transforms.
    b1 = np.exp(-i * w[0] * y) * powerSequence(-1, N) \
        * fft(phinn[:, 0] * np.exp(-i * x[0] * u))
    b2 = np.exp(-i * w[N-1] * y) * powerSequence(-1, N) \
        * fft(phinn[:, N-1] * np.exp(-i * x[0] * u))
    b3 = np.exp(-i * u[0] * x) * powerSequence(-1, N) \
        * fft(phinn[0, :] * np.exp(-i * y[0] * w))
    b4 = np.exp(-i * u[N-1] * x) * powerSequence(-1, N) \
        * fft(phinn[N-1, :] * np.exp(-i * y[0] * w))
    b = 0.5 * (b1 + b2 + b3 + b4)
    # Compute the 2-dimensional inverse FFT
    func = phinn * np.exp(-i * (x[0] * u + y[0] * w))
    invFour2d = powerSequence2d(-1, N, N) \
        * fft2(func)
    invTransform = 1 / (4 * np.pi**2) * du * dw * (invFour2d - b + e)
    return invTransform

def test_recover_normal_density_off_2d_non_centred():
    # Initialize parameters of the two-dimensional normal density
    s1, s2 = 4.0, 10.0
    mu, sigma = np.array([10, 10]), np.array([
        [s1, 0.0],
        [0.0, s2]
    ])
    # Create the Fourier grid
    N, L = 2**9, 10
    x, y, u, w, du, dw = createSimpleGrid(mu[0], mu[1], s1, s2, N=N, L1=L, L2=L)
    uu, ww = np.meshgrid(u, w, indexing="ij")
    xx, yy = np.meshgrid(x, y, indexing="ij")
    phi = chF(uu, ww, mu, sigma)
    expected = pdf(xx, yy, mu, sigma)
    # Compute the inverse transform
    result = inverseTransform(x, y, u, w, du, dw, phi, N)
    # Plot results
    _, ax = plt.subplots(subplot_kw={"projection": "3d"})
    ax.plot_surface(xx, yy, result)
    ax.plot_wireframe(xx, yy, expected, color="r", rstride=10, cstride=10)
    # Manually build the legend for the plot
    col1_patch = mpatches.Patch(color="b", label="approximation")
    col2_patch = mpatches.Patch(color="r", label="expected")
    legends = [col1_patch, col2_patch]
    ax.legend(handles=legends)
    plt.show()
The resulting plot is given as:
[Image: bivariate normal approximation]
The approximation is correct but misplaced on the grid. I thought this had something to do with the way the FFT algorithm places the origin; however, trying different shifts (fftshift and ifftshift from scipy.fft) only worsened the results.
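(For reference, fftshift simply rotates the zero-frequency component to the centre of the array, e.g.:)

from scipy.fft import fftshift
import numpy as np

print(fftshift(np.arange(8)))  # [4 5 6 7 0 1 2 3]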
Using different values for $\mu$ yielded results that were sometimes closer to the expected density and sometimes even further off.
How do I get the approximation result to coincide with the expected plot of the bivariate normal?
I have code which performs an optimization to infer a parameter:
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from scipy.optimize import root
from scipy.optimize import minimize
import pandas as pd
d = {'Week': [1, 2,3,4,5,6,7,8,9,10,11], 'incidence': [206.1705794,2813.420201,11827.9453,30497.58655,10757.66954,7071.878779,3046.752723,1314.222882,765.9763902,201.3800578,109.8982006]}
df = pd.DataFrame(data=d)
def peak_infections(beta, df):
    # Weeks for which the ODE system will be solved
    weeks = df.Week.to_numpy()
    # Total population, N.
    N = 100000
    # Initial number of infected and recovered individuals, I0 and R0.
    I0, R0 = 10, 0
    # Everyone else, S0, is susceptible to infection initially.
    S0 = N - I0 - R0
    J0 = I0
    # Contact rate, beta, and mean recovery rate, gamma, (in 1/days).
    # reproductive no. R zero is beta/gamma
    gamma = 1/7 * 7  # rate should be in weeks now
    # A grid of time points
    t = np.linspace(0, weeks[-1], weeks[-1] + 1)

    # The SIR model differential equations.
    def deriv(y, t, N, beta, gamma):
        S, I, R, J = y
        dS = ((-beta * S * I) / N)
        dI = ((beta * S * I) / N) - (gamma * I)
        dR = (gamma * I)
        dJ = ((beta * S * I) / N)
        return dS, dI, dR, dJ

    # Initial conditions are S0, I0, R0
    # Integrate the SIR equations over the time grid, t.
    solve = odeint(deriv, (S0, I0, R0, J0), t, args=(N, beta, gamma))
    S, I, R, J = solve.T
    return I/N

def residual(x, df):
    # Total population, N.
    N = 100000
    incidence = df.incidence.to_numpy()/N
    return np.sum((peak_infections(x, df)[1:] - incidence) ** 2)

x0 = 0.5
res = minimize(residual, x0, args=(df), method="Nelder-Mead").x
print(res)
However, it is not giving the correct values, so instead of numbering the weeks as 1, 2, 3, ... in the 'Week' column, I'd like to use days instead so Python has clearer information to work with. I'd like to slice the linspace of days at weekly intervals. However, I'm having some shape-alignment issues:
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from scipy.optimize import root
from scipy.optimize import minimize
import pandas as pd

time = np.linspace(0, 77, 77 + 1)
d = {'Week': [time[7],time[14],time[21],time[28],time[35],time[42],time[49],time[56],time[63],time[70],time[77]], 'incidence': [206.1705794,2813.420201,11827.9453,30497.58655,10757.66954,7071.878779,3046.752723,1314.222882,765.9763902,201.3800578,109.8982006]}
#d = {'Week': [1, 2,3,4,5,6,7,8,9,10,11], 'incidence': [206.1705794,2813.420201,11827.9453,30497.58655,10757.66954,7071.878779,3046.752723,1314.222882,765.9763902,201.3800578,109.8982006]}
df = pd.DataFrame(data=d)

def peak_infections(beta, df):
    # Weeks for which the ODE system will be solved
    weeks = df.Week.to_numpy()
    # Total population, N.
    N = 100000
    # Initial number of infected and recovered individuals, I0 and R0.
    I0, R0 = 10, 0
    # Everyone else, S0, is susceptible to infection initially.
    S0 = N - I0 - R0
    J0 = I0
    # Contact rate, beta, and mean recovery rate, gamma, (in 1/days).
    # reproductive no. R zero is beta/gamma
    gamma = 1/7 * 7  # rate should be in weeks now
    # A grid of time points
    t = np.linspace(0, 77, 77 + 1)

    # The SIR model differential equations.
    def deriv(y, t, N, beta, gamma):
        S, I, R, J = y
        dS = ((-beta * S * I) / N)
        dI = ((beta * S * I) / N) - (gamma * I)
        dR = (gamma * I)
        dJ = ((beta * S * I) / N)
        return dS, dI, dR, dJ

    # Initial conditions are S0, I0, R0
    # Integrate the SIR equations over the time grid, t.
    solve = odeint(deriv, (S0, I0, R0, J0), t, args=(N, beta, gamma))
    S, I, R, J = solve.T
    return I/N

def residual(x, df):
    # Total population, N.
    N = 100000
    incidence = df.incidence.to_numpy()/N
    return np.sum((peak_infections(x, df)[1:] - incidence) ** 2)

x0 = 0.5
res = minimize(residual, x0, args=(df), method="Nelder-Mead").x
print(res)
The approach I tried here was recreating the dataframe by slicing time, which covers 77 days, i.e. 11 weeks. It still raises the shape error, 77 against 11 elements, inside my function residual at the line return np.sum((peak_infections(x, df)[1:] - incidence) ** 2). Where is my approach going wrong?
EDIT: updated code:
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import pandas as pd

t = np.arange(7, 84, 7)
d = {'Week': t, 'incidence': [206.1705794,2813.420201,11827.9453,30497.58655,10757.66954,7071.878779,3046.752723,1314.222882,765.9763902,201.3800578,109.8982006]}
#d = {'Week': [time[7],time[14],time[21],time[28],time[35],time[42],time[49],time[56],time[63],time[70],time[77]], 'incidence': [206.1705794,2813.420201,11827.9453,30497.58655,10757.66954,7071.878779,3046.752723,1314.222882,765.9763902,201.3800578,109.8982006]}
#d = {'Week': [1, 2,3,4,5,6,7,8,9,10,11], 'incidence': [206.1705794,2813.420201,11827.9453,30497.58655,10757.66954,7071.878779,3046.752723,1314.222882,765.9763902,201.3800578,109.8982006]}
df = pd.DataFrame(data=d)

def peak_infections(beta, df):
    # Weeks for which the ODE system will be solved
    weeks = df.Week.to_numpy()
    # Total population, N.
    N = 100000
    # Initial number of infected and recovered individuals, I0 and R0.
    I0, R0 = 10, 0
    # Everyone else, S0, is susceptible to infection initially.
    S0 = N - I0 - R0
    J0 = I0
    # Contact rate, beta, and mean recovery rate, gamma, (in 1/days).
    # reproductive no. R zero is beta/gamma
    gamma = 1/7 * 7  # rate should be in weeks now
    # A grid of time points
    t = np.linspace(0, 77, 77 + 1)

    # The SIR model differential equations.
    def deriv(y, t, N, beta, gamma):
        S, I, R, J = y
        dS = ((-beta * S * I) / N)
        dI = ((beta * S * I) / N) - (gamma * I)
        dR = (gamma * I)
        dJ = ((beta * S * I) / N)
        return dS, dI, dR, dJ

    # Initial conditions are S0, I0, R0
    # Integrate the SIR equations over the time grid, t.
    solve = odeint(deriv, (S0, I0, R0, J0), t, args=(N, beta, gamma))
    S, I, R, J = solve.T
    return I/N

def residual(x, df):
    # Total population, N.
    N = 100000
    incidence = df.incidence.to_numpy()/N
    return np.sum((peak_infections(x, df) - incidence) ** 2)

x0 = 0.5
res = minimize(residual, x0, args=(df), method="Nelder-Mead").x
print(res)
Your problem occurs at line 52, where you are getting 77 values from peak_infections(x, df)[1:] while you have only 11 values of incidence, as you have mentioned.
This arises because you are solving your ODE at t (line 29), which has 78 values. To avoid this, generate a time vector with 11 values in your peak_infections function, as follows:
t = np.linspace(0, 77, 77 + 1)
t = [t[7], t[14], t[21], t[28], t[35], t[42], t[49], t[56], t[63], t[70], t[77]]
or a completely new one as:
t = np.arange(7,84,7)
and change your residual function (don't slice peak_infections(x, df)[1:]) as follows:
def residual(x, df):
    # Total population, N.
    N = 100000
    incidence = df.incidence.to_numpy()/N
    return np.sum((peak_infections(x, df) - incidence) ** 2)
this will solve your problem, as now you are comparing NumPy arrays with shapes (11,) and (11,), which will not produce an error.
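A quick check that the shapes now line up:

import numpy as np

t = np.arange(7, 84, 7)
print(t)        # [ 7 14 21 28 35 42 49 56 63 70 77]
print(t.shape)  # (11,), matching the 11 incidence values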
Suppose I have all but one parameter in my ODE system, and I wish to infer the missing one. Would I simply have to rearrange the equations to isolate the value? How is that done in a system where you have several equations? For example:
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
#three compartments, Susceptible S, infected I, recovered R
#dS/dt, dI/dt, dR/dt
#susceptible sees birth rate coming in, deaths leaving and force of infection leaving
#infected sees FOI coming in, deaths leaving and recovery rates
#recovered sees recovery rate coming in, deaths leaving
#beta is tranmission coefficient, FOI is beta * (I/N) where N is total pop
#initially consider a model not accounting for births and deaths
# Total population, N.
N = 1000
# Initial number of infected and recovered individuals, I0 and R0.
I0, R0 = 10, 0
# Everyone else, S0, is susceptible to infection initially.
S0 = N - I0 - R0
J0 = I0
# Contact rate, beta, and mean recovery rate, gamma, (in 1/days).
#beta =
gamma = 1/7
# A grid of time points (in days)
t = np.linspace(0, 100, 100+1)
# The SIR model differential equations.
def deriv(y, t, N, beta, gamma):
    S, I, R, J = y
    dS = ((-beta * S * I) / N)
    dI = ((beta * S * I) / N) - (gamma * I)
    dR = (gamma * I)
    dJ = ((beta * S * I) / N)
    return dS, dI, dR, dJ
# Initial conditions are S0, I0, R0
# Integrate the SIR equations over the time grid, t.
solve = odeint(deriv, (S0, I0, R0, J0), t, args=(N, beta, gamma))
S, I, R, J = solve.T
As you can see, I have left beta empty (commented out). If I have all the other values, and know that at the peak of the epidemic 10% of the population is infected, can beta be found from this information? What I tried was this:
sol = solve_ivp(lambda beta: deriv,
                [t], t_eval=t)
print(sol)
However, I have realised this syntax does not work. What is wrong about my approach? How can I estimate a value for beta?
The easiest approach here is to parameterize your code above by beta, plot the result (the peak infections) as a function of beta, and then see where it crosses the threshold. Define the function:
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

def peak_infections_pct(beta, n_days_total=100):
    # Total population, N.
    N = 1000
    # Initial number of infected and recovered individuals, I0 and R0.
    I0, R0 = 10, 0
    # Everyone else, S0, is susceptible to infection initially.
    S0 = N - I0 - R0
    J0 = I0
    # Contact rate, beta, and mean recovery rate, gamma, (in 1/days).
    gamma = 1/7
    # A grid of time points (in days)
    t = np.linspace(0, n_days_total, n_days_total + 1)

    # The SIR model differential equations.
    def deriv(y, t, N, beta, gamma):
        S, I, R, J = y
        dS = ((-beta * S * I) / N)
        dI = ((beta * S * I) / N) - (gamma * I)
        dR = (gamma * I)
        dJ = ((beta * S * I) / N)
        return dS, dI, dR, dJ

    # Initial conditions are S0, I0, R0
    # Integrate the SIR equations over the time grid, t.
    solve = odeint(deriv, (S0, I0, R0, J0), t, args=(N, beta, gamma))
    S, I, R, J = solve.T
    return np.max(I)/N
calculate and plot:
betas = np.linspace(0, 1, 101, endpoint=True)
peak_inf = [peak_infections_pct(b) for b in betas]
plt.plot(betas, peak_inf)
plt.plot(betas, 0.1 * np.ones(len(betas)))
to get:
[Image: peak infection fraction versus beta, with the 10% threshold line]
so the answer is about beta ≈ 0.25.
To be more precise, just solve for beta:
from scipy.optimize import root
root(lambda b: peak_infections_pct(b)-0.1, x0 = 0.5).x
output:
array([0.23847079])
Note that I left the time interval as an input to the function; you may want to use a different length, as the epidemic may last longer than 100 days.
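Alternatively, since the plot suggests the peak fraction increases monotonically with beta over this range (worth verifying on the plot), a bracketing solver such as brentq also works; the bracket endpoints below are assumptions read off the plot:

from scipy.optimize import brentq

# assumed bracket: peak fraction below 10% at beta = 0.05, above it at beta = 1.0
beta_crit = brentq(lambda b: peak_infections_pct(b) - 0.1, 0.05, 1.0)
print(beta_crit)  # should agree with the root(...) result above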
Just to double-check, let's plot infections as a function of time for our beta = 0.2384...:
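(The plot is omitted here; a sketch to reproduce it, reusing the model and constants from peak_infections_pct with the fitted beta:)

import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

N, I0, R0, gamma, beta = 1000, 10, 0, 1/7, 0.23847079
S0 = N - I0 - R0
t = np.linspace(0, 100, 101)

def deriv(y, t, N, beta, gamma):
    S, I, R, J = y
    dS = -beta * S * I / N
    dI = beta * S * I / N - gamma * I
    dR = gamma * I
    dJ = beta * S * I / N
    return dS, dI, dR, dJ

sol = odeint(deriv, (S0, I0, R0, I0), t, args=(N, beta, gamma))
plt.plot(t, sol[:, 1], label='I(t)')  # peak should be ~100 individuals
plt.legend()
plt.show()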
Indeed, the peak is at 100 (which is 10%).
Apologies for the (maybe misleading) title and the probably confusing question itself; I struggle a lot with wording my problem, and especially with compressing it into one sentence for the title. I want to find the roots of a function f(w, t, some_other_args) with two variables, w and t, using Python. The real function structure is really long and complicated; you can find it at the end of this post. The important thing is that it contains the following line:
k = 1.5 * m.sqrt((1.0 - w) / (1.0 - 0.25 * w))
This means that w can't exceed 1, because that would lead to calculating the square root of a negative number, which, of course, is impossible. I have algorithms for calculating the approximate values of w and t using other values in my function, but they are very inaccurate.
So, I try to calculate the roots with scipy.optimize.fsolve (after trying literally every root-finding algorithm I could find online, I found this one to be the best for my function), using these approximate values as starting points, which would look like this:
solution = optimize.fsolve(f, x0=np.array([t_approx, w_approx]), args=(some_other_args))
For most values, this works perfectly fine. If w is too close to 1, however, there always comes a point when fsolve tries some value bigger than 1 for w, which, in turn, raises a ValueError (because calculating the square root of a negative number is mathematically impossible). This is an example printing out the values that fsolve is using, where w should be somewhere around 0.997:
w_approx: 0.9960090844989311
t_approx: 24.26777844720981
Values: t:24.26777844720981, w:0.9960090844989311
Values: t:24.26777844720981, w:0.9960090844989311
Values: t:24.26777844720981, w:0.9960090844989311
Values: t:24.267778808827888, w:0.9960090844989311
Values: t:24.26777844720981, w:0.996009099340623
Values: t:16.319554685876746, w:1.0096680915775516
solution = optimize.fsolve(f, x0=np.array([t_approx, w_approx]), args=(some_other_args))
File "C:\Users\...\venv\lib\site-packages\scipy\optimize\minpack.py", line 148, in fsolve
res = _root_hybr(func, x0, args, jac=fprime, **options)
File "C:\Users\...\venv\lib\site-packages\scipy\optimize\minpack.py", line 227, in _root_hybr
ml, mu, epsfcn, factor, diag)
File "C:\Users\...\algorithm.py", line 9, in f
k = 1.5 * m.sqrt((1.0 - w) / (1.0 - 0.25 * w))
ValueError: math domain error
So, how can I tell optimize.fsolve that w can't get bigger than 1? Or what are alternative algorithms for doing something like this? (I know about brentq and so on, but all of those require giving an interval for both roots, which I don't want to do.)
Code for testing (what's important to note here: even though func theoretically is supposed to calculate R and T given t and w, I have to use it the other way around. It's a bit clunky, but I simply don't manage to rewrite the function so that it accepts T, R to calculate t, w; it's a bit too much for my mediocre mathematical expertise ;)):
import math as m
from scipy import optimize
import numpy as np

def func(t, w, r_1, r_2, r_3):
    k = 1.5 * m.sqrt((1.0 - w) / (1.0 - 0.25 * w))
    k23 = 2 * k / 3
    z1 = 1 / (1 + k23)
    z2 = 1 / (1 - k23)
    z3 = 3 * ((1 / 5 + r_1 - r_2 - 1 / 5 * r_1 * r_2) / (z1 - r_2 * z2)) * m.exp(t * (k - 1))
    z4 = -(z2 - r_2 * z1) / (z1 - r_2 * z2) * m.exp(2 * k * t)
    z5 = -(z1 - r_2 * z2) / (z2 - r_2 * z1)
    z6 = 3 * (1 - r_2 / 5) / (z2 - r_2 * z1)
    beta_t = r_3 / (z2 / z1 * m.exp(2 * k * t) + z5) * (z6 - 3 / (5 * z1) * m.exp(t * (k - 1)))
    alpha_t = beta_t * z5 - r_3 * z6
    beta_r = (z3 - r_1 / 5 / z2 * m.exp(-2 * t) * 3 - 3 / z2) / (z1 / z2 + z4)
    alpha_r = -z1 / z2 * beta_r - 3 / z2 - 3 / 5 * r_1 / z2 * m.exp(-2 * t)
    It_1 = 1 / 4 * w / (1 - 8 / 5 * w) * (alpha_t * z2 * m.exp(-k * t) + beta_t * z1 * m.exp(k * t) + 3 * r_3 * m.exp(-t))
    Ir_1 = (1 / 4 * w / (1 - 8 / 5 * w)) * (z1 * alpha_r + z2 * beta_r + 3 / 5 + 3 * r_1 * m.exp(-2 * t))
    T = It_1 + m.exp(-t) * r_3
    R = Ir_1 + m.exp(-2 * t) * r_1
    return [T, R]

def calc_1(t, w, T, R, r_1, r_2, r_3):
    t_begin = float(t[0])
    T_new, R_new = func(t_begin, w, r_1, r_2, r_3)
    a = abs(-1 + T_new/T)
    b = abs(-1 + R_new/R)
    return np.array([a, b])

def calc_2(x, T, R, r_1, r_2, r_3):
    t = x[0]
    w = x[1]
    T_new, R_new = func(t, w, r_1, r_2, r_3)
    a = abs(T - T_new)
    b = abs(R - R_new)
    return np.array([a, b])

def approximate_w(R):
    k = (1 - R) / (R + 2 / 3)
    w_approx = (1 - ((2 / 3 * k) ** 2)) / (1 - ((1 / 3 * k) ** 2))
    return w_approx

def approximate_t(w, T, R, r_1, r_2, r_3):
    t = optimize.root(calc_1, x0=np.array([10, 0]), args=(w, T, R, r_1, r_2, r_3))
    return t.x[0]

def solve(T, R, r_1, r_2, r_3):
    w_x = approximate_w(R)
    t_x = approximate_t(w_x, T, R, r_1, r_2, r_3)
    sol = optimize.fsolve(calc_2, x0=np.array([t_x, w_x]), args=(T, R, r_1, r_2, r_3))
    return sol

# Values for testing:
T = 0.09986490557943692
R = 0.8918728343037964
r_1 = 0
r_2 = 0
r_3 = 1
print(solve(T, R, r_1, r_2, r_3))
What about applying a logistic transform to the argument that you want to constrain? I mean, inside f, you could do
import numpy as np

def f(free_w, ...):
    w = 1/(1 + np.exp(-free_w))  # w will always lie between 0 and 1
    ...
    return zeros
Then you would just have to apply the same logistic transformation to the solution value of free_w to get w*:
solution = optimize.fsolve(f, x0=np.array([t_approx, w_approx]), args=(some_other_args))
free_w = solution[0]
w = 1/(1 + np.exp(-free_w))
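As a self-contained sketch of the idea on a toy problem (the function g and the target value 0.05 are made up for illustration, not the asker's f), with the root at w = 0.9975:

import numpy as np
from scipy.optimize import fsolve

def g(free_w):
    w = 1 / (1 + np.exp(-free_w))   # maps the real line into (0, 1)
    return np.sqrt(1.0 - w) - 0.05  # root at w = 0.9975, safely inside (0, 1)

free_w = fsolve(g, x0=0.0)[0]
print(1 / (1 + np.exp(-free_w)))    # ~0.9975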
Your reported error occurs because fsolve cannot deal with the implicit restrictions in the conversion of w to k. This can be solved radically by inverting that dependence, making func depend on t and k instead.
def w2k(w): return 3 * m.sqrt((1.0 - w) / (4.0 - w))
# k = 1.5 * m.sqrt((1.0 - w) / (1.0 - 0.25 * w))
# (k/3)**2 * (4-w) = 1-w
def k2w(k): return 4 - 3/(1 - (k/3)**2)

def func(t, k, r_1, r_2, r_3):
    w = k2w(k)
    print("t=%20.15f, k=%20.15f, w=%20.15f" % (t, k, w))
    ...
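A quick round-trip check of the reparameterization, with arbitrarily chosen test values (this assumes w2k and k2w as defined above):

for w in (0.1, 0.9, 0.996009):
    assert abs(k2w(w2k(w)) - w) < 1e-12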
Then remove the absolute values from the function values in calc_1 and calc_2. The absolute values only turn your solutions into non-differentiable points, which is bad for any root-finding algorithm; sign changes and smooth roots are good for Newton-like methods.
def calc_2(x, T, R, r_1, r_2, r_3):
    t = x[0]
    k = x[1]
    T_new, R_new = func(t, k, r_1, r_2, r_3)
    a = T - T_new
    b = R - R_new
    return np.array([a, b])
It does not make much sense to find the value of t by solving the equation with w (resp. k) held fixed; it just doubles the computational effort.
def approximate_k(R):
    k = (1 - R) / (R + 2 / 3)
    return k

def solve(T, R, r_1, r_2, r_3):
    k_x = approximate_k(R)
    t_x = 10
    sol = optimize.fsolve(calc_2, x0=np.array([t_x, k_x]), args=(T, R, r_1, r_2, r_3))
    return sol

t, k = solve(T, R, r_1, r_2, r_3)
print("t=%20.15f, k=%20.15f, w=%20.15f" % (t, k, k2w(k)))
With these modifications, the solution
t= 14.860121342410327, k= 0.026653140486605, w= 0.999763184675043
is found within 15 function evaluations.
You should try defining your function explicitly before optimizing it; that way you can check the domain more easily.
Essentially you have a function of T and R. This worked for me:
def func_to_solve(TR_vector, r_1, r_2, r_3):
    T, R = TR_vector  # what you are trying to find
    w_x = approximate_w(R)
    t_x = approximate_t(w_x, T, R, r_1, r_2, r_3)
    return calc_2([t_x, w_x], T, R, r_1, r_2, r_3)

def solve(TR, r_1, r_2, r_3):
    sol = optimize.fsolve(func_to_solve, x0=TR, args=(r_1, r_2, r_3))
    return sol
Also, replace m.exp with np.exp: math.exp only accepts scalars, while np.exp works element-wise on arrays.
I am trying to develop an algorithm (using scipy.integrate.odeint()) that predicts the changing concentration of cells, substrate and product (i.e., X, S, P) over time until the system reaches steady state (~100 or 200 hours). The initial concentration of cells in the bioreactor is 0.1 g/L and there is no glucose or product in the reactor initially. I want to test the algorithm for a range of different flow rates, Q, between 0.01 L/h and 0.25 L/h, and analyze the impact of the flow rate on product production (i.e., Q * P in g/h). Eventually, I would like to generate a plot that shows product production rate (y-axis) versus flow rate, Q, on the x-axis. My goal is to estimate the flow rate that results in the maximum (or critical) production rate. This is my code so far:
from scipy.integrate import odeint
import numpy as np

# Constants
u_max = 0.65
K_s = 0.14
K_1 = 0.48
V = 2
X_in = 0
S_in = 4
Y_s = 0.38
Y_p = 0.2

# Variables
# Q - Flow Rate (L/h), value between 0.01 and 0.25 that produces best Q * P
# X - Cell Concentration (g/L)
# S - The glucose concentration (g/L)
# P - Product Concentration (g/L)

# Equations
def func_dX_dt(X, t, S):
    u = (u_max) / (1 + (K_s / S))
    dX_dt = (((Q * S_in) - (Q * S)) / V) + (u * X)
    return dX_dt

def func_dS_dt(S, t, X):
    u = (u_max) / (1 + (K_s / S))
    dS_dt = (((Q * S_in) - (Q * S)) / V) - (u * (X / Y_s))
    return dS_dt

def func_dP_dt(P, t, X, S):
    u = (u_max) / (1 + (K_s / S))
    dP_dt = ((-Q * P) / V) - (u * (X / Y_p))
    return dP_dt

t = np.linspace(0, 200, 200)

# Q placeholder
Q = 0.01

# Attempt to solve the ordinary differential equations
sol_dX_dt = odeint(func_dX_dt, 0.1, t, args=(S,))
sol_dS_dt = odeint(func_dS_dt, 0.1, t, args=(X,))
sol_dP_dt = odeint(func_dP_dt, 0.1, t, args=(X, S))
In the program's current state there does not seem to be a way to generate the steady-state value for P. I attempted this modification to get the value of X:
sol_dX_dt = odeint(func_dX_dt, 0.1, t, args=(odeint(func_dS_dt, 0.1, t, args=(X,)),))
It produces the error:
NameError: name 'X' is not defined
At this point I am not sure how to move forward.
(Edit 1: Added the original equations as images.)
[Image: first equation]
[Image: second and third equations]
You do not have to call odeint separately for each equation; instead, return a tuple of the derivatives from a single function, as I show below:
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

Q = 0.01
V = 2
Ys = 0.38
Sin = 4
Yp = 0.2
Xin = 0
umax = 0.65
Ks = 0.14
K1 = 0.48

def mu(S, umax, Ks, K1):
    return umax/((1 + Ks/S)*(1 + S/K1))

def dxdt(x, t, *args):
    X, S, P = x
    Q, V, Xin, Ys, Sin, Yp, umax, Ks, K1 = args
    m = mu(S, umax, Ks, K1)
    dXdt = (Q*Xin - Q*X)/V + m*X
    dSdt = (Q*Sin - Q*S)/V - m*X/Ys
    dPdt = -Q*P/V - m*X/Yp
    return dXdt, dSdt, dPdt

t = np.linspace(0, 200, 200)
X0 = 0.1
S0 = 0.1
P0 = 0.1
x0 = X0, S0, P0

sol = odeint(dxdt, x0, t, args=(Q, V, Xin, Ys, Sin, Yp, umax, Ks, K1))

plt.plot(t, sol[:, 0], 'r', label='X(t)')
plt.plot(t, sol[:, 1], 'g', label='S(t)')
plt.plot(t, sol[:, 2], 'b', label='P(t)')
plt.legend(loc='best')
plt.xlabel('t')
plt.grid()
plt.show()
Output:
[Image: plot of X(t), S(t) and P(t) over time]
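To get at the original goal, the production rate Q * P as a function of Q, you can sweep Q and take P at the final time point as the (approximate) steady-state value. A sketch along these lines, reusing dxdt, x0 and t from above and assuming 200 h is long enough to reach steady state (note that the sign of the production term in dPdt above follows the question's equations):

Qs = np.linspace(0.01, 0.25, 25)
production = []
for Q_i in Qs:
    sol_i = odeint(dxdt, x0, t, args=(Q_i, V, Xin, Ys, Sin, Yp, umax, Ks, K1))
    production.append(Q_i * sol_i[-1, 2])  # Q * P at the final time point

plt.plot(Qs, production)
plt.xlabel('Q (L/h)')
plt.ylabel('Q * P (g/h)')
plt.show()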