In this case, there are 3 ODEs that describe an SIR model. The issue is that I want to calculate which beta and gamma values best fit the data points in x_axis and y_axis. The method I'm currently using is to integrate the ODEs with odeint from the scipy library and then fit with curve_fit from the same library. How would you calculate the values of beta and gamma that fit the data points?
P.S. the current error is this: ValueError: operands could not be broadcast together with shapes (3,) (14,)
import numpy as np
import scipy.integrate as spi
from scipy.optimize import curve_fit

# initial values
S_I_R = (762/763, 1/763, 0)
x_axis = [m for m in range(1, 15)]
y_axis = [3, 8, 28, 75, 221, 291, 255, 235, 190, 125, 70, 28, 12, 5]

# ODEs that describe the system
def equation(SIR_Values, t, beta, gamma):
    Array = np.zeros((3))
    SIR = SIR_Values
    Array[0] = -beta * SIR[0] * SIR[1]
    Array[1] = beta * SIR[0] * SIR[1] - gamma * SIR[1]
    Array[2] = gamma * SIR[1]
    return Array

# Results = spi.odeint(equation, S_I_R, time)

# fitting the values
beta_values, gamma_values = curve_fit(equation, x_axis, y_axis)
import numpy as np
import scipy.integrate as spi
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

# Starting values
S0 = 762/763
I0 = 1/763
R0 = 0

x_axis = np.array([m for m in range(0, 15)])
y_axis = np.array([1, 3, 8, 28, 75, 221, 291, 255, 235, 190, 125, 70, 28, 12, 5])
y_axis = np.divide(y_axis, 763)

def sir_model(y, x, beta, gamma):
    S = -beta * y[0] * y[1]
    R = gamma * y[1]
    I = beta * y[0] * y[1] - gamma * y[1]
    return S, I, R

def fit_odeint(x, beta, gamma):
    # Integrate the SIR system and return only the infected compartment,
    # so the output has the same shape as x (and y_axis)
    return spi.odeint(sir_model, (S0, I0, R0), x, args=(beta, gamma))[:, 1]

popt, pcov = curve_fit(fit_odeint, x_axis, y_axis)
beta, gamma = popt

fitted = fit_odeint(x_axis, *popt)
plt.plot(x_axis, y_axis, 'o', label="infected per day")
plt.plot(x_axis, fitted, label="fitted graph")
plt.xlabel("Time (in days)")
plt.ylabel("Fraction of infected")
plt.title("Fitted beta and gamma values")
plt.legend()
plt.show()
As in the example from the scipy documentation, the function passed to curve_fit must return an array with the same shape as x_axis and y_axis, which is why fit_odeint returns only the infected column of the odeint result.
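One more point worth noting: curve_fit starts every parameter at 1.0 unless you pass p0, so the fit can depend on the starting point. A minimal sketch reusing fit_odeint, x_axis and y_axis from above; the p0 values here are just an assumed guess, not something from the original post:

p0 = [2.0, 0.5]                       # assumed initial guess for (beta, gamma)
popt, pcov = curve_fit(fit_odeint, x_axis, y_axis, p0=p0)
perr = np.sqrt(np.diag(pcov))         # one-standard-deviation errors on beta and gamma
print("beta = %.3f +/- %.3f, gamma = %.3f +/- %.3f" % (popt[0], perr[0], popt[1], perr[1]))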
I am writing this algorithm for my final-year project. I used gradient descent to find the best-fit line, and I also solved it in Excel using multiple regression; the values come out different.
The CSV file is attached here: https://drive.google.com/file/d/1-UaU34w3c5-VunYrVz9fD7vRb0c-XDqk/view?usp=sharing. The first 3 columns are the independent variables (x1, x2, x3) and the last is the dependent variable (y).
It's a separate question, but could you explain why the answer differs from the Excel values?
import numpy as np
import random
import pandas as pd

def gradientDescent(x, y, theta, alpha, m, numIterations):
    xTrans = x.transpose()
    for i in range(0, numIterations):
        hypothesis = np.dot(x, theta)
        loss = hypothesis - y
        cost = np.sum(loss ** 2) / (2 * m)
        print("Iteration %d | Cost: %f" % (i, cost))
        gradient = np.dot(xTrans, loss) / m
        theta = theta - alpha * gradient
    return theta

df = pd.read_csv(r'C:\Users\WELCOME\Desktop\FinalYearPaper\ConferencePaper\NewTrain.csv', delimiter=",", header=None)
df.columns = ['x0', 'Speed', 'Feed', 'DOC', 'Roughness']
print(df)

y = np.array(df['Roughness'])
#x = np.array(d)
x = np.array(df.drop(['Roughness'], axis=1))
#x[:,2:3] = 1.0
print(x)
print(y)

m, n = np.shape(x)
print(m, n)
numIterations = 50000
alpha = 0.000001
theta = np.ones(n)
theta = gradientDescent(x, y, theta, alpha, m, numIterations)
print(theta)
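As a cross-check on why the gradient-descent result differs from Excel, it helps to compare against the exact ordinary-least-squares solution, which is what Excel's multiple regression computes; with alpha = 0.000001 and unscaled features, 50000 iterations may stop well short of convergence. A minimal sketch reusing the x and y arrays loaded above:

# Closed-form least-squares fit for comparison with the gradient-descent theta.
theta_exact, residuals, rank, sv = np.linalg.lstsq(x, y, rcond=None)
print("closed-form theta:", theta_exact)
# A large gap between theta and theta_exact usually means the learning rate is
# too small / too few iterations, or the features need scaling.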
For the past few days, I have been trying to code this application of gradient descent for my final-year project in Mechanical Engineering. https://drive.google.com/open?id=1tIGqZ2Lb0sN4GEpgYEZLFvtmhigXnot0 The HTML file with the results is attached above; just download it and look at the results. There are only 3 values in theta, whereas x has 3 independent variables, so there should be 4 values in theta.
The code is as follows; the result is theta = [-0.03312393  0.94409351  0.99853041].
import numpy as np
import random
import pandas as pd

def gradientDescent(x, y, theta, alpha, m, numIterations):
    xTrans = x.transpose()
    for i in range(0, numIterations):
        hypothesis = np.dot(x, theta)
        loss = hypothesis - y
        # avg cost per example (the 2 in 2*m doesn't really matter here.
        # But to be consistent with the gradient, I include it)
        cost = np.sum(loss ** 2) / (2 * m)
        print("Iteration %d | Cost: %f" % (i, cost))
        # avg gradient per example
        gradient = np.dot(xTrans, loss) / m
        # update
        theta = theta - alpha * gradient
    return theta

df = pd.read_csv(r'C:\Users\WELCOME\Desktop\FinalYearPaper\ConferencePaper\NewTrain.csv', delimiter=",", header=None)

x = df.loc[:, 0:2]   # with header=None the columns are integers, not strings
y = df[3]
print(x)

m, n = np.shape(x)
numIterations = 200
alpha = 0.000001
theta = np.ones(n)
theta = gradientDescent(x, y, theta, alpha, m, numIterations)
print(theta)
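The reason theta has only 3 entries is that gradient descent returns one coefficient per column of x, and x here contains only the 3 feature columns with no intercept column. A minimal sketch (reusing the df and gradientDescent defined above) that prepends a column of ones so the first entry of theta becomes the intercept:

# Prepend a bias column of ones so theta gains an intercept term.
x = df.loc[:, 0:2].to_numpy(dtype=float)
y = df[3].to_numpy(dtype=float)
x = np.hstack([np.ones((x.shape[0], 1)), x])   # shape (m, 4)

m, n = x.shape                                 # n is now 4
theta = np.ones(n)
theta = gradientDescent(x, y, theta, alpha=0.000001, m=m, numIterations=200)
print(theta)                                   # 4 values: intercept + 3 coefficients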
I am trying to reproduce the gradient descent algorithm for linear regression from Andrew Ng's Machine Learning course in Python, but for some reason my implementation is not working correctly.
Here's my implementation in Octave; it works correctly:
function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters)
    m = length(y);  % number of training examples
    J_history = zeros(num_iters, 1);

    for iter = 1:num_iters
        prediction = X*theta;
        margin_error = prediction - y;

        gradient = 1/m * (alpha * (X' * margin_error));
        theta = theta - gradient;

        J_history(iter) = computeCost(X, y, theta);
    end
end
However, when I translate this to Python, for some reason it does not give me accurate results. The cost goes up rather than down.
Here's my implementation in Python:
def gradientDescent(x, y, theta, alpha, iters):
    m = len(y)
    J_history = np.matrix(np.zeros((iters, 1)))
    for i in range(iters):
        prediction = x * theta.T
        margin_error = prediction - y
        gradient = 1/m * (alpha * (x.T * margin_error))
        theta = theta - gradient
        J_history[i] = computeCost(x, y, theta)
    return theta, J_history
The code runs without errors. Please note this is theta:
theta = np.matrix(np.array([0,0]))
alpha and iters are set to this:
alpha = 0.01
iters = 1000
When I run opt_theta, cost = gradientDescent(x, y, theta, alpha, iters) and print out opt_theta, I get this:
matrix([[ 2.36890383e+16, -1.40798902e+16],
[ 2.47503758e+17, -2.36890383e+16]])
when I should get this:
matrix([[-3.24140214, 1.1272942 ]])
What am I doing wrong?
Edit:
Cost function
def computeCost(x, y, theta):
    # Get length of data set
    m = len(y)
    # We take theta transpose because theta is stored as a 1x2 matrix, e.g. [[0, 0]]
    prediction = x * theta.T
    J = 1/(2*m) * np.sum(np.power((prediction - y), 2))
    return J
Look at this:
>>> A = np.matrix([3,3,3])
>>> B = np.matrix([[1,1,1], [2,2,2]])
>>> A-B
matrix([[2, 2, 2],
[1, 1, 1]])
The matrices are broadcast together.
"It's because np.matrix inherits from np.ndarray. np.matrix overrides multiplication, but not addition and subtraction."
In your situation, theta (1x2) minus gradient (2x1) broadcasts to a 2x2 result. Try transposing the gradient before subtracting:
theta = theta - gradient.T
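For completeness, here is a minimal sketch of the loop with only the transpose added (everything else is unchanged from the code above):

def gradientDescent(x, y, theta, alpha, iters):
    m = len(y)
    J_history = np.matrix(np.zeros((iters, 1)))
    for i in range(iters):
        prediction = x * theta.T                           # (m x 2) * (2 x 1) -> (m x 1)
        margin_error = prediction - y                      # (m x 1)
        gradient = 1/m * (alpha * (x.T * margin_error))    # (2 x 1)
        theta = theta - gradient.T                         # transpose keeps theta (1 x 2)
        J_history[i] = computeCost(x, y, theta)
    return theta, J_history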
I am trying to develop an algorithm (using scipy.integrate.odeint()) that predicts the changing concentration of cells, substrate and product (i.e., X, S, P) over time until the system reaches steady state (~100 or 200 hours). The initial concentration of cells in the bioreactor is 0.1 g/L, and there is no glucose or product in the reactor initially. I want to test the algorithm for a range of different flow rates, Q, between 0.01 L/h and 0.25 L/h and analyze the impact of the flow rate on product production (i.e., Q * P in g/h). Eventually, I would like to generate a plot that shows product production rate (y-axis) versus flow rate, Q (x-axis). My goal is to estimate the flow rate that results in the maximum (or critical) production rate. This is my code so far:
from scipy.integrate import odeint
import numpy as np

# Constants
u_max = 0.65
K_s = 0.14
K_1 = 0.48
V = 2
X_in = 0
S_in = 4
Y_s = 0.38
Y_p = 0.2

# Variables
# Q - Flow Rate (L/h), value between 0.01 and 0.25 that produces best Q * P
# X - Cell Concentration (g/L)
# S - The glucose concentration (g/L)
# P - Product Concentration (g/L)

# Equations
def func_dX_dt(X, t, S):
    u = (u_max) / (1 + (K_s / S))
    dX_dt = (((Q * S_in) - (Q * S)) / V) + (u * X)
    return dX_dt

def func_dS_dt(S, t, X):
    u = (u_max) / (1 + (K_s / S))
    dS_dt = (((Q * S_in) - (Q * S)) / V) - (u * (X / Y_s))
    return dS_dt

def func_dP_dt(P, t, X, S):
    u = (u_max) / (1 + (K_s / S))
    dP_dt = ((-Q * P) / V) - (u * (X / Y_p))
    return dP_dt

t = np.linspace(0, 200, 200)

# Q placeholder
Q = 0.01

# Attempt to solve the Ordinary differential equations
sol_dX_dt = odeint(func_dX_dt, 0.1, t, args=(S,))
sol_dS_dt = odeint(func_dS_dt, 0.1, t, args=(X,))
sol_dP_dt = odeint(func_dP_dt, 0.1, t, args=(X, S))
In the program's current state there does not seem to be a way to generate the steady-state value for P. I attempted this modification to get the value of X:
sol_dX_dt = odeint(func_dX_dt, 0.1, t, args=(odeint(func_dS_dt, 0.1, t, args=(X,)),))
It produces the error:
NameError: name 'X' is not defined
At this point I am not sure how to move forward.
(Edit 1: Added the original equations as images — the first equation, and the second and third equations.)
You do not have to write a separate function and odeint call for each equation; return a tuple of all three derivatives from a single function, as shown below:
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt

Q = 0.01
V = 2
Ys = 0.38
Sin = 4
Yp = 0.2
Xin = 0
umax = 0.65
Ks = 0.14
K1 = 0.48

def mu(S, umax, Ks, K1):
    # Specific growth rate as a function of substrate concentration
    return umax/((1+Ks/S)*(1+S/K1))

def dxdt(x, t, *args):
    # Single right-hand side returning all three derivatives at once
    X, S, P = x
    Q, V, Xin, Ys, Sin, Yp, umax, Ks, K1 = args
    m = mu(S, umax, Ks, K1)
    dXdt = (Q*Xin - Q*X)/V + m*X
    dSdt = (Q*Sin - Q*S)/V - m*X/Ys
    dPdt = -Q*P/V - m*X/Yp
    return dXdt, dSdt, dPdt

t = np.linspace(0, 200, 200)
X0 = 0.1
S0 = 0.1
P0 = 0.1
x0 = X0, S0, P0

sol = odeint(dxdt, x0, t, args=(Q, V, Xin, Ys, Sin, Yp, umax, Ks, K1))

plt.plot(t, sol[:, 0], 'r', label='X(t)')
plt.plot(t, sol[:, 1], 'g', label='S(t)')
plt.plot(t, sol[:, 2], 'b', label='P(t)')
plt.legend(loc='best')
plt.xlabel('t')
plt.grid()
plt.show()
Output: (plot of X(t), S(t), and P(t) over time)
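To get from here to the original goal (production rate Q * P versus flow rate), one option is to reuse dxdt, x0 and t from above, loop Q over the 0.01 to 0.25 L/h range, and take the last time point of each solution as an approximate steady state. A minimal sketch; the grid of Q values and the use of the final sample as "steady state" are my assumptions, not part of the answer above:

Q_values = np.linspace(0.01, 0.25, 25)   # assumed grid of flow rates to test
production = []
for Q in Q_values:
    sol = odeint(dxdt, x0, t, args=(Q, V, Xin, Ys, Sin, Yp, umax, Ks, K1))
    P_ss = sol[-1, 2]                    # product concentration at the final time point
    production.append(Q * P_ss)          # production rate Q * P in g/h

plt.plot(Q_values, production, 'o-')
plt.xlabel('Q (L/h)')
plt.ylabel('Q * P (g/h)')
plt.title('Production rate vs flow rate')
plt.show()

print('Flow rate with maximum production rate: %.3f L/h' % Q_values[np.argmax(production)])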
I am trying to make a Gaussian fit over many data points. E.g. I have a 256 x 262144 array of data, where each set of 256 points needs to be fitted to a Gaussian distribution, and I need 262144 of them.
Sometimes the peak of the Gaussian distribution is outside the data range, so curve fitting is the best approach to get an accurate mean. Even if the peak is inside the range, curve fitting gives a better sigma because other data is not in the range.
I have this working for one data point, using code from http://www.scipy.org/Cookbook/FittingData .
I have tried to just repeat this algorithm, but it looks like it is going to take on the order of 43 minutes to solve this. Is there an already-written fast way of doing this in parallel or more efficiently?
from scipy import optimize
from numpy import *
import numpy

# Fitting code taken from: http://www.scipy.org/Cookbook/FittingData

class Parameter:
    def __init__(self, value):
        self.value = value

    def set(self, value):
        self.value = value

    def __call__(self):
        return self.value

def fit(function, parameters, y, x=None):
    def f(params):
        i = 0
        for p in parameters:
            p.set(params[i])
            i += 1
        return y - function(x)

    if x is None:
        x = arange(y.shape[0])
    p = [param() for param in parameters]
    optimize.leastsq(f, p)

def nd_fit(function, parameters, y, x=None, axis=0):
    """
    Tries to fit an n-dimensional array to the data as though each point along
    `axis` is a separate 1-D dataset.
    """
    y = y.swapaxes(0, axis)
    shape = y.shape
    axis_of_interest_len = shape[0]
    prod = numpy.array(shape[1:]).prod()
    y = y.reshape(axis_of_interest_len, prod)

    params = numpy.zeros([len(parameters), prod])
    for i in range(prod):
        print("at %d of %d" % (i, prod))
        fit(function, parameters, y[:, i], x)
        for p in range(len(parameters)):
            params[p, i] = parameters[p]()

    shape = (len(parameters),) + shape[1:]
    params = params.reshape(shape)
    return params
Note that the data isn't necessarily 256 x 262144 and I've done some fudging around in nd_fit to make this work.
The code I use to get this to work is:
from curve_fitting import *
import numpy

frames = numpy.load("data.npy")
y = frames[:, 0, 0, 20, 40]
x = numpy.arange(0, 512, 2)   # an array (rather than a range) so x - mu() works element-wise
mu = Parameter(x[argmax(y)])
height = Parameter(max(y))
sigma = Parameter(50)

def f(x):
    return height() * exp(-((x - mu()) / sigma()) ** 2)

ls_data = nd_fit(f, [mu, sigma, height], frames, x, 0)
Note: The solution posted below by @JoeKington is great and solves really fast. However, it doesn't appear to work unless the significant area of the Gaussian is inside the data range. I will have to test whether the mean is still accurate, though, as that is the main thing I use this for.
The easiest thing to do is to linearize the problem. You're using a non-linear, iterative method which will be slower than a linear least-squares solution.
Basically, you have:
y = height * exp(-(x - mu)^2 / (2 * sigma^2))
To make this a linear equation, take the (natural) log of both sides:
ln(y) = ln(height) - (x - mu)^2 / (2 * sigma^2)
This then simplifies to the polynomial:
ln(y) = -x^2 / (2 * sigma^2) + x * mu / sigma^2 - mu^2 / (2 * sigma^2) + ln(height)
We can recast this in a bit simpler form:
ln(y) = A * x^2 + B * x + C
where:
A = -1 / (2 * sigma^2)
B = mu / sigma^2
C = -mu^2 / (2 * sigma^2) + ln(height)
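Inverting these relations recovers the Gaussian parameters from the fitted polynomial coefficients, which is exactly what the "solve for the desired parameters" step in the code below does:
sigma = sqrt(-1 / (2 * A))
mu = B * sigma^2
height = exp(C + mu^2 / (2 * sigma^2))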
However, there's one catch. This will become unstable in the presence of noise in the "tails" of the distribution.
Therefore, we need to use only the data near the "peaks" of the distribution. It's easy enough to only include data that falls above some threshold in the fitting. In this example, I'm only including data that's greater than 20% of the maximum observed value for a given gaussian curve that we're fitting.
Once we've done this, though, it's rather fast. Solving for 262144 different Gaussian curves takes only ~1 minute (be sure to remove the plotting portion of the code if you run it on something that large...). It's also quite easy to parallelize, if you want...
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import itertools

def main():
    x, data = generate_data(256, 6)
    model = [invert(x, y) for y in data.T]
    sigma, mu, height = [np.array(item) for item in zip(*model)]
    prediction = gaussian(x, sigma, mu, height)

    plot(x, data, linestyle='none', marker='o')
    plot(x, prediction, linestyle='-')
    plt.show()

def invert(x, y):
    # Use only data within the "peak" (20% of the max value...)
    key_points = y > (0.2 * y.max())
    x = x[key_points]
    y = y[key_points]

    # Fit a 2nd order polynomial to the log of the observed values
    A, B, C = np.polyfit(x, np.log(y), 2)

    # Solve for the desired parameters...
    sigma = np.sqrt(-1 / (2.0 * A))
    mu = B * sigma**2
    height = np.exp(C + 0.5 * mu**2 / sigma**2)
    return sigma, mu, height

def generate_data(numpoints, numcurves):
    np.random.seed(3)
    x = np.linspace(0, 500, numpoints)

    height = 100 * np.random.random(numcurves)
    mu = 200 * np.random.random(numcurves) + 200
    sigma = 100 * np.random.random(numcurves) + 0.1
    data = gaussian(x, sigma, mu, height)

    noise = 5 * (np.random.random(data.shape) - 0.5)
    return x, data + noise

def gaussian(x, sigma, mu, height):
    data = -np.subtract.outer(x, mu)**2 / (2 * sigma**2)
    return height * np.exp(data)

def plot(x, ydata, ax=None, **kwargs):
    if ax is None:
        ax = plt.gca()
    # 'axes.color_cycle' has been removed from matplotlib; use the prop_cycle colors instead
    colorcycle = itertools.cycle(mpl.rcParams['axes.prop_cycle'].by_key()['color'])
    for y, color in zip(ydata.T, colorcycle):
        ax.plot(x, y, color=color, **kwargs)

main()
The only thing we'd need to change for a parallel version is the main function. (We also need a dummy function because multiprocessing.Pool.imap can't supply additional arguments to its function...) It would look something like this:
def parallel_main():
    import multiprocessing
    p = multiprocessing.Pool()
    x, data = generate_data(256, 262144)
    # the builtin zip replaces Python 2's itertools.izip
    args = zip(itertools.repeat(x), data.T)
    model = p.imap(parallel_func, args, chunksize=500)
    sigma, mu, height = [np.array(item) for item in zip(*model)]
    prediction = gaussian(x, sigma, mu, height)

def parallel_func(args):
    return invert(*args)
Edit: In cases where the simple polynomial fitting isn't working well, try weighting the problem by the y-values, as mentioned in the link/paper that @tslisten shared (and Stefan van der Walt implemented, though my implementation is a bit different).
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import itertools

def main():
    def run(x, data, func, threshold=0):
        model = [func(x, y, threshold=threshold) for y in data.T]
        sigma, mu, height = [np.array(item) for item in zip(*model)]
        prediction = gaussian(x, sigma, mu, height)

        plt.figure()
        plot(x, data, linestyle='none', marker='o', markersize=4)
        plot(x, prediction, linestyle='-', lw=2)

    x, data = generate_data(256, 6, noise=100)
    threshold = 50

    run(x, data, weighted_invert, threshold=threshold)
    plt.title('Weighted by Y-Value')

    run(x, data, invert, threshold=threshold)
    plt.title('Un-weighted Linear Inverse')

    plt.show()

def invert(x, y, threshold=0):
    mask = y > threshold
    x, y = x[mask], y[mask]

    # Fit a 2nd order polynomial to the log of the observed values
    A, B, C = np.polyfit(x, np.log(y), 2)

    # Solve for the desired parameters...
    sigma, mu, height = poly_to_gauss(A, B, C)
    return sigma, mu, height

def poly_to_gauss(A, B, C):
    sigma = np.sqrt(-1 / (2.0 * A))
    mu = B * sigma**2
    height = np.exp(C + 0.5 * mu**2 / sigma**2)
    return sigma, mu, height

def weighted_invert(x, y, weights=None, threshold=0):
    mask = y > threshold
    x, y = x[mask], y[mask]
    if weights is None:
        weights = y
    else:
        weights = weights[mask]

    d = np.log(y)
    G = np.ones((x.size, 3), dtype=float)
    G[:, 0] = x**2
    G[:, 1] = x

    model, _, _, _ = np.linalg.lstsq((G.T * weights**2).T, d * weights**2, rcond=None)
    return poly_to_gauss(*model)

def generate_data(numpoints, numcurves, noise=None):
    np.random.seed(3)
    x = np.linspace(0, 500, numpoints)

    height = 7000 * np.random.random(numcurves)
    mu = 1100 * np.random.random(numcurves)
    sigma = 100 * np.random.random(numcurves) + 0.1
    data = gaussian(x, sigma, mu, height)

    if noise is None:
        noise = 0.1 * height.max()
    noise = noise * (np.random.random(data.shape) - 0.5)
    return x, data + noise

def gaussian(x, sigma, mu, height):
    data = -np.subtract.outer(x, mu)**2 / (2 * sigma**2)
    return height * np.exp(data)

def plot(x, ydata, ax=None, **kwargs):
    if ax is None:
        ax = plt.gca()
    # 'axes.color_cycle' has been removed from matplotlib; use the prop_cycle colors instead
    colorcycle = itertools.cycle(mpl.rcParams['axes.prop_cycle'].by_key()['color'])
    for y, color in zip(ydata.T, colorcycle):
        #kwargs['color'] = kwargs.get('color', color)
        ax.plot(x, y, color=color, **kwargs)

main()
If that's still giving you trouble, then try iteratively reweighting the least-squares problem (the final "best" recommended method in the link @tslisten mentioned). Keep in mind that this will be considerably slower, however.
def iterative_weighted_invert(x, y, threshold=0, numiter=5):
    # Each pass re-weights the fit by the previous model's prediction
    last_y = y
    for _ in range(numiter):
        model = weighted_invert(x, y, weights=last_y, threshold=threshold)
        last_y = gaussian(x, *model)
    return model
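A usage sketch of my own (reusing generate_data, gaussian, and weighted_invert from above) showing how the iterative version slots into the same per-curve loop as before:

x, data = generate_data(256, 6, noise=100)
model = [iterative_weighted_invert(x, y, threshold=50, numiter=5) for y in data.T]
sigma, mu, height = [np.array(item) for item in zip(*model)]
prediction = gaussian(x, sigma, mu, height)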