Fit vector autoregression with statsmodels - python

Consider the following signals and responses:
import numpy as np
import matplotlib.pyplot as plt

def s1(t, delay):
    """An example of a noisy signal (Heaviside function)"""
    return (t > delay).astype(int) + np.random.normal(scale=0.01, size=t.shape)

def s2(t, delay):
    """An example of another noisy signal (delayed sine)"""
    return np.sin(2*np.pi*(t - delay)/36) * (t > delay).astype(int) + np.random.normal(scale=0.01, size=t.shape)

def response(signal, delay):
    """An example of a noisy delayed response (delayed identity function)"""
    delayed_signal = np.append(np.zeros(shape=delay), signal[:-delay])
    return delayed_signal + np.random.normal(scale=0.01, size=signal.shape)

t = np.arange(0, 256, 1)
x1 = s1(t, delay=12)
x2 = s2(t, delay=36)
# the response is the sum of the two signals with different delays
y = response(x1, delay=24) + response(x2, delay=48)

plt.figure(1, figsize=(10, 10))
plt.subplot(511)
plt.ylabel('signal 1')
plt.plot(t, x1, '.')
plt.subplot(512)
plt.ylabel('signal 2')
plt.plot(t, x2, '.')
plt.subplot(513)
plt.ylabel('response')
plt.plot(t, y, '.')
I want to recover, using vector autoregression (VAR) in Python, the fact that the signal y responds to x1 with a delay of 24 and to x2 with a delay of 48. I would prefer to use Lasso to reduce the number of relevant parameters, and to model only y as a function of x1 and x2, not x1 or x2 as functions of themselves and y.
How can this be done in Python?

I was able to achieve this using sklearn as follows (continuing the code above):
from sklearn.linear_model import Lasso

def build_matrix(x):
    """
    Converts a signal into a matrix of delayed signals.
    I.e. `row_j` is each time `t`, `column_i` is the signal at `t - i`.
    It assumes that no past signal => no signal: each row is left-padded with zeros.
    For example, for 3 times, the matrix would be:
    ```
    [0, 0   , 0   ]  (-> y[0])
    [0, 0   , x[0]]  (-> y[1])
    [0, x[0], x[1]]  (-> y[2])
    ```
    I.e.
    the parameter fitted to column 2, a2, is the influence of `x[t - 1]` on `y[t]`;
    the parameter fitted to column 1, a1, is the influence of `x[t - 2]` on `y[t]`.
    It assumes that we only measure x[t] when we measure y[t], which is why the column for `x[t]` does not appear.
    """
    data_x = []
    for i in range(len(x)):
        data_x.append(np.append(np.zeros(len(x) - i), x[:i]))
    return np.array(data_x)

class VARClassifier:
    """
    A wrapper around any sklearn linear estimator that adds a method to return the fitted response functions.
    """
    def __init__(self, classifier):
        self.classifier = classifier
        self.number_of_signals_ = None
        self.time_len_ = None

    def _transform_x(self, x):
        for x_i in x:
            assert len(x_i) == self.time_len_
            assert len(x_i.shape) == 1, 'Each of the elements must be a time-series (1D)'
        return np.concatenate(tuple(build_matrix(x_i) for x_i in x), axis=1)

    def fit(self, x, y):
        self.number_of_signals_ = len(x)
        self.time_len_ = len(x[0])
        return self.classifier.fit(self._transform_x(x), y)

    def predict(self, x):
        return self.classifier.predict(self._transform_x(x))

    @property
    def response_functions(self):
        # ::-1 because the coefficients are reversed, see `build_matrix`
        return [self.classifier.coef_[i*self.time_len_:(i+1)*self.time_len_][::-1]
                for i in range(self.number_of_signals_)]

    def __getattr__(self, item):
        return getattr(self.classifier, item)

classifier = VARClassifier(Lasso(alpha=0.1))
classifier.fit((x1, x2), y)

plt.subplot(514)
plt.xlabel('delay')
plt.ylabel('fitted response\nfunction 1')
r1, r2 = classifier.response_functions
plt.plot(t, r1, '.')
plt.subplot(515)
plt.xlabel('delay')
plt.ylabel('fitted response\nfunction 2')
plt.plot(t, r2, '.')
plt.savefig('fit_var.png')
However, this does not have all the goodies from statsmodels (e.g. confidence intervals, p-values, model metrics, etc.)
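One way to get those diagnostics while keeping the "only y as a function of lagged x1 and x2" structure is to fit the same lagged design matrix with statsmodels OLS (and its fit_regularized method when you want an L1 penalty). The following is only a minimal sketch under that assumption, reusing build_matrix, x1, x2 and y from above; it is not the statsmodels VAR class, which would model all series jointly.

import statsmodels.api as sm

# same design matrix as the sklearn version: lagged copies of each input signal
X = np.concatenate((build_matrix(x1), build_matrix(x2)), axis=1)

ols_result = sm.OLS(y, X).fit()
print(ols_result.summary())      # coefficients, confidence intervals, p-values, R^2, AIC/BIC
print(ols_result.conf_int())     # per-coefficient confidence intervals

# L1-penalised variant (sparse coefficients like Lasso, but no standard errors)
lasso_result = sm.OLS(y, X).fit_regularized(alpha=0.1, L1_wt=1.0)
print(lasso_result.params)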

Related

How do I use scipy curve_fit with a custom objective function?

I wish to do a curve fit to some tabulated data using my own objective function, not the in-built normal least squares.
I can make the normal curve_fit work, but I can't understand how to properly formulate my objective function to feed it into the method.
I am interested in knowing the values of my fitted curve at each tabulated x value.
import math
import numpy as np
import scipy as sp
from scipy import optimize, special
import matplotlib.pyplot as plt

x = np.array([-5.0,-4.5,-4.0,-3.5,-3.0,-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0,6.5,7.0,7.5,8.0,8.5,9.0,9.5,10.0])
y = np.array([300,300,1000,350,340,1230,500,360,360,920,365,365,350,1000,375,1050,380,385,385,390,400,395,780,410,420,420,415,435,440,435,455])
e = np.array([math.sqrt(i) for i in y])  # uncertainty in y values

def test_func(x, a0, a1):
    """ This is the function I want to fit to my data """
    return a0 + a1*x

def norm_residual(test_func, x, y, e, params):
    """ This calculates the normalised residuals, given the tabulated data and function parameters """
    yhat = test_func(x, *params)
    z = (y - yhat)/e
    return z

def f(z):
    """ This modifies the normalised residual value, depending on its sign. """
    if z <= 0:
        return z**2
    else:
        return 6*np.log(2*z/(np.sqrt(math.pi) * sp.special.erf(z/np.sqrt(2)))) - 3*np.log(2)

def objective(test_func, x, y, e, params):
    """ This returns the sum of the modified normalised residuals. Smaller is better. """
    z = norm_residual(test_func, x, y, e, params)
    return np.sum(np.array([f(i) for i in z]))

# normal scipy curve fit
params, params_covariance = sp.optimize.curve_fit(test_func, x, y, p0=[0, 0])

plt.scatter(x, y, label='Data')
plt.plot(x, test_func(x, params[0], params[1]), label='Fitted function', color="orange")
plt.legend(loc='best')
plt.show()

# how do I use my objective function to do my curve fit?
This is what I came up with, for my slightly more realistic requirements.
Lesson: vectorise everything! Don't just wrap it in a np.vectorize function call. I got a speed-up of ~100x by doing so.
Some guidance taken from https://hernandis.me/2020/04/05/three-examples-of-nonlinear-least-squares-fitting-in-python-with-scipy.html
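As a side illustration (not part of the original code; timings are machine-dependent), the difference between wrapping a scalar function in np.vectorize and writing the same logic with np.where looks like this:

import timeit
import numpy as np

def scalar_f(z):
    # branchy scalar version
    return z**2 if z <= 0 else np.sqrt(z)

wrapped = np.vectorize(scalar_f)   # convenient, but still a Python-level loop

def vectorised(z):
    # both branches are evaluated on the whole array, then selected elementwise
    return np.where(z <= 0, np.square(z), np.sqrt(np.abs(z)))

z = np.random.randn(100_000)
print(timeit.timeit(lambda: wrapped(z), number=10))
print(timeit.timeit(lambda: vectorised(z), number=10))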
import numpy as np
import scipy as sp
from scipy import optimize, special
import matplotlib.pyplot as plt
import math

x_data = np.array([-5.0,-4.5,-4.0,-3.5,-3.0,-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0,5.5,6.0,6.5,7.0,7.5,8.0,8.5,9.0,9.5,10.0])
y_data = np.array([300,300,1000,350,340,1230,500,360,360,920,365,365,350,1000,375,1050,380,385,385,390,400,395,780,410,420,420,415,435,440,435,455])
e_data = np.array([math.sqrt(i) for i in y_data])  # uncertainty in y values

# https://hernandis.me/2020/04/05/three-examples-of-nonlinear-least-squares-fitting-in-python-with-scipy.html
def model(params, x):
    """Calculates the model, given params and x. Is vectorised; can be used with numpy.arrays"""
    a0, a1 = params
    return a0 + a1 * x

def v_f(z):
    """Modifies the residual. Used when you need to calc your own chi2 value. Is vectorised; can be used with numpy.arrays"""
    return np.where(z <= 0, np.square(z), 6*np.log(z/sp.special.erf(z*0.7071067811865475)) - 1.3547481158683645)

def v_f_2(z):
    """Modifies the residual. Used when chi2 is calc'd for you. Is vectorised; can be used with numpy.arrays"""
    return np.where(z <= 0, z, np.sqrt(6*np.log(z/sp.special.erf(z*0.7071067811865475)) - 1.3547481158683645))

def objective(params, model_func, data, v_modify_residuals_func=None):
    """ Calculates the residuals given a model and data. Is vectorised; can be used with numpy.arrays """
    if len(data) == 3:
        xd, yd, ed = data
    elif len(data) == 2:
        xd, yd = data
        ed = np.ones(len(xd))
    r = (yd - model_func(params, xd)) / ed  # r is an array of residuals
    if v_modify_residuals_func is not None:
        r = v_modify_residuals_func(r)
    return r

def objective_sum(params, model_func, data, modify_residuals_func=None):
    """ Calculates the sum of the residuals given a model and data. Used when you need to calc your own chi2 value. Is vectorised; can be used with numpy.arrays """
    r = objective(params, model_func, data, modify_residuals_func)
    return np.sum(r)

def v_cheb(n, x):
    """ Calculate a Chebyshev polynomial. -1.0 <= x <= 1.0, n >= 0, int. Is vectorised; can be used with numpy.arrays """
    return np.cos(n * np.arccos(x))

def bkg_model(params, x):
    """ Calculate a bkg curve from a number of Chebyshev polynomials. Polynomial degree given by len(params). Is vectorised; can be used with numpy.arrays """
    r = 0
    for i, p in enumerate(params):
        r += p * v_cheb(i, x)
    return r

def v_normaliseList(nparray):
    """ Given a monotonically increasing x-ordinate array, normalise values to the range -1 <= x <= 1. Is vectorised; can be used with numpy.arrays """
    min_ = nparray[0]
    max_ = nparray[-1]
    r = (2*(nparray - min_)/(max_ - min_)) - 1
    return r

initial_params = [0, 0]

# least_squares takes an array of residuals, r, and minimises Sum(r**2)
results1 = sp.optimize.least_squares(objective,
                                     initial_params,
                                     method='lm',
                                     args=[bkg_model,
                                           [v_normaliseList(x_data), y_data, e_data],
                                           v_f_2])

# minimize takes a scalar, r, and minimises r
results2 = sp.optimize.minimize(objective_sum,
                                initial_params,
                                # method='SLSQP',
                                args=(bkg_model,
                                      (v_normaliseList(x_data), y_data, e_data),
                                      v_f))

print(results1.x)
print(results2.x)
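If you also want rough parameter uncertainties from the least_squares result, one common approach is to build a covariance estimate from the returned Jacobian. This is only an approximation that assumes roughly Gaussian residuals near the optimum, which the modified residuals here satisfy only loosely:

J = results1.jac                         # Jacobian at the solution, shape (n_points, n_params)
dof = len(y_data) - len(results1.x)      # degrees of freedom
s_sq = 2 * results1.cost / dof           # residual variance; cost is 0.5 * sum(residuals**2)
cov = np.linalg.inv(J.T @ J) * s_sq      # approximate covariance matrix
perr = np.sqrt(np.diag(cov))             # approximate 1-sigma parameter uncertainties
print(perr)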

How to calculate "relative error in the sum of squares" and "relative error in the approximate solution" from least squares method?

I have implemented a 3D Gaussian fit using scipy.optimize.leastsq and now I would like to tweak the arguments ftol and xtol to optimize performance. However, I don't understand the "units" of these two parameters, which makes it hard to choose them properly. Is it possible to calculate these two quantities from the results? That would give me an understanding of how to choose them. My data is numpy arrays of np.uint8. I tried to read the FORTRAN source code of MINPACK, but my FORTRAN knowledge is zero. I also read up on the Levenberg-Marquardt algorithm, but I could not really get a number that was below ftol, for example.
Here is a minimal example of what I do:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq

class gaussian_model:
    def __init__(self):
        self.prev_iter_model = None
        self.f_vals = []

    def gaussian_1D(self, coeffs, xx):
        A, sigma, mu = coeffs
        # Center rotation around peak center
        x0 = xx - mu
        model = A*np.exp(-(x0**2)/(2*(sigma**2)))
        return model

    def residuals(self, coeffs, I_obs, xx, model_func):
        model = model_func(coeffs, xx)
        residuals = I_obs - model
        if self.prev_iter_model is not None:
            self.f = np.sum(((model - self.prev_iter_model)/model)**2)
            self.f_vals.append(self.f)
        self.prev_iter_model = model
        return residuals

# x data
x_start = 1
x_stop = 10
num = 100
xx, dx = np.linspace(x_start, x_stop, num, retstep=True)

# Simulated data with some noise
A, s_x, mu = 10, 0.5, 3
coeffs = [A, s_x, mu]
model = gaussian_model()
yy = model.gaussian_1D(coeffs, xx)
noise_ampl = 0.5
noise = np.random.normal(0, noise_ampl, size=num)
yy += noise

# LM Least squares
initial_guess = [1, 1, 1]
pred_coeffs, cov_x, info, mesg, ier = leastsq(model.residuals, initial_guess,
                                              args=(yy, xx, model.gaussian_1D),
                                              ftol=1E-6, full_output=True)

yy_fit = model.gaussian_1D(pred_coeffs, xx)
rel_SSD = np.sum(((yy - yy_fit)/yy)**2)
RMS_SSD = np.sqrt(rel_SSD/num)

print(RMS_SSD)
print(model.f)
print(model.f_vals)

fig, ax = plt.subplots(1, 2)
# Plot results
ax[0].scatter(xx, yy)
ax[0].plot(xx, yy_fit, c='r')
ax[1].scatter(range(len(model.f_vals)), model.f_vals, c='r')
# ax[1].set_ylim(0, 1E-6)
plt.show()
rel_SSD is around 1 and definitely not something below ftol = 1E-6.
EDIT: Based on user12750353's answer below, I updated my minimal example to try to recreate how lmdif determines termination with ftol. The problem is that my f_vals are too small, so they are not the right values. The reason I would like to recreate this is that I would like to see what kind of numbers I am getting in my main code, to decide on an ftol that would terminate the fitting process earlier.
Since you are giving a function without the gradient, the method called is lmdif (from MINPACK). Instead of gradients it will use a forward-difference gradient estimate, f(x + delta) - f(x) ~ delta * df(x)/dx (I will write x as if it were the parameter).
In the lmdif documentation you find the following description:
c ftol is a nonnegative input variable. termination
c occurs when both the actual and predicted relative
c reductions in the sum of squares are at most ftol.
c therefore, ftol measures the relative error desired
c in the sum of squares.
c
c xtol is a nonnegative input variable. termination
c occurs when the relative error between two consecutive
c iterates is at most xtol. therefore, xtol measures the
c relative error desired in the approximate solution.
Looking in the code, the actual reduction actred = 1 - (fnorm1/fnorm)**2 is what you calculated for rel_SSD, but between the last two iterations, not between the fitted function and the target points.
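To state that concretely (my reading of the lmdif source, so treat it as a sketch): if $f_k = \sqrt{\sum_i r_i^2}$ is the residual norm at iteration $k$, the quantity monitored for ftol is the actual reduction $\mathrm{actred}_k = 1 - (f_{k+1}/f_k)^2$, and lmdif declares convergence on ftol roughly when both this and the corresponding predicted reduction (computed from the linearised model) drop to ftol or below.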
Example
The problem here is that we need to discover what values the internal variables assume. An attempt to do so is to save the coefficients and the residual norm every time the function is called, as follows.
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq

class gaussian_model:
    def __init__(self):
        self.prev_iter_model = None
        self.fnorm = []
        self.x = []

    def gaussian_1D(self, coeffs, xx):
        A, sigma, mu = coeffs
        # Center rotation around peak center
        x0 = xx - mu
        model = A*np.exp(-(x0**2)/(2*(sigma**2)))
        grad = np.array([
            model / A,
            model * x0**2 / (sigma**3),
            model * 2 * x0 / (2*(sigma**2))
        ]).transpose()
        return model, grad

    def residuals(self, coeffs, I_obs, xx, model_func):
        model, grad = model_func(coeffs, xx)
        residuals = I_obs - model
        self.x.append(np.copy(coeffs))
        self.fnorm.append(np.sqrt(np.sum(residuals**2)))
        return residuals

    def grad(self, coeffs, I_obs, xx, model_func):
        model, grad = model_func(coeffs, xx)
        residuals = I_obs - model
        return -grad

    def plot_progress(self):
        x = np.array(self.x)
        dx = np.sqrt(np.sum(np.diff(x, axis=0)**2, axis=1))
        plt.plot(dx / np.sqrt(np.sum(x[1:, :]**2, axis=1)))
        fnorm = np.array(self.fnorm)
        plt.plot(1 - (fnorm[1:]/fnorm[:-1])**2)
        # first curve is the relative step size, second is the actual reduction of the residual norm
        plt.legend(['$||\Delta x||$', '$||\Delta f||$'], loc='upper left')

# x data
x_start = 1
x_stop = 10
num = 100
xx, dx = np.linspace(x_start, x_stop, num, retstep=True)

# Simulated data with some noise
A, s_x, mu = 10, 0.5, 3
coeffs = [A, s_x, mu]
model = gaussian_model()
yy, _ = model.gaussian_1D(coeffs, xx)
noise_ampl = 0.5
noise = np.random.normal(0, noise_ampl, size=num)
yy += noise
Then we can see the relative variation of $x$ and $f$
initial_guess = [1, 1, 1]
pred_coeffs, cov_x, info, mesg, ier = leastsq(model.residuals, initial_guess,
                                              args=(yy, xx, model.gaussian_1D),
                                              xtol=1e-6,
                                              ftol=1e-6, full_output=True)

plt.figure(figsize=(14, 6))
plt.subplot(121)
model.plot_progress()
plt.yscale('log')
plt.grid()
plt.subplot(122)
yy_fit, _ = model.gaussian_1D(pred_coeffs, xx)
# Plot results
plt.scatter(xx, yy)
plt.plot(xx, yy_fit, c='r')
plt.show()
The problem with this is that the function is evaluated both to compute f and to compute the gradient of f. To produce a cleaner plot, you can pass Dfun so that func is evaluated only once per iteration.
# x data
x_start = 1
x_stop = 10
num = 100
xx, dx = np.linspace(x_start, x_stop, num, retstep=True)

# Simulated data with some noise
A, s_x, mu = 10, 0.5, 3
coeffs = [A, s_x, mu]
model = gaussian_model()
yy, _ = model.gaussian_1D(coeffs, xx)
noise_ampl = 0.5
noise = np.random.normal(0, noise_ampl, size=num)
yy += noise

# LM Least squares
initial_guess = [1, 1, 1]
pred_coeffs, cov_x, info, mesg, ier = leastsq(model.residuals, initial_guess,
                                              args=(yy, xx, model.gaussian_1D),
                                              Dfun=model.grad,
                                              xtol=1e-6,
                                              ftol=1e-6, full_output=True)

plt.figure(figsize=(14, 6))
plt.subplot(121)
model.plot_progress()
plt.yscale('log')
plt.grid()
plt.subplot(122)
yy_fit, _ = model.gaussian_1D(pred_coeffs, xx)
# Plot results
plt.scatter(xx, yy)
plt.plot(xx, yy_fit, c='r')
plt.show()
Well, the value I am obtaining for xtol is not exactly what is in the lmdif implementation.
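For what it's worth, my reading of the lmdif source (take this as a hedged sketch, not a definitive statement) is that the xtol test is not literally $\|\Delta x\|/\|x\|$ between iterates: lmdif compares the trust-region radius $\Delta$ against the scaled parameter norm and stops when $\Delta \le \mathrm{xtol}\cdot\|D\,x\|$, where $D$ is the internal diagonal scaling. That would explain why the curve plotted above only approximates what lmdif actually checks.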

Python curve_fit with multiple independent variables

Python's curve_fit calculates the best-fit parameters for a function with a single independent variable, but is there a way, using curve_fit or something else, to fit for a function with multiple independent variables? For example:
def func(x, y, a, b, c):
    return log(a) + b*log(x) + c*log(y)
where x and y are the independent variables and we would like to fit for a, b, and c.
You can pass curve_fit a multi-dimensional array for the independent variables, but then your func must accept the same thing. For example, calling this array X and unpacking it to x, y for clarity:
import numpy as np
from scipy.optimize import curve_fit

def func(X, a, b, c):
    x, y = X
    return np.log(a) + b*np.log(x) + c*np.log(y)

# some artificially noisy data to fit
x = np.linspace(0.1, 1.1, 101)
y = np.linspace(1., 2., 101)
a, b, c = 10., 4., 6.
z = func((x, y), a, b, c) * 1 + np.random.random(101) / 100

# initial guesses for a, b, c:
p0 = 8., 2., 7.
print(curve_fit(func, (x, y), z, p0))
Gives the fit:
(array([ 9.99933937,  3.99710083,  6.00875164]),
 array([[  1.75295644e-03,   9.34724308e-05,  -2.90150983e-04],
        [  9.34724308e-05,   5.09079478e-06,  -1.53939905e-05],
        [ -2.90150983e-04,  -1.53939905e-05,   4.84935731e-05]]))
optimizing a function with multiple input dimensions and a variable number of parameters
This example shows how to fit a polynomial with a two dimensional input (R^2 -> R) by an increasing number of coefficients. The design is very flexible so that the callable f from curve_fit is defined once for any number of non-keyword arguments.
minimal reproducible example
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

def poly2d(xy, *coefficients):
    x = xy[:, 0]
    y = xy[:, 1]
    proj = x + y
    res = 0
    for order, coef in enumerate(coefficients):
        res += coef * proj ** order
    return res

nx = 31
ny = 21
range_x = [-1.5, 1.5]
range_y = [-1, 1]
target_coefficients = (3, 0, -19, 7)

xs = np.linspace(*range_x, nx)
ys = np.linspace(*range_y, ny)
im_x, im_y = np.meshgrid(xs, ys)
xdata = np.c_[im_x.flatten(), im_y.flatten()]
im_target = poly2d(xdata, *target_coefficients).reshape(ny, nx)

fig, axs = plt.subplots(2, 3, figsize=(29.7, 21))
axs = axs.flatten()

ax = axs[0]
ax.set_title('Unknown polynomial P(x+y)\n[secret coefficients: ' + str(target_coefficients) + ']')
sm = ax.imshow(
    im_target,
    cmap=plt.get_cmap('coolwarm'),
    origin='lower'
)
fig.colorbar(sm, ax=ax)

for order in range(5):
    ydata = im_target.flatten()
    popt, pcov = curve_fit(poly2d, xdata=xdata, ydata=ydata, p0=[0]*(order+1))
    im_fit = poly2d(xdata, *popt).reshape(ny, nx)

    ax = axs[1+order]
    title = 'Fit O({:d}):'.format(order)
    for o, p in enumerate(popt):
        if o % 2 == 0:
            title += '\n'
        if o == 0:
            title += ' {:=-{w}.1f} (x+y)^{:d}'.format(p, o, w=int(np.log10(max(abs(p), 1))) + 5)
        else:
            title += ' {:=+{w}.1f} (x+y)^{:d}'.format(p, o, w=int(np.log10(max(abs(p), 1))) + 5)
    title += '\nrms: {:.1f}'.format(np.mean((im_fit - im_target)**2)**.5)
    ax.set_title(title)
    sm = ax.imshow(
        im_fit,
        cmap=plt.get_cmap('coolwarm'),
        origin='lower'
    )
    fig.colorbar(sm, ax=ax)

for ax in axs.flatten():
    ax.set_xlabel('x')
    ax.set_ylabel('y')

plt.show()
P.S. The concept of this answer is identical to my other answer here, but the code example is much clearer. When I get the time, I will delete the other answer.
Fitting to an unknown number of parameters
In this example, we try to reproduce some measured data measData.
In this example measData is generated by the function measuredData(x, a=.2, b=-2, c=-.8, d=.1). In practice, we might have measured measData in some way, so we have no idea how it is described mathematically. Hence the fit.
We fit with a polynomial, which is described by the function polynomFit(inp, *args). As we want to try out different orders of polynomials, it is important to be flexible in the number of input parameters.
The independent variables (x and y in your case) are encoded in the 'columns'/second dimension of inp.
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit

def measuredData(inp, a=.2, b=-2, c=-.8, d=.1):
    x = inp[:, 0]
    y = inp[:, 1]
    return a + b*x + c*x**2 + d*x**3 + y

def polynomFit(inp, *args):
    x = inp[:, 0]
    y = inp[:, 1]
    res = 0
    for order in range(len(args)):
        print(14, order, args[order], x)
        res += args[order] * x**order
    return res + y

inpData = np.linspace(0, 10, 20).reshape(-1, 2)
inpDataStr = ['({:.1f},{:.1f})'.format(a, b) for a, b in inpData]
measData = measuredData(inpData)

fig, ax = plt.subplots()
ax.plot(np.arange(inpData.shape[0]), measData, label='measured', marker='o', linestyle='none')

for order in range(5):
    print(27, inpData)
    print(28, measData)
    popt, pcov = curve_fit(polynomFit, xdata=inpData, ydata=measData, p0=[0]*(order+1))
    fitData = polynomFit(inpData, *popt)
    ax.plot(np.arange(inpData.shape[0]), fitData, label='polyn. fit, order '+str(order), linestyle='--')
    ax.legend(loc='upper left', bbox_to_anchor=(1.05, 1))
    print(order, popt)

ax.set_xticklabels(inpDataStr, rotation=90)
Result:
Yes. We can pass multiple variables to curve_fit. I have written a piece of code:
import numpy as np
from scipy.optimize import curve_fit

x = np.random.randn(2, 100)
w = np.array([1.5, 0.5]).reshape(1, 2)
esp = np.random.randn(1, 100)
y = np.dot(w, x) + esp
y = y.reshape(100,)
In the above code I have generated x, a 2D data set of shape (2, 100), i.e. two variables with 100 data points each, and the dependent variable y as a linear combination of the independent variables x plus some noise.
def model_func(x, w1, w2, b):
    w = np.array([w1, w2]).reshape(1, 2)
    b = np.array([b]).reshape(1, 1)
    y_p = np.dot(w, x) + b
    return y_p.reshape(100,)
We have defined a model function that establishes the relation between y and x.
Note: the output of the model function (the predicted y) must have shape (number of data points,).
popt, pcov = curve_fit(model_func, x, y)
popt is a 1D numpy array containing the fitted parameters; in our case there are 3 of them.
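As a side note (standard curve_fit usage, not specific to this answer), pcov is the estimated covariance matrix of those parameters, so approximate 1-sigma uncertainties can be read off its diagonal:

perr = np.sqrt(np.diag(pcov))   # approximate 1-sigma uncertainties for w1, w2, b
print(popt)
print(perr)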
Yes, there is: simply give curve_fit a multi-dimensional array for xData.

Python parameter transformation according to MINUIT

I am writing an automated curve fitting routine for 2D data based on scipy's optimize.leastsq, and it works. However, when fitting many curves with starting values slightly off I get non-physical results (negative amplitude, for example).
I found the post Scipy: bounds for fitting parameter(s) when using optimize.leastsq and was trying to use the parameter transformation according to MINUIT from CERN. In the above-mentioned question somebody provided a link to some Python code:
code.google.com/p/nmrglue/source/browse/trunk/nmrglue/analysis/leastsqbound.py
I wrote this minimal working example (extending the code):
"""
http://code.google.com/p/nmrglue/source/browse/trunk/nmrglue/analysis/leastsqbound.py
Constrained multivariate Levenberg-Marquardt optimization
"""
from scipy.optimize import leastsq
import numpy as np
import matplotlib.pyplot as plt #new
def internal2external_grad(xi, bounds):
"""
Calculate the internal to external gradiant
Calculates the partial of external over internal
"""
ge = np.empty_like(xi)
for i, (v, bound) in enumerate(zip(xi, bounds)):
a = bound[0] # minimum
b = bound[1] # maximum
if a == None and b == None: # No constraints
ge[i] = 1.0
elif b == None: # only min
ge[i] = v / np.sqrt(v ** 2 + 1)
elif a == None: # only max
ge[i] = -v / np.sqrt(v ** 2 + 1)
else: # both min and max
ge[i] = (b - a) * np.cos(v) / 2.
return ge
def i2e_cov_x(xi, bounds, cov_x):
grad = internal2external_grad(xi, bounds)
grad = grad = np.atleast_2d(grad)
return np.dot(grad.T, grad) * cov_x
def internal2external(xi, bounds):
""" Convert a series of internal variables to external variables"""
xe = np.empty_like(xi)
for i, (v, bound) in enumerate(zip(xi, bounds)):
a = bound[0] # minimum
b = bound[1] # maximum
if a == None and b == None: # No constraints
xe[i] = v
elif b == None: # only min
xe[i] = a - 1. + np.sqrt(v ** 2. + 1.)
elif a == None: # only max
xe[i] = b + 1. - np.sqrt(v ** 2. + 1.)
else: # both min and max
xe[i] = a + ((b - a) / 2.) * (np.sin(v) + 1.)
return xe
def external2internal(xe, bounds):
""" Convert a series of external variables to internal variables"""
xi = np.empty_like(xe)
for i, (v, bound) in enumerate(zip(xe, bounds)):
a = bound[0] # minimum
b = bound[1] # maximum
if a == None and b == None: # No constraints
xi[i] = v
elif b == None: # only min
xi[i] = np.sqrt((v - a + 1.) ** 2. - 1)
elif a == None: # only max
xi[i] = np.sqrt((b - v + 1.) ** 2. - 1)
else: # both min and max
xi[i] = np.arcsin((2.*(v - a) / (b - a)) - 1.)
return xi
def err(p, bounds, efunc, args):
pe = internal2external(p, bounds) # convert to external variables
return efunc(pe, *args)
def calc_cov_x(infodic, p):
"""
Calculate cov_x from fjac, ipvt and p as is done in leastsq
"""
fjac = infodic['fjac']
ipvt = infodic['ipvt']
n = len(p)
# adapted from leastsq function in scipy/optimize/minpack.py
perm = np.take(np.eye(n), ipvt - 1, 0)
r = np.triu(np.transpose(fjac)[:n, :])
R = np.dot(r, perm)
try:
cov_x = np.linalg.inv(np.dot(np.transpose(R), R))
except LinAlgError:
cov_x = None
return cov_x
def leastsqbound(func, x0, bounds, args = (), **kw):
"""
Constrained multivariant Levenberg-Marquard optimization
Minimize the sum of squares of a given function using the
Levenberg-Marquard algorithm. Contraints on parameters are inforced using
variable transformations as described in the MINUIT User's Guide by
Fred James and Matthias Winkler.
Parameters:
* func functions to call for optimization.
* x0 Starting estimate for the minimization.
* bounds (min,max) pair for each element of x, defining the bounds on
that parameter. Use None for one of min or max when there is
no bound in that direction.
* args Any extra arguments to func are places in this tuple.
Returns: (x,{cov_x,infodict,mesg},ier)
Return is described in the scipy.optimize.leastsq function. x and con_v
are corrected to take into account the parameter transformation, infodic
is not corrected.
Additional keyword arguments are passed directly to the
scipy.optimize.leastsq algorithm.
"""
# check for full output
if "full_output" in kw and kw["full_output"]:
full = True
else:
full = False
# convert x0 to internal variables
i0 = external2internal(x0, bounds)
# perfrom unconstrained optimization using internal variables
r = leastsq(err, i0, args = (bounds, func, args), **kw)
# unpack return convert to external variables and return
if full:
xi, cov_xi, infodic, mesg, ier = r
xe = internal2external(xi, bounds)
cov_xe = i2e_cov_x(xi, bounds, cov_xi)
# XXX correct infodic 'fjac','ipvt', and 'qtf'
return xe, cov_xe, infodic, mesg, ier
else:
xi, ier = r
xe = internal2external(xi, bounds)
return xe, ier
# new below
def _evaluate(x, p):
'''
Linear plus Lorentzian curve
p = list with three parameters ([a, b, I, Pos, FWHM])
'''
return p[0] + p[1] * x + p[2] / (1 + np.power((x - p[3]) / (p[4] / 2), 2))
def residuals(p, y, x):
err = _evaluate(x, p) - y
return err
if __name__ == '__main__':
data = np.loadtxt('constraint.dat') # read data
p0 = [5000., 0., 500., 2450., 3] #Start values for a, b, I, Pos, FWHM
constraints = [(4000., None), (-50., 20.), (0., 2000.), (2400., 2451.), (None, None)]
p, res = leastsqbound(residuals, p0, constraints, args = (data[:, 1], data[:, 0]), maxfev = 20000)
print p, res
plt.plot(data[:, 0], data[:, 1]) # plot data
plt.plot(data[:, 0], _evaluate(data[:, 0], p0)) # plot start values
plt.plot(data[:, 0], _evaluate(data[:, 0], p)) # plot fit values
plt.show()
That's the plot output, where green is the starting condition and red the fit result:
Is this the correct usage? The external2internal conversion just returns NaN if a start value lies outside the bounds. leastsq seems to be able to handle this?
I uploaded the fitting data here. Just paste it into a text file named constraint.dat.
There is already an existing, popular constrained Lev-Mar code:
http://adsabs.harvard.edu/abs/2009ASPC..411..251M
with an implementation in Python:
http://code.google.com/p/astrolibpy/source/browse/mpfit/mpfit.py
I would suggest not reinventing the wheel.
Following sega_sai's answer I came up with this minimal working example using mpfit.py
import matplotlib.pyplot as plt
from mpfit import mpfit
import numpy as np


def _evaluate(p, x):
    '''
    Linear plus Lorentzian curve
    p = list with five parameters ([a, b, I, Pos, FWHM])
    '''
    return p[0] + p[1] * x + p[2] / (1 + np.power((x - p[3]) / (p[4] / 2), 2))


def residuals(p, fjac=None, x=None, y=None, err=None):
    status = 0
    error = _evaluate(p, x) - y
    return [status, error / err]


if __name__ == '__main__':
    data = np.loadtxt('constraint.dat')  # read data
    x = data[:, 0]
    y = data[:, 1]
    err = np.ones(y.shape, dtype='float64')  # uniform unit uncertainties

    parinfo = [{'value': 5000., 'fixed': 0, 'limited': [0, 0], 'limits': [0., 0.], 'parname': 'a'},
               {'value': 0., 'fixed': 0, 'limited': [0, 0], 'limits': [0., 0.], 'parname': 'b'},
               {'value': 500., 'fixed': 0, 'limited': [0, 0], 'limits': [0., 0.], 'parname': 'I'},
               {'value': 2450., 'fixed': 0, 'limited': [0, 0], 'limits': [0., 0.], 'parname': 'Pos'},
               {'value': 3., 'fixed': 0, 'limited': [0, 0], 'limits': [0., 0.], 'parname': 'FWHM'}]

    fa = {'x': x, 'y': y, 'err': err}
    m = mpfit(residuals, parinfo=parinfo, functkw=fa)
    print(m)
The fit results are:
mpfit.py: 3714.97545, 0.484193283, 2644.47271, 2440.13385, 22.1898496
leastsq: 3714.97187, 0.484194545, 2644.46890, 2440.13391, 22.1899295
So, in conclusion: both methods work and both allow constraints. But as mpfit comes from a very established source, I trust it more. It also honors error values, if available.
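Note that the parinfo above leaves every parameter unbounded ('limited': [0, 0]); to actually enforce a constraint with mpfit you switch on the corresponding flag and supply the bound value, roughly like this (a sketch following mpfit's documented parinfo convention):

# e.g. force the intensity I to stay non-negative and Pos to stay in [2400, 2451]
parinfo[2]['limited'] = [1, 0]          # lower bound on, upper bound off
parinfo[2]['limits'] = [0., 0.]         # lower bound value (upper entry unused)
parinfo[3]['limited'] = [1, 1]          # both bounds on
parinfo[3]['limits'] = [2400., 2451.]   # [lower, upper]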
Try lmfit-py - https://github.com/newville/lmfit-py
It also uses the Levenberg-Marquardt (LM) algorithm via scipy.optimize.leastsq, and the uncertainties it reports are OK.
It allows you to constrain your fitting parameters not only with bounds but also with mathematical expressions between them, without modifying your fitting function.
Forget about using those awful p[0], p[1] ... in the fitting function. Just use names for the fitting parameters via the Parameters class.
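A minimal sketch of what that looks like for the linear-plus-Lorentzian model above (the parameter names and the commented-out expression constraint are just illustrative; adapt them to your model):

import numpy as np
from lmfit import minimize, Parameters, report_fit

def residual(params, x, y):
    a = params['a'].value
    b = params['b'].value
    I = params['I'].value
    pos = params['pos'].value
    fwhm = params['fwhm'].value
    model = a + b*x + I / (1 + ((x - pos)/(fwhm/2))**2)
    return model - y

params = Parameters()
params.add('a', value=5000., min=4000.)
params.add('b', value=0., min=-50., max=20.)
params.add('I', value=500., min=0., max=2000.)
params.add('pos', value=2450., min=2400., max=2451.)
params.add('fwhm', value=3., min=0.)
# an algebraic constraint instead of a free parameter, e.g. tie a width to another parameter:
# params.add('sigma', expr='fwhm / 2.3548')

data = np.loadtxt('constraint.dat')
out = minimize(residual, params, args=(data[:, 0], data[:, 1]))
report_fit(out)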

Fitting data to system of ODEs using Python via Scipy & Numpy

I am having some trouble translating my MATLAB code into Python via Scipy & Numpy. I am stuck on how to find optimal parameter values (k0 and k1) for my system of ODEs so that it fits my ten observed data points. I currently have an initial guess for k0 and k1. In MATLAB, I can use something called 'fminsearch', which is a function that takes the system of ODEs, the observed data points, and the initial values of the system of ODEs. It then calculates a new pair of parameters k0 and k1 that fit the observed data. I have included my code so you can see if you can help me implement some kind of 'fminsearch' to find the optimal parameter values k0 and k1 that fit my data. I want to add whatever code is needed to do this to my lsqtest.py file.
I have three .py files - ode.py, lsq.py, and lsqtest.py
ode.py:
def f(y, t, k):
    return (-k[0]*y[0],
            k[0]*y[0] - k[1]*y[1],
            k[1]*y[1])
lsq.py:
import pylab as py
import numpy as np
from scipy import integrate
from scipy import optimize
import ode

def lsq(teta, y0, data):
    # INPUT:  teta, the unknowns k0, k1
    #         data, observed
    #         y0, initial values needed by the ODE
    # OUTPUT: lsq value
    t = np.linspace(0, 9, 10)
    y_obs = data  # data points
    k = [0, 0]
    k[0] = teta[0]
    k[1] = teta[1]

    # call the ODE solver to get the states:
    r = integrate.odeint(ode.f, y0, t, args=(k,))
    # the ODE system in ode.py
    # at each row (time point), y_cal has
    # the values of the components [A, B, C]
    y_cal = r[:, 1]  # separate the measured B

    # compute the expression to be minimized:
    return sum((y_obs - y_cal)**2)
lsqtest.py:
import pylab as py
import numpy as np
from scipy import integrate
from scipy import optimize
import lsq

if __name__ == '__main__':
    teta = [0.2, 0.3]  # guess for parameter values k0 and k1
    y0 = [1, 0, 0]  # initial conditions for the system
    y = [0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309]  # observed data points
    data = y
    resid = lsq.lsq(teta, y0, data)
    print(resid)
For this kind of fitting task you could use the package lmfit. The outcome of the fit would look like this; as you can see, the data are reproduced very well:
For now, I fixed the initial concentrations; you could also set them as variables if you like (just remove the vary=False in the code below). The parameters you obtain are:
[[Variables]]
x10: 5 (fixed)
x20: 0 (fixed)
x30: 0 (fixed)
k0: 0.12183301 +/- 0.005909 (4.85%) (init= 0.2)
k1: 0.77583946 +/- 0.026639 (3.43%) (init= 0.3)
[[Correlations]] (unreported correlations are < 0.100)
C(k0, k1) = 0.809
The code that reproduces the plot looks like this (some explanation can be found in the inline comments):
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from lmfit import minimize, Parameters, Parameter, report_fit


def f(y, t, paras):
    """
    Your system of differential equations
    """
    x1 = y[0]
    x2 = y[1]
    x3 = y[2]

    try:
        k0 = paras['k0'].value
        k1 = paras['k1'].value
    except KeyError:
        k0, k1 = paras

    # the model equations
    f0 = -k0 * x1
    f1 = k0 * x1 - k1 * x2
    f2 = k1 * x2
    return [f0, f1, f2]


def g(t, x0, paras):
    """
    Solution to the ODE x'(t) = f(t, x, k) with initial condition x(0) = x0
    """
    x = odeint(f, x0, t, args=(paras,))
    return x


def residual(paras, t, data):
    """
    Compute the residual between actual data and fitted data
    """
    x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
    model = g(t, x0, paras)
    # you only have data for one of your variables
    x2_model = model[:, 1]
    return (x2_model - data).ravel()


# initial conditions
x10 = 5.
x20 = 0
x30 = 0
y0 = [x10, x20, x30]

# measured data
t_measured = np.linspace(0, 9, 10)
x2_measured = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])

plt.figure()
plt.scatter(t_measured, x2_measured, marker='o', color='b', label='measured data', s=75)

# set parameters including bounds; you can also fix parameters (use vary=False)
params = Parameters()
params.add('x10', value=x10, vary=False)
params.add('x20', value=x20, vary=False)
params.add('x30', value=x30, vary=False)
params.add('k0', value=0.2, min=0.0001, max=2.)
params.add('k1', value=0.3, min=0.0001, max=2.)

# fit model
result = minimize(residual, params, args=(t_measured, x2_measured), method='leastsq')  # leastsq or nelder

# check results of the fit
data_fitted = g(np.linspace(0., 9., 100), y0, result.params)

# plot fitted data
plt.plot(np.linspace(0., 9., 100), data_fitted[:, 1], '-', linewidth=2, color='red', label='fitted data')
plt.legend()
plt.xlim([0, max(t_measured)])
plt.ylim([0, 1.1 * max(data_fitted[:, 1])])
# display fitted statistics
report_fit(result)

plt.show()
If you have data for additional variables, you can simply update the function residual.
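For instance (a hypothetical extension, assuming you had also measured the third component x3 at the same time points), the residual could simply stack both series:

def residual_two_series(paras, t, data_x2, data_x3):
    x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
    model = g(t, x0, paras)
    res_x2 = model[:, 1] - data_x2
    res_x3 = model[:, 2] - data_x3
    # lmfit/leastsq just needs one flat array of residuals
    return np.concatenate((res_x2, res_x3)).ravel()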
The following worked for me:
import pylab as pp
import numpy as np
from scipy import integrate, interpolate
from scipy import optimize

## initialize the data
x_data = np.linspace(0, 9, 10)
y_data = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])


def f(y, t, k):
    """define the ODE system in terms of
    dependent variable y,
    independent variable t, and
    optional parameters, in this case a single variable k """
    return (-k[0]*y[0],
            k[0]*y[0] - k[1]*y[1],
            k[1]*y[1])


def my_ls_func(x, teta):
    """definition of function for LS fit
    x gives evaluation points,
    teta is an array of parameters to be varied for fit"""
    # create an alias to f which passes the optional params
    f2 = lambda y, t: f(y, t, teta)
    # calculate ode solution, return values for each entry of "x"
    r = integrate.odeint(f2, y0, x)
    # in this case, we only need one of the dependent variable values
    return r[:, 1]


def f_resid(p):
    """ function to pass to optimize.leastsq
    The routine will square and sum the values returned by
    this function"""
    return y_data - my_ls_func(x_data, p)


# solve the system - the solution is in variable c
guess = [0.2, 0.3]  # initial guess for params
y0 = [1, 0, 0]  # initial conditions for ODEs
(c, kvg) = optimize.leastsq(f_resid, guess)  # get params
print("parameter values are ", c)

# fit ODE results to an interpolating spline just for fun
xeval = np.linspace(min(x_data), max(x_data), 30)
gls = interpolate.UnivariateSpline(xeval, my_ls_func(xeval, c), k=3, s=0)

# pick a few more points for a very smooth curve, then plot
# data and curve fit
xeval = np.linspace(min(x_data), max(x_data), 200)
# Plot of the data as red dots and fit as blue line
pp.plot(x_data, y_data, '.r', xeval, gls(xeval), '-b')
pp.xlabel('xlabel', {"fontsize": 16})
pp.ylabel("ylabel", {"fontsize": 16})
pp.legend(('data', 'fit'), loc=0)
pp.show()
Look at the scipy.optimize module. The minimize function looks fairly similar to fminsearch; MATLAB's fminsearch uses the Nelder-Mead simplex algorithm, which minimize also provides via method='Nelder-Mead'.
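A minimal sketch of that route, reusing the lsq.lsq function and the variables from the question's lsqtest.py (so this assumes those files are on the path):

from scipy.optimize import minimize

res = minimize(lsq.lsq, x0=[0.2, 0.3], args=(y0, data), method='Nelder-Mead')
print(res.x)    # fitted k0, k1
print(res.fun)  # final sum of squared residuals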
# cleaned up a bit to get my head around it - thanks for sharing
import pylab as pp
import numpy as np
from scipy import integrate, optimize

class Parameterize_ODE():
    def __init__(self):
        self.X = np.linspace(0, 9, 10)
        self.y = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])
        self.y0 = [1, 0, 0]  # initial conditions for the ODEs

    def ode(self, y, X, p):
        return (-p[0]*y[0],
                p[0]*y[0] - p[1]*y[1],
                p[1]*y[1])

    def model(self, X, p):
        return integrate.odeint(self.ode, self.y0, X, args=(p,))

    def f_resid(self, p):
        return self.y - self.model(self.X, p)[:, 1]

    def optim(self, p_guess):
        return optimize.leastsq(self.f_resid, p_guess)  # fit params

po = Parameterize_ODE()
p_guess = [0.2, 0.3]
c, kvg = po.optim(p_guess)

# --- show ---
print("parameter values are ", c, kvg)
x = np.linspace(min(po.X), max(po.X), 2000)
pp.plot(po.X, po.y, '.r', x, po.model(x, c)[:, 1], '-b')
pp.xlabel('X', {"fontsize": 16})
pp.ylabel("y", {"fontsize": 16})
pp.legend(('data', 'fit'), loc=0)
pp.show()
