Suppose I'm fitting some data points with a simple linear regression. Now I'd like to perform several joint linear regressions for several sets of data points. More specifically, I want one parameter to be shared among all fits, as schematically depicted here for the y-axis intercept.
After searching Google for some time, I could neither find any Python (SciPy) routine that does this, nor any general literature on how one would accomplish it.
Ideally, I want to perform those joint fits not only for simple linear regressions, but also for more general fit functions (for instance, power-law fits with a joint exponent).
The lmfit module allows you to do this, as mentioned in their FAQ:
from lmfit import minimize, Parameters, fit_report
import numpy as np
# residual function to minimize
def fit_function(params, x=None, dat1=None, dat2=None):
    model1 = params['offset'] + x * params['slope1']
    model2 = params['offset'] + x * params['slope2']
    resid1 = dat1 - model1
    resid2 = dat2 - model2
    return np.concatenate((resid1, resid2))
# setup fit parameters
params = Parameters()
params.add('slope1', value=1)
params.add('slope2', value=-1)
params.add('offset', value=0.5)
# generate sample data
x = np.arange(0, 10)
slope1, slope2, offset = 1.1, -0.9, 0.2
y1 = slope1 * x + offset
y2 = slope2 * x + offset
# fit
out = minimize(fit_function, params, kws={"x": x, "dat1": y1, "dat2": y2})
print(fit_report(out))
# [[Fit Statistics]]
# # fitting method = leastsq
# # function evals = 9
# # data points = 20
# # variables = 3
# chi-square = 1.4945e-31
# reduced chi-square = 8.7913e-33
# Akaike info crit = -1473.48128
# Bayesian info crit = -1470.49408
# [[Variables]]
# slope1: 1.10000000 +/- 8.2888e-18 (0.00%) (init = 1)
# slope2: -0.90000000 +/- 8.2888e-18 (0.00%) (init = -1)
# offset: 0.20000000 +/- 3.8968e-17 (0.00%) (init = 0.5)
# [[Correlations]] (unreported correlations are < 0.100)
# C(slope1, offset) = -0.742
# C(slope2, offset) = -0.742
# C(slope1, slope2) = 0.551
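To address the more general case from the question, here is a minimal sketch (my own illustration, not from the lmfit FAQ) of the same pattern for two power laws y = a_i * x**p sharing the exponent p; all names and data below are made up for the example:
import numpy as np
from lmfit import minimize, Parameters, fit_report

def power_residual(params, x=None, dat1=None, dat2=None):
    # two power-law models sharing a single exponent
    v = params.valuesdict()
    model1 = v['amp1'] * x ** v['exponent']
    model2 = v['amp2'] * x ** v['exponent']
    return np.concatenate((dat1 - model1, dat2 - model2))

p = Parameters()
p.add('amp1', value=1.0)
p.add('amp2', value=1.0)
p.add('exponent', value=1.5)   # the joint parameter

xp = np.linspace(1, 10, 50)
d1 = 0.7 * xp ** 2.0            # synthetic data, true exponent 2.0
d2 = 3.0 * xp ** 2.0
out_pl = minimize(power_residual, p, kws={"x": xp, "dat1": d1, "dat2": d2})
print(fit_report(out_pl))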
I think this graphing example does what you want, fitting two data sets with a single shared parameter. Note that if the data sets are of unequal length, the fit can effectively be weighted toward the data set with more points; a sketch of one way to compensate follows the code below. This example explicitly sets the initial parameter values to 1.0 (the curve_fit() defaults) and does not use scipy's genetic algorithm to help find initial parameter estimates.
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
y1 = np.array([ 16.00, 18.42, 20.84, 23.26])
y2 = np.array([-20.00, -25.50, -31.00, -36.50, -42.00])
comboY = np.append(y1, y2)
x1 = np.array([5.0, 6.1, 7.2, 8.3])
x2 = np.array([15.0, 16.1, 17.2, 18.3, 19.4])
comboX = np.append(x1, x2)
if len(y1) != len(x1):
    raise Exception('Unequal x1 and y1 data length')
if len(y2) != len(x2):
    raise Exception('Unequal x2 and y2 data length')

def function1(data, a, b, c):  # not all parameters are used here, c is shared
    return a * data + c

def function2(data, a, b, c):  # not all parameters are used here, c is shared
    return b * data + c

def combinedFunction(comboData, a, b, c):
    # single data reference passed in, extract separate data
    extract1 = comboData[:len(x1)]  # first data
    extract2 = comboData[len(x1):]  # second data
    result1 = function1(extract1, a, b, c)
    result2 = function2(extract2, a, b, c)
    return np.append(result1, result2)
# some initial parameter values
initialParameters = np.array([1.0, 1.0, 1.0])
# curve fit the combined data to the combined function
fittedParameters, pcov = curve_fit(combinedFunction, comboX, comboY, initialParameters)
# values for display of fitted function
a, b, c = fittedParameters
y_fit_1 = function1(x1, a, b, c) # first data set, first equation
y_fit_2 = function2(x2, a, b, c) # second data set, second equation
plt.plot(comboX, comboY, 'D') # plot the raw data
plt.plot(x1, y_fit_1) # plot the equation using the fitted parameters
plt.plot(x2, y_fit_2) # plot the equation using the fitted parameters
plt.show()
print('a, b, c:', fittedParameters)
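Regarding the unequal-length caveat mentioned above, one possible (not the only) way to rebalance the fit is to pass per-point uncertainties through curve_fit's sigma argument; the sqrt(N) scaling below is just an illustrative choice, continuing from the code above:
# Illustrative rebalancing: scale each set's residuals by sqrt(its length)
# so that neither data set dominates simply by having more points.
sigma1 = np.full(len(y1), np.sqrt(len(y1)))
sigma2 = np.full(len(y2), np.sqrt(len(y2)))
comboSigma = np.append(sigma1, sigma2)

weightedParameters, _ = curve_fit(combinedFunction, comboX, comboY,
                                  initialParameters, sigma=comboSigma)
print('weighted a, b, c:', weightedParameters)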
I have implemented a 3D Gaussian fit using scipy.optimize.leastsq and now I would like to tweak the arguments ftol and xtol to optimize the performance. However, I don't understand the "units" of these two parameters, so I can't make a proper choice. Is it possible to calculate these two parameters from the results? That would give me an understanding of how to choose them. My data consists of numpy arrays of np.uint8. I tried to read the FORTRAN source code of MINPACK, but my FORTRAN knowledge is zero. I also checked the Levenberg-Marquardt algorithm, but I could not really find a number that was below ftol, for example.
Here is a minimal example of what I do:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
class gaussian_model:
    def __init__(self):
        self.prev_iter_model = None
        self.f_vals = []

    def gaussian_1D(self, coeffs, xx):
        A, sigma, mu = coeffs
        # Center rotation around peak center
        x0 = xx - mu
        model = A*np.exp(-(x0**2)/(2*(sigma**2)))
        return model

    def residuals(self, coeffs, I_obs, xx, model_func):
        model = model_func(coeffs, xx)
        residuals = I_obs - model
        if self.prev_iter_model is not None:
            self.f = np.sum(((model-self.prev_iter_model)/model)**2)
            self.f_vals.append(self.f)
        self.prev_iter_model = model
        return residuals
# x data
x_start = 1
x_stop = 10
num = 100
xx, dx = np.linspace(x_start, x_stop, num, retstep=True)
# Simulated data with some noise
A, s_x, mu = 10, 0.5, 3
coeffs = [A, s_x, mu]
model = gaussian_model()
yy = model.gaussian_1D(coeffs, xx)
noise_ampl = 0.5
noise = np.random.normal(0, noise_ampl, size=num)
yy += noise
# LM Least squares
initial_guess = [1, 1, 1]
pred_coeffs, cov_x, info, mesg, ier = leastsq(model.residuals, initial_guess,
                                              args=(yy, xx, model.gaussian_1D),
                                              ftol=1E-6, full_output=True)
yy_fit = model.gaussian_1D(pred_coeffs, xx)
rel_SSD = np.sum(((yy-yy_fit)/yy)**2)
RMS_SSD = np.sqrt(rel_SSD/num)
print(RMS_SSD)
print(model.f)
print(model.f_vals)
fig, ax = plt.subplots(1,2)
# Plot results
ax[0].scatter(xx, yy)
ax[0].plot(xx, yy_fit, c='r')
ax[1].scatter(range(len(model.f_vals)), model.f_vals, c='r')
# ax[1].set_ylim(0, 1E-6)
plt.show()
rel_SSD is around 1 and definitely not something below ftol = 1E-6.
EDIT: Based on @user12750353's answer below, I updated my minimal example to try to recreate how lmdif determines termination with ftol. The problem is that my f_vals are too small, so they are not the right values. The reason I would like to recreate this is that I would like to see what kind of numbers I am getting in my main code, to decide on an ftol that would terminate the fitting process earlier.
Since you are giving a function without the gradient, the method called is lmdif. Instead of gradients it will use a forward-difference gradient estimate, f(x + delta) - f(x) ~ delta * df(x)/dx (writing x as if it were the fit parameter).
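As a quick illustration (my own, with an arbitrary step size) of what that forward-difference estimate looks like:
# Forward-difference derivative estimate, the kind lmdif builds column by
# column for the Jacobian when no gradient function is supplied.
import numpy as np

def f(x):
    return x ** 2

x = 3.0
delta = np.sqrt(np.finfo(float).eps) * max(abs(x), 1.0)  # illustrative step choice
df_est = (f(x + delta) - f(x)) / delta
print(df_est)   # close to the exact derivative 2*x = 6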
In the documentation comments of MINPACK's lmdif (the routine scipy calls here) you find the following description:
c ftol is a nonnegative input variable. termination
c occurs when both the actual and predicted relative
c reductions in the sum of squares are at most ftol.
c therefore, ftol measures the relative error desired
c in the sum of squares.
c
c xtol is a nonnegative input variable. termination
c occurs when the relative error between two consecutive
c iterates is at most xtol. therefore, xtol measures the
c relative error desired in the approximate solution.
Looking at the code, the actual reduction actred = 1 - (fnorm1/fnorm)**2 is essentially what you calculated for rel_SSD, but between the last two iterations, not between the fitted function and the target points.
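A tiny sketch (values made up) of that quantity, computed from the residual norms of two consecutive iterations rather than from the data:
# "Actual relative reduction" in the sense of lmdif's ftol test:
# compare the residual norm before and after one accepted step.
fnorm_prev = 12.3    # ||residuals|| at the previous iteration (made-up number)
fnorm_new = 12.1     # ||residuals|| after the step (made-up number)
actred = 1.0 - (fnorm_new / fnorm_prev) ** 2
print(actred)        # the ftol criterion looks at reductions of this size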
Example
The problem here is that we need to discover what values the internal variables take. One attempt to do so is to save the coefficients and the residual norm every time the function is called, as follows.
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
class gaussian_model:
    def __init__(self):
        self.prev_iter_model = None
        self.fnorm = []
        self.x = []

    def gaussian_1D(self, coeffs, xx):
        A, sigma, mu = coeffs
        # Center rotation around peak center
        x0 = xx - mu
        model = A*np.exp(-(x0**2)/(2*(sigma**2)))
        grad = np.array([
            model / A,
            model * x0**2 / (sigma**3),
            model * 2 * x0 / (2*(sigma**2))
        ]).transpose()
        return model, grad

    def residuals(self, coeffs, I_obs, xx, model_func):
        model, grad = model_func(coeffs, xx)
        residuals = I_obs - model
        self.x.append(np.copy(coeffs))
        self.fnorm.append(np.sqrt(np.sum(residuals**2)))
        return residuals

    def grad(self, coeffs, I_obs, xx, model_func):
        model, grad = model_func(coeffs, xx)
        residuals = I_obs - model
        return -grad

    def plot_progress(self):
        x = np.array(self.x)
        dx = np.sqrt(np.sum(np.diff(x, axis=0)**2, axis=1))
        plt.plot(dx / np.sqrt(np.sum(x[1:, :]**2, axis=1)))
        fnorm = np.array(self.fnorm)
        plt.plot(1 - (fnorm[1:]/fnorm[:-1])**2)
        # labels match the plotting order above: relative x step, then relative f reduction
        plt.legend([r'$\|\Delta x\| / \|x\|$', r'$1 - (f_{k+1}/f_k)^2$'], loc='upper left')
# x data
x_start = 1
x_stop = 10
num = 100
xx, dx = np.linspace(x_start, x_stop, num, retstep=True)
# Simulated data with some noise
A, s_x, mu = 10, 0.5, 3
coeffs = [A, s_x, mu]
model = gaussian_model()
yy, _ = model.gaussian_1D(coeffs, xx)
noise_ampl = 0.5
noise = np.random.normal(0, noise_ampl, size=num)
yy += noise
Then we can see the relative variation of x and f:
initial_guess = [1, 1, 1]
pred_coeffs, cov_x, info, mesg, ier = leastsq(model.residuals, initial_guess,
                                              args=(yy, xx, model.gaussian_1D),
                                              xtol=1e-6,
                                              ftol=1e-6, full_output=True)
plt.figure(figsize=(14, 6))
plt.subplot(121)
model.plot_progress()
plt.yscale('log')
plt.grid()
plt.subplot(122)
yy_fit,_ = model.gaussian_1D(pred_coeffs, xx)
# Plot results
plt.scatter(xx, yy)
plt.plot(xx, yy_fit, c='r')
plt.show()
The problem with this approach is that the function is evaluated both to compute f and to compute the gradient of f. To produce a cleaner plot, you can pass Dfun so that func is evaluated only once per iteration.
# x data
x_start = 1
x_stop = 10
num = 100
xx, dx = np.linspace(x_start, x_stop, num, retstep=True)
# Simulated data with some noise
A, s_x, mu = 10, 0.5, 3
coeffs = [A, s_x, mu]
model = gaussian_model()
yy, _ = model.gaussian_1D(coeffs, xx)
noise_ampl = 0.5
noise = np.random.normal(0, noise_ampl, size=num)
yy += noise
# LM Least squares
initial_guess = [1, 1, 1]
pred_coeffs, cov_x, info, mesg, ier = leastsq(model.residuals, initial_guess,
                                              args=(yy, xx, model.gaussian_1D),
                                              Dfun=model.grad,
                                              xtol=1e-6,
                                              ftol=1e-6, full_output=True)
plt.figure(figsize=(14, 6))
plt.subplot(121)
model.plot_progress()
plt.yscale('log')
plt.grid()
plt.subplot(122)
yy_fit,_ = model.gaussian_1D(pred_coeffs, xx)
# Plot results
plt.scatter(xx, yy)
plt.plot(xx, yy_fit, c='r')
plt.show()
Well, the value I am obtaining for xtol is not exactly what is in the lmdif implementation.
I am new to Python and I am using curve_fit for different calculations in my model. One of my equations includes two unknown variables, so it is impossible for me to check manually whether my code is correct.
My equation is
ln(S(b)/S(b50)) = -b*D + (1/6)*b**2*D**2*K
My unknowns are D and K.
ln(S(b)/S(b50)) is my ydata
b is my xdata
so I used the following:
xdata = np.array([50, 300, 600, 1000])
ydata_before = np.array([426.0938, 259.2896, 166.8042, 80.9248])
ydata = np.log(ydata_before/426.0938)

def func(x, D, K):
    return (-x * D) + (1/6 * (x**2) * (D**2) * K)

popt, pcov = curve_fit(func, xdata, ydata)
popt[0] = popt[0] * 1000  # I need that for unit scaling
# popt is then array([ 1.48687053, -0.46540487])
I would assume that those are my D and K?
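For reference (not part of the original question): curve_fit returns popt in the same order as the parameters appear in func's signature, so popt[0] is D and popt[1] is K. A small sketch of reading them off together with their 1-sigma uncertainties:
# popt follows func(x, D, K): index 0 is D, index 1 is K;
# the diagonal of pcov gives the parameter variances.
D_fit, K_fit = popt
D_err, K_err = np.sqrt(np.diag(pcov))
print(D_fit, '+/-', D_err)
print(K_fit, '+/-', K_err)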
Allow me to suggest using lmfit (https://lmfit.github.io/lmfit-py) - disclaimer: I am a lead author. That would change your fit to look like this:
import numpy as np
from lmfit import Model
xdata = np.array([50,300,600,1000])
ydata_before = np.array([426.0938, 259.2896, 166.8042, 80.9248])
ydata = np.log(ydata_before/426.0938)
def func(x, D, K):
    return (-x * D) + (1/6 * (x**2) * (D**2) * K)
# create model from this function
mymodel = Model(func)
# create a dictionary of named parameters using the argument names of
# your model function, so 'D' and 'K'. Give initial values:
params = mymodel.make_params(D=1.5, K=-1)
# do the fit: fitting ydata with the parameters and
# independent variable "x" as defined by your model function:
result = mymodel.fit(ydata, params, x=xdata)
# print the results and fit statistics or just get the best-fit parameters:
print(result.fit_report())
for key, param in result.params.items():
    print(param)
That is, you refer to parameters by name. This fit will print out:
[[Model]]
Model(func)
[[Fit Statistics]]
# fitting method = leastsq
# function evals = 12
# data points = 4
# variables = 2
chi-square = 0.00709560
reduced chi-square = 0.00354780
Akaike info crit = -21.3382982
Bayesian info crit = -22.5657095
[[Variables]]
D: 0.00148687 +/- 1.9408e-04 (13.05%) (init = 1.5)
K: -0.46540487 +/- 0.71331763 (153.27%) (init = -1)
[[Correlations]] (unreported correlations are < 0.100)
C(D, K) = 0.977
<Parameter 'D', value=0.0014868705336113577 +/- 0.000194, bounds=[-inf:inf]>
<Parameter 'K', value=-0.4654048673207782 +/- 0.713, bounds=[-inf:inf]>
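As a small follow-up sketch (using standard lmfit ModelResult features), you could also pull the best-fit values out as a plain dict and evaluate the fitted model on a denser grid:
# best_values is a dict of the fitted parameter values;
# eval() evaluates the model at new x values using those parameters
print(result.best_values)             # e.g. {'D': 0.0014868..., 'K': -0.46540...}
x_dense = np.linspace(50, 1000, 200)
y_dense = result.eval(x=x_dense)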
OK, I have a function which uses a range of parameters to calculate the effect on two separate variables over time. These variables have already been curve-matched to some existing data to minimize the variation (shown below)
I want to be able to check the previous working, and match new data. I have been trying to use the scipy.optimize.curve_fit function, by stacking the x and y data resulting from my function (as suggested here: fit multiple parametric curves with scipy).
It may not be the right method, or I may just be misunderstanding it, but my code keeps running into this error: TypeError: Improper input: N=3 must not exceed M=2
My simplified prototype code was initially taken from here: https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def func(x, a, b, c):
    result = ([], [])
    for i in x:
        # set up 2 example curves
        result[0].append(a * np.exp(-b * i) + c)
        result[1].append(a * np.exp(-b * i) + c**2)
    return result  # as a tuple containing 2 lists
#Define the data to be fit with some noise:
xdata = list(np.arange(0, 10, 1))
y = func(xdata, 2.5, 5, 0.5)[0]
y2 = func(xdata, 1, 1, 2)[1]
#Add some noise
y_noise = 0.1 * np.random.normal(size=len(xdata))
y2_noise = 0.1 * np.random.normal(size=len(xdata))
ydata=[]
ydata2=[]
for i in range(len(y)):  # clunky
    ydata.append(y[i] + y_noise[i])
    ydata2.append(y2[i] + y2_noise[i])
plt.scatter(xdata, ydata, label='data')
plt.scatter(xdata, ydata2, label='data2')
#plt.plot(xdata, y, 'k-', label='data (original function)')
#plt.plot(xdata, y2, 'k-', label='data2 (original function)')
#stack the data
xdat = xdata+xdata
ydat = ydata+ydata2
popt, pcov = curve_fit(func, xdat, ydat)
plt.plot(xdata, func(xdata, *popt), 'r-',
         label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
Any help much appreciated!
Here is example graphing code that fits two different equations with a single shared parameter; if this looks like what you need, it can easily be adapted to your specific problem. A rough sketch of adapting it to your exponential curves follows the code below.
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
y1 = np.array([ 16.00, 18.42, 20.84, 23.26])
y2 = np.array([-20.00, -25.50, -31.00, -36.50, -42.00])
comboY = np.append(y1, y2)
x1 = np.array([5.0, 6.1, 7.2, 8.3])
x2 = np.array([15.0, 16.1, 17.2, 18.3, 19.4])
comboX = np.append(x1, x2)
if len(y1) != len(x1):
    raise Exception('Unequal x1 and y1 data length')
if len(y2) != len(x2):
    raise Exception('Unequal x2 and y2 data length')

def function1(data, a, b, c):  # not all parameters are used here, c is shared
    return a * data + c

def function2(data, a, b, c):  # not all parameters are used here, c is shared
    return b * data + c

def combinedFunction(comboData, a, b, c):
    # single data reference passed in, extract separate data
    extract1 = comboData[:len(x1)]  # first data
    extract2 = comboData[len(x1):]  # second data
    result1 = function1(extract1, a, b, c)
    result2 = function2(extract2, a, b, c)
    return np.append(result1, result2)
# some initial parameter values
initialParameters = np.array([1.0, 1.0, 1.0])
# curve fit the combined data to the combined function
fittedParameters, pcov = curve_fit(combinedFunction, comboX, comboY, initialParameters)
# values for display of fitted function
a, b, c = fittedParameters
y_fit_1 = function1(x1, a, b, c) # first data set, first equation
y_fit_2 = function2(x2, a, b, c) # second data set, second equation
plt.plot(comboX, comboY, 'D') # plot the raw data
plt.plot(x1, y_fit_1) # plot the equation using the fitted parameters
plt.plot(x2, y_fit_2) # plot the equation using the fitted parameters
plt.show()
print('a, b, c:', fittedParameters)
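As a rough sketch of how this could map onto the exponential curves in your question (the synthetic data here are generated from a single shared parameter set, which is an assumption on my part):
import numpy as np
from scipy.optimize import curve_fit

# Two related exponential curves that share a, b and c, as in the question
x = np.arange(0, 10, 1.0)
rng = np.random.default_rng(0)
yA = 2.5 * np.exp(-0.5 * x) + 0.5 + 0.1 * rng.normal(size=x.size)
yB = 2.5 * np.exp(-0.5 * x) + 0.5**2 + 0.1 * rng.normal(size=x.size)

stackedX = np.append(x, x)
stackedY = np.append(yA, yB)

def curveA(xv, a, b, c):
    return a * np.exp(-b * xv) + c

def curveB(xv, a, b, c):
    return a * np.exp(-b * xv) + c**2

def combinedExp(data, a, b, c):
    # first half of the stacked data belongs to curve A, second half to curve B
    return np.append(curveA(data[:len(x)], a, b, c),
                     curveB(data[len(x):], a, b, c))

sharedParams, _ = curve_fit(combinedExp, stackedX, stackedY, p0=[1.0, 1.0, 1.0])
print('shared a, b, c:', sharedParams)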
I am trying to fit this function to some data:
But when I use my code
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
def f(x, start, end):
    res = np.empty_like(x)
    res[x < start] = -1
    res[x > end] = 1
    linear = np.all([[start <= x], [x <= end]], axis=0)[0]
    res[linear] = np.linspace(-1., 1., num=np.sum(linear))
    return res
if __name__ == '__main__':
    xdata = np.linspace(0., 1000., 1000)
    ydata = -np.ones(1000)
    ydata[500:1000] = 1.
    ydata = ydata + np.random.normal(0., 0.25, len(ydata))

    popt, pcov = curve_fit(f, xdata, ydata, p0=[495., 505.])
    print(popt, pcov)

    plt.figure()
    plt.plot(xdata, f(xdata, *popt), 'r-', label='fit')
    plt.plot(xdata, ydata, 'b-', label='data')
    plt.show()
I get the error
OptimizeWarning: Covariance of the parameters could not be estimated
In this example, start and end should be closer to 500, but they don't change at all from my initial guess.
The warning (not error) of
OptimizeWarning: Covariance of the parameters could not be estimated
means that the fit could not determine the uncertainties (variance) of the fitting parameters.
The main problem is that your model function f treats the parameters start and end as discrete values -- they are used as integer locations for the change in functional form. scipy's curve_fit (and all other optimization routines in scipy.optimize) assume that parameters are continuous variables, not discrete.
The fitting procedure will try to take small steps (typically around machine precision) in the parameters to get a numerical derivative of the residual with respect to the variables (the Jacobian). With values used as discrete variables, these derivatives will be zero and the fitting procedure will not know how to change the values to improve the fit.
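As an aside, here is a minimal sketch (my own, not from the question) of one way to rewrite f so that start and end enter as continuous quantities, which keeps those numerical derivatives from vanishing:
import numpy as np

def f_continuous(x, start, end):
    # Ramp from -1 to 1 between start and end; np.clip keeps the output a
    # continuous function of both parameters, so curve_fit can take small steps.
    return np.clip((x - start) / (end - start), 0.0, 1.0) * 2.0 - 1.0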
It looks like you're trying to fit a step function to some data. Allow me to recommend trying lmfit (https://lmfit.github.io/lmfit-py) which provides a higher-level interface to curve fitting, and has many built-in models. For example, it includes a StepModel that should be able to model your data.
For a slight modification of your data (so that it has a finite step), the following script with lmfit can fit such data:
#!/usr/bin/python
import numpy as np
from lmfit.models import StepModel, LinearModel
import matplotlib.pyplot as plt
np.random.seed(0)
xdata = np.linspace(0., 1000., 1000)
ydata = -np.ones(1000)
ydata[500:1000] = 1.
# note that a linear step is added here:
ydata[490:510] = -1 + np.arange(20)/10.0
ydata = ydata + np.random.normal(size=len(xdata), scale=0.1)
# model data as Step + Line
step_mod = StepModel(form='linear', prefix='step_')
line_mod = LinearModel(prefix='line_')
model = step_mod + line_mod
# make named parameters, giving initial values:
pars = model.make_params(line_intercept=ydata.min(),
                         line_slope=0,
                         step_center=xdata.mean(),
                         step_amplitude=ydata.std(),
                         step_sigma=2.0)
# fit data to this model with these parameters
out = model.fit(ydata, pars, x=xdata)
# print results
print(out.fit_report())
# plot data and best-fit
plt.plot(xdata, ydata, 'b')
plt.plot(xdata, out.best_fit, 'r-')
plt.show()
which prints out a report of
[[Model]]
(Model(step, prefix='step_', form='linear') + Model(linear, prefix='line_'))
[[Fit Statistics]]
# fitting method = leastsq
# function evals = 49
# data points = 1000
# variables = 5
chi-square = 9.72660131
reduced chi-square = 0.00977548
Akaike info crit = -4622.89074
Bayesian info crit = -4598.35197
[[Variables]]
step_sigma: 20.6227793 +/- 0.77214167 (3.74%) (init = 2)
step_center: 490.167878 +/- 0.44804412 (0.09%) (init = 500)
step_amplitude: 1.98946656 +/- 0.01304854 (0.66%) (init = 0.996283)
line_intercept: -1.00628058 +/- 0.00706005 (0.70%) (init = -1.277259)
line_slope: 1.3947e-05 +/- 2.2340e-05 (160.18%) (init = 0)
[[Correlations]] (unreported correlations are < 0.100)
C(step_amplitude, line_slope) = -0.875
C(step_sigma, step_center) = -0.863
C(line_intercept, line_slope) = -0.774
C(step_amplitude, line_intercept) = 0.461
C(step_sigma, step_amplitude) = 0.170
C(step_sigma, line_slope) = -0.147
C(step_center, step_amplitude) = -0.146
C(step_center, line_slope) = 0.127
and produces a plot of the data with the best fit.
Lmfit has lots of extra features. For example, if you want to set bounds on some of the parameter values, or keep some of them from varying, you can do the following:
# make named parameters, giving initial values:
pars = model.make_params(line_intercept=ydata.min(),
                         line_slope=0,
                         step_center=xdata.mean(),
                         step_amplitude=ydata.std(),
                         step_sigma=2.0)
# now set max and min values for the step amplitude
pars['step_amplitude'].min = 0
pars['step_amplitude'].max = 100
# fix the intercept of the line to -1.0
pars['line_intercept'].value = -1.0
pars['line_intercept'].vary = False
# then run fit with these parameters
out = model.fit(ydata, pars, x=xdata)
If you know the model should be Step+Constant and that the constant should be fixed, you could also modify the model to be
from lmfit.models import ConstantModel
# model data as Step + Constant
step_mod = StepModel(form='linear', prefix='step_')
const_mod = ConstantModel(prefix='const_')
model = step_mod + const_mod
pars = model.make_params(const_c=-1,
                         step_center=xdata.mean(),
                         step_amplitude=ydata.std(),
                         step_sigma=2.0)
pars['const_c'].vary = False
I have a data set that is described by two free parameters which I want to determine using scipy.optimize.curve_fit. The model is defined as follows:
def func(x, a, b):
    return a*x*np.sqrt(1-b*x)
And the fitting part as
popt, pcov = opt.curve_fit(f=func, xdata=x_data, ydata=y_data, p0=init_guess,
                           bounds=([a_min, b_min], [a_max, b_max]))
The solutions for a and b depend quite strongly on my choice of init_guess, i.e. the initial guess, and also on the choice of the bounds.
Is there a way to solve this?
The authors of the Python scipy module have included the Differential Evolution genetic algorithm in scipy's optimization code as the module scipy.optimize.differential_evolution. This module can be used to stochastically find initial parameter values for non-linear regression.
Here is example code from RamanSpectroscopyFit, which uses scipy's genetic algorithm for initial parameter estimation for fitting Raman spectroscopy data:
import numpy as np
import pickle # for loading pickled test data
import matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import warnings
from scipy.optimize import differential_evolution
# Double Lorentzian peak function
# bounds on parameters are set in generate_Initial_Parameters() below
def double_Lorentz(x, a, b, A, w, x_0, A1, w1, x_01):
    return a*x+b+(2*A/np.pi)*(w/(4*(x-x_0)**2 + w**2))+(2*A1/np.pi)*(w1/(4*(x-x_01)**2 + w1**2))
# function for genetic algorithm to minimize (sum of squared error)
# bounds on parameters are set in generate_Initial_Parameters() below
def sumOfSquaredError(parameterTuple):
    warnings.filterwarnings("ignore")  # do not print warnings by genetic algorithm
    return np.sum((yData - double_Lorentz(xData, *parameterTuple)) ** 2)
def generate_Initial_Parameters():
    # min and max used for bounds
    maxX = max(xData)
    minX = min(xData)
    maxY = max(yData)
    minY = min(yData)

    parameterBounds = []
    parameterBounds.append([-1.0, 1.0])  # parameter bounds for a
    parameterBounds.append([maxY/-2.0, maxY/2.0])  # parameter bounds for b
    parameterBounds.append([0.0, maxY*100.0])  # parameter bounds for A
    parameterBounds.append([0.0, maxY/2.0])  # parameter bounds for w
    parameterBounds.append([minX, maxX])  # parameter bounds for x_0
    parameterBounds.append([0.0, maxY*100.0])  # parameter bounds for A1
    parameterBounds.append([0.0, maxY/2.0])  # parameter bounds for w1
    parameterBounds.append([minX, maxX])  # parameter bounds for x_01

    # "seed" the numpy random number generator for repeatable results
    result = differential_evolution(sumOfSquaredError, parameterBounds, seed=3)
    return result.x
# load the pickled test data from original Raman spectroscopy
data = pickle.load(open('data.pkl', 'rb'))
xData = data[0]
yData = data[1]
# generate initial parameter values
initialParameters = generate_Initial_Parameters()
# curve fit the test data
fittedParameters, niepewnosci = curve_fit(double_Lorentz, xData, yData, initialParameters)
# create values for display of fitted peak function
a, b, A, w, x_0, A1, w1, x_01 = fittedParameters
y_fit = double_Lorentz(xData, a, b, A, w, x_0, A1, w1, x_01)
plt.plot(xData, yData) # plot the raw data
plt.plot(xData, y_fit) # plot the equation using the fitted parameters
plt.show()
print(fittedParameters)