TypeError when fitting curve - python

I'm trying to fit a curve to some data I have, but for some reason I just get the error "'numpy.float64' object cannot be interpreted as an integer" and I don't understand why or how to fix it. I would be grateful for some help; the code is below:
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
mud=[0.0014700734999999996,
0.0011840320799999997,
0.0014232304799999995,
0.0008501509799999997,
0.0007235751599999999,
0.0005770661399999999,
0.0005581295999999999,
0.00028703807999999994,
0.00014850233999999998]
F=[0.5750972123893806,
0.5512177433628319,
0.5638906194690266,
0.5240915044247788,
0.5217873451327435,
0.5066008407079646,
0.5027256637168142,
0.4847113274336283,
0.46502123893805314]
fitfunc = lambda p, x: p[0]+p[1]*x # Target function
errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
p0 = [0.46,80,1] # Initial guess for the parameters
p1, success = optimize.leastsq(errfunc, p0[:], args=(mud, F))
m = np.linspace(max(mud),min(mud), 9)
ax = plt.plot(mud,F,"b^")
ax3 = plt.plot(m,fitfunc(p1,m),"g-")

Your problem is that your arguments mud and F are lists, not arrays, which means that you cannot simply multiply them by a float: p[1]*x attempts sequence repetition instead of elementwise arithmetic. Hence the error.
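A quick demonstration of the failure mode (a minimal sketch; the exact TypeError message varies with your numpy version):
import numpy as np

mud_list = [0.00147, 0.00118]    # a plain Python list
# list * float is sequence repetition, which needs an integer count,
# so mud_list * 2.5 raises a TypeError; with a numpy.float64 factor,
# some numpy versions raise exactly the message you quote:
#     'numpy.float64' object cannot be interpreted as an integer
print(np.array(mud_list) * 2.5)  # elementwise: 0.003675, 0.00295
If you define those parameters as np.ndarrays, it will work (note also that your p0 has three entries while fitfunc uses only two, so the third parameter is simply left at its initial value):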
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
mud=np.array([0.0014700734999999996,
0.0011840320799999997,
0.0014232304799999995,
0.0008501509799999997,
0.0007235751599999999,
0.0005770661399999999,
0.0005581295999999999,
0.00028703807999999994,
0.00014850233999999998])
F=np.array([0.5750972123893806,
0.5512177433628319,
0.5638906194690266,
0.5240915044247788,
0.5217873451327435,
0.5066008407079646,
0.5027256637168142,
0.4847113274336283,
0.46502123893805314])
fitfunc = lambda p, x: p[0]+p[1]*x # Target function
errfunc = lambda p, x, y: fitfunc(p, x) - y # Distance to the target function
p0 = [0.46,80,1] # Initial guess for the parameters
p1, success = optimize.leastsq(errfunc, p0[:], args=(mud, F))
print(p1, success)
gives
[ 0.46006301 76.7920086 1. ] 2
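For completeness, the plotting from the question then works as well; a minimal sketch using the fitted p1, with a legend added:
m = np.linspace(max(mud), min(mud), 9)
plt.plot(mud, F, "b^", label="data")
plt.plot(m, fitfunc(p1, m), "g-", label="linear fit")
plt.legend()
plt.show()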

Here is a graphical fitter using the Van Deemter chromatography equation, y = a + b/x + c*x; it gives a good fit to your data.
import numpy, scipy, matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
# mud
xData=numpy.array([0.0014700734999999996,
0.0011840320799999997,
0.0014232304799999995,
0.0008501509799999997,
0.0007235751599999999,
0.0005770661399999999,
0.0005581295999999999,
0.00028703807999999994,
0.00014850233999999998])
# F
yData=numpy.array([0.5750972123893806,
0.5512177433628319,
0.5638906194690266,
0.5240915044247788,
0.5217873451327435,
0.5066008407079646,
0.5027256637168142,
0.4847113274336283,
0.46502123893805314])
def func(x, a, b, c): # Van Deemter chromatography equation
    return a + b/x + c*x
# these are the same as the scipy defaults
initialParameters = numpy.array([1.0, 1.0, 1.0])
# curve fit the test data
fittedParameters, pcov = curve_fit(func, xData, yData, initialParameters)
modelPredictions = func(xData, *fittedParameters)
absError = modelPredictions - yData
SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))
print('Parameters:', fittedParameters)
print('RMSE:', RMSE)
print('R-squared:', Rsquared)
print()
##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)
    # first the raw data as a scatter plot
    axes.plot(xData, yData, 'D')
    # create data for the fitted equation plot
    xModel = numpy.linspace(min(xData), max(xData))
    yModel = func(xModel, *fittedParameters)
    # now the model as a line plot
    axes.plot(xModel, yModel)
    axes.set_xlabel('X Data (mud)') # X axis data label
    axes.set_ylabel('Y Data (F)') # Y axis data label
    plt.show()
    plt.close('all') # clean up after using pyplot
graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)

Related

opt.curve_fit with only one parameter

I'm having trouble fitting a curve with only one parameter using scipy.optimize.curve_fit:
import scipy.optimize as opt
import numpy as np
def func(T):
    return 76.881324*np.exp((-L)/(8.314*T))
best_params, cov_matrix = opt.curve_fit(func, xdata = x, ydata = y, p0=[])
I have arrays of values, x (T in the equation) and y (P), that I'm trying to fit to the equation P = 76.881324*e^(-L/(8.314*T)), but it seems curve_fit wants func() to have more than one argument. How do I fix this?
curve_fit expects the model function's first argument to be the independent variable and every remaining argument to be a parameter to fit, so L must appear in the signature: def func(T, L). Here is a graphical Python fitter using your equation with some test data. Replace the example data with your own and you should be done.
import numpy, scipy, matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
xData = numpy.array([1.1, 2.2, 3.3, 4.4, 5.0, 6.6, 7.7])
yData = numpy.array([1.1, 20.2, 30.3, 60.4, 50.0, 60.6, 70.7])
def func(T, L):
    return 76.881324*numpy.exp((-L)/(8.314*T))
# all "1.0" is the same as the scipy defaults
initialParameters = numpy.array([1.0])
# curve fit the test data
fittedParameters, pcov = curve_fit(func, xData, yData, initialParameters)
modelPredictions = func(xData, *fittedParameters)
absError = modelPredictions - yData
SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))
print('Parameters:', fittedParameters)
print('RMSE:', RMSE)
print('R-squared:', Rsquared)
print()
##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)
    # first the raw data as a scatter plot
    axes.plot(xData, yData, 'D')
    # create data for the fitted equation plot
    xModel = numpy.linspace(min(xData), max(xData))
    yModel = func(xModel, *fittedParameters)
    # now the model as a line plot
    axes.plot(xModel, yModel)
    axes.set_xlabel('X Data') # X axis data label
    axes.set_ylabel('Y Data') # Y axis data label
    plt.show()
    plt.close('all') # clean up after using pyplot
graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)

Python scipy.optimize.curve_fit gives linear fit

I have come across a problem when playing with the parameters of curve_fit from scipy. I initially copied the code suggested by the docs. I then changed the equation slightly and it was fine, but after increasing the np.linspace range, the whole prediction ended up being a straight line. Any ideas?
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
def f(x, a, b, c):
    # This works fine on smaller numbers
    return (a - c) * np.exp(-x / b) + c
xdata = np.linspace(60, 3060, 200)
ydata = f(xdata, 100, 400, 20)
# noise
np.random.seed(1729)
ydata = ydata + np.random.normal(size=xdata.size) * 0.2
# graph
fig, ax = plt.subplots()
plt.plot(xdata, ydata, marker="o")
pred, covar = curve_fit(f, xdata, ydata)
plt.plot(xdata, f(xdata, *pred), label="prediction")
plt.show()
You may need to start with a better guess: the default initial guess (1.0, 1.0, 1.0) seems to be in the divergent region.
I used the initial guess p0 = (50, 200, 100) and it works:
fig, ax = plt.subplots()
plt.plot(xdata, ydata, marker="o")
pred, covar = curve_fit(f, xdata, ydata, p0=(50, 200, 100))
plt.plot(xdata, f(xdata, *pred), label="prediction")
plt.legend() # show the line label
plt.show()
Here is example code using your data and equation, with the initial parameter estimates given by scipy's differential_evolution genetic algorithm module. That module uses the Latin Hypercube algorithm to ensure a thorough search of parameter space, which requires bounds within which to search; in this example those bounds are taken from the data maximum and minimum values. It is much easier to supply ranges for the initial parameter estimates than specific values.
import numpy, scipy, matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.optimize import differential_evolution
import warnings
def func(x, a, b, c):
    return (a - c) * numpy.exp(-x / b) + c
xData = numpy.linspace(60, 3060, 200)
yData = func(xData, 100, 400, 20)
# noise
numpy.random.seed(1729)
yData = yData + numpy.random.normal(size=xData.size) * 0.2
# function for genetic algorithm to minimize (sum of squared error)
def sumOfSquaredError(parameterTuple):
    warnings.filterwarnings("ignore") # do not print warnings by genetic algorithm
    val = func(xData, *parameterTuple)
    return numpy.sum((yData - val) ** 2.0)
def generate_Initial_Parameters():
    # min and max used for bounds
    maxX = max(xData)
    minX = min(xData)
    maxY = max(yData)
    minY = min(yData)
    parameterBounds = []
    parameterBounds.append([minY, maxY]) # search bounds for a
    parameterBounds.append([minX, maxX]) # search bounds for b
    parameterBounds.append([minY, maxY]) # search bounds for c
    # "seed" the numpy random number generator for repeatable results
    result = differential_evolution(sumOfSquaredError, parameterBounds, seed=3)
    return result.x
# by default (polish=True), differential_evolution finishes with a local search within the bounds
geneticParameters = generate_Initial_Parameters()
# now call curve_fit without bounds, using the genetic algorithm result as the
# initial guess, in case the best-fit parameters lie outside those bounds
fittedParameters, pcov = curve_fit(func, xData, yData, geneticParameters)
print('Fitted parameters:', fittedParameters)
print()
modelPredictions = func(xData, *fittedParameters)
absError = modelPredictions - yData
SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))
print()
print('RMSE:', RMSE)
print('R-squared:', Rsquared)
print()
##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)
    # first the raw data as a scatter plot
    axes.plot(xData, yData, 'D')
    # create data for the fitted equation plot
    xModel = numpy.linspace(min(xData), max(xData))
    yModel = func(xModel, *fittedParameters)
    # now the model as a line plot
    axes.plot(xModel, yModel)
    axes.set_xlabel('X Data') # X axis data label
    axes.set_ylabel('Y Data') # Y axis data label
    plt.show()
    plt.close('all') # clean up after using pyplot
graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)
This is due to a limitation of the Levenberg–Marquardt algorithm, which curve_fit uses by default. The right way to use it is to provide a decent initial guess for the parameters before optimizing. In my experience this is particularly important when fitting exponential functions like your example. With an iterative algorithm such as LM, the quality of the starting point determines where the result converges, and the more parameters you have, the more likely it is that the final result converges to a completely unwanted curve. Overall, the solution is to find a good initial guess somehow, as the other answers did.
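A quick way to see this on the problem above: at the default start (1.0, 1.0, 1.0), exp(-x/b) underflows to zero for every x in the data, so the model collapses to the constant c and the Jacobian with respect to a and b vanishes, leaving Levenberg–Marquardt nothing to work with. A minimal sketch, reusing the question's f and xdata:
import numpy as np

def f(x, a, b, c):
    return (a - c) * np.exp(-x / b) + c

xdata = np.linspace(60, 3060, 200)
# at b = 1.0 the largest factor over this data is exp(-60), about 8.8e-27,
# numerically indistinguishable from zero everywhere:
print(np.exp(-xdata / 1.0).max())
# the model is then effectively the constant c for any a and b, which is
# exactly the flat prediction the question describes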

How to use curve_fit in Python

I am studying nonlinear curve fitting with Python.
I made the example below, but the optimized curve is not drawn well by
plt.plot(basketCont, fittedData)
I guess the optimized parameters are not good either.
Could you give some recommendations? Thank you.
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import numpy as np
from scipy.optimize import curve_fit
def func(x, a, b, c):
    return a - b * np.exp(c * x)
baskets = np.array([475, 108, 2, 38, 320])
scaling_factor = np.array([95.5, 57.7, 1.4, 21.9, 88.8])
popt,pcov = curve_fit(func, baskets, scaling_factor)
print (popt)
print (pcov)
basketCont=np.linspace(min(baskets),max(baskets),50)
fittedData=[func(x, *popt) for x in basketCont]
fig1 = plt.figure(1)
plt.scatter(baskets, scaling_factor, s=5)
plt.plot(basketCont, fittedData)
plt.grid()
plt.show()
I personally could not get a good fit to your data using the equation you posted; however, the Hill sigmoidal equation gave a good fit. Here is the Python code for the graphical fitter I used.
import numpy, scipy, matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import warnings
baskets = numpy.array([475.0, 108.0, 2.0, 38.0, 320.0])
scaling_factor = numpy.array([95.5, 57.7, 1.4, 21.9, 88.8])
# rename data for simpler code re-use later
xData = baskets
yData = scaling_factor
def func(x, a, b, c): # Hill sigmoidal equation from zunzun.com
    return a * numpy.power(x, b) / (numpy.power(c, b) + numpy.power(x, b))
# these are the same as the scipy defaults
initialParameters = numpy.array([1.0, 1.0, 1.0])
# do not print unnecessary warnings during curve_fit()
warnings.filterwarnings("ignore")
# curve fit the test data
fittedParameters, pcov = curve_fit(func, xData, yData, initialParameters)
modelPredictions = func(xData, *fittedParameters)
absError = modelPredictions - yData
SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))
print('Parameters:', fittedParameters)
print('RMSE:', RMSE)
print('R-squared:', Rsquared)
print()
##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)
    # first the raw data as a scatter plot
    axes.plot(xData, yData, 'D')
    # create data for the fitted equation plot
    xModel = numpy.linspace(min(xData), max(xData))
    yModel = func(xModel, *fittedParameters)
    # now the model as a line plot
    axes.plot(xModel, yModel)
    axes.set_xlabel('X Data') # X axis data label
    axes.set_ylabel('Y Data') # Y axis data label
    plt.show()
    plt.close('all') # clean up after using pyplot
graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)

how to make a better curve fit while focusing on better accuracy

For the past two days I have been working on a specific data fit (the orange line of picture 1).
The thing is, I want it to be accurate for the larger θ in [0.1, 1]. As a matter of fact, I wanted it to start at the same point (so for θ=1 we get ψ=1 too) with this form:
ψ_f = a1*(1-x)**a2 + a3*(1-x)**a4 + 1
but that was very bad, as it blows up to infinity for larger θ.
For image 1 I used
scipy.optimize.curve_fit
with the simple form
ψ_f = a1*x**a2
Any other form was flat-out bad.
Any idea what to do? :(
EDIT:
The data is from this file, loaded with the following code:
ww=np.load('Hot3.npy')
s=ww[3]
z=np.array([ww[0],ww[1],ww[2]])
and xdata, ydata are
xdata = s/max(s)
ydata = z[2]/min(z[2])
Here is some example code that appears to give a better fit. Note that I have not taken any logs, nor plotted on a log scale.
import numpy, scipy, matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import warnings
ww=numpy.load('/home/zunzun/temp/hot3.npy')
xData = ww[3]/max(ww[3])
yData = ww[2]/min(ww[2])
def func(x, a, b, c): # Combined Power And Exponential equation from zunzun.com
    power = numpy.power(x, b)
    exponent = numpy.exp(c * x)
    return a * power * exponent
# curve_fit's defaults are all 1.0; try these instead
initialParameters = numpy.array([1.0,-1.0,-1.0])
# ignore intermediate overflow warning during curve_fit() routine
warnings.filterwarnings("ignore")
# curve fit the test data
fittedParameters, pcov = curve_fit(func, xData, yData, initialParameters)
modelPredictions = func(xData, *fittedParameters)
absError = modelPredictions - yData
SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))
print('Parameters:', fittedParameters)
print('RMSE:', RMSE)
print('R-squared:', Rsquared)
print()
##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)
    # first the raw data as a scatter plot
    axes.plot(xData, yData, 'o')
    # create data for the fitted equation plot
    xModel = numpy.linspace(min(xData), max(xData))
    yModel = func(xModel, *fittedParameters)
    # now the model as a line plot
    axes.plot(xModel, yModel)
    axes.set_xlabel('X Data') # X axis data label
    axes.set_ylabel('Y Data') # Y axis data label
    plt.show()
    plt.close('all') # clean up after using pyplot
graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)

how to do exponential nonlinear regression in python

I am trying to do nonlinear regression using the equation
y = a*e^(-bT)
where T is the temperature, with the data:
([26.67, 93.33, 148.89, 222.01, 315.56])
and y is the viscosity, with the data:
([1.35, .085, .012, .0049, .00075])
The goal is to determine the values of a and b WITHOUT linearizing the equation, and also to plot the graph. So far one method I've tried is:
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
def func(x, a, b):
    return a*(np.exp(-b * x))
#data
temp = np.array([26.67, 93.33, 148.89, 222.01, 315.56])
Viscosity = np.array([1.35, .085, .012, .0049, .00075])
initialGuess=[200,1]
guessedFactors=[func(x,*initialGuess ) for x in temp]
#curve fit
popt,pcov = curve_fit(func, temp, Viscosity,initialGuess)
print (popt)
print (pcov)
tempCont=np.linspace(min(temp),max(temp),50)
fittedData=[func(x, *popt) for x in tempCont]
fig1 = plt.figure(1)
ax=fig1.add_subplot(1,1,1)
###the three sets of data to plot
ax.plot(temp,Viscosity,linestyle='',marker='o', color='r',label="data")
ax.plot(temp,guessedFactors,linestyle='',marker='^', color='b',label="initial guess")
###beautification
ax.legend(loc=0, title="graphs", fontsize=12)
ax.set_ylabel("Viscosity")
ax.set_xlabel("temp")
ax.grid()
ax.set_title("$\mathrm{curve}_\mathrm{fit}$")
###putting the covariance matrix nicely
tab= [['{:.2g}'.format(j) for j in i] for i in pcov]
the_table = plt.table(cellText=tab,
                      colWidths=[0.2]*3,
                      loc='upper right', bbox=[0.483, 0.35, 0.5, 0.25])
plt.text(250,65,'covariance:',size=12)
###putting the plot
plt.show()
I'm pretty sure I've made it overly complicated and messed up.
Here is example code using your data and equation, with scipy's differential_evolution genetic algorithm used to determine initial parameter estimates for the non-linear fitter. The scipy implementation of differential evolution uses the Latin Hypercube algorithm to ensure a thorough search of parameter space; here I have given ranges within which I thought the fitted parameters should lie.
import numpy, scipy, matplotlib
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy.optimize import differential_evolution
import warnings
xData = numpy.array([26.67, 93.33, 148.89, 222.01, 315.56])
yData = numpy.array([1.35, .085, .012, .0049, .00075])
def func(T, a, b):
    return a * numpy.exp(-b*T)
# function for genetic algorithm to minimize (sum of squared error)
def sumOfSquaredError(parameterTuple):
    warnings.filterwarnings("ignore") # do not print warnings by genetic algorithm
    val = func(xData, *parameterTuple)
    return numpy.sum((yData - val) ** 2.0)
def generate_Initial_Parameters():
    parameterBounds = []
    parameterBounds.append([0.0, 10.0]) # search bounds for a
    parameterBounds.append([-1.0, 1.0]) # search bounds for b
    # "seed" the numpy random number generator for repeatable results
    result = differential_evolution(sumOfSquaredError, parameterBounds, seed=3)
    return result.x
# by default (polish=True), differential_evolution finishes with a local search within the bounds
geneticParameters = generate_Initial_Parameters()
# now call curve_fit without bounds, using the genetic algorithm result as the
# initial guess, in case the best-fit parameters lie outside those bounds
fittedParameters, pcov = curve_fit(func, xData, yData, geneticParameters)
print('Fitted parameters:', fittedParameters)
print()
modelPredictions = func(xData, *fittedParameters)
absError = modelPredictions - yData
SE = numpy.square(absError) # squared errors
MSE = numpy.mean(SE) # mean squared errors
RMSE = numpy.sqrt(MSE) # Root Mean Squared Error, RMSE
Rsquared = 1.0 - (numpy.var(absError) / numpy.var(yData))
print()
print('RMSE:', RMSE)
print('R-squared:', Rsquared)
print()
##########################################################
# graphics output section
def ModelAndScatterPlot(graphWidth, graphHeight):
    f = plt.figure(figsize=(graphWidth/100.0, graphHeight/100.0), dpi=100)
    axes = f.add_subplot(111)
    # first the raw data as a scatter plot
    axes.plot(xData, yData, 'D')
    # create data for the fitted equation plot
    xModel = numpy.linspace(min(xData), max(xData))
    yModel = func(xModel, *fittedParameters)
    # now the model as a line plot
    axes.plot(xModel, yModel)
    axes.set_xlabel('temp') # X axis data label
    axes.set_ylabel('viscosity') # Y axis data label
    plt.show()
    plt.close('all') # clean up after using pyplot
graphWidth = 800
graphHeight = 600
ModelAndScatterPlot(graphWidth, graphHeight)
