I am learning data science and machine learning, and I have written code for gradient descent optimization of the linear regression cost function without using a built-in Python library. However, just to confirm that my code is correct and to verify the results, I have also implemented the same thing using a built-in Python library.
The coefficient and intercept values I obtain through my code do not match the coefficient and intercept values obtained with the built-in Python module. Could you kindly point out the error in my gradient descent optimization of linear regression?
my method:
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDRegressor
Data=pd.DataFrame({'X': list(np.arange(0,10,1)), 'Y': [1,3,2,5,7,8,8,9,10,12]})
Data.head()
sb.scatterplot(x ='X', y = 'Y', data = Data)
plt.show()
#generating column of ones
X0 = np.ones(len(Data)).reshape(-1,1)
#print(X0.shape)
X = Data.drop(['Y'], axis = 1).values
X_new = np.concatenate((X0,X), axis = 1)
#print(X_new)
#print(X_new.shape)
Y = Data.loc[:,['Y']].values
#print(Y)
#print(Y.shape)
# initial theta
theta =np.random.randint(low=0, high=1, size= X_new.shape[1]).reshape(-1,1)
#print(theta.shape)
J_history = []
theta_history = [list(theta.flatten())]
#gradient descent implementation
iterations = 1000
alpha = 0.01
m = len(Y)
for iter in range(1, iterations):
    H = X_new.dot(theta)
    loss = (H - Y)
    J = loss/(2*m)
    J_history.append(J)
    G = X_new.T.dot(loss)/m
    theta_new = theta - alpha*G
    theta_history.append(list(theta_new.flatten()))
    theta = theta_new
# collecting costs (J) and coefficients (theta_0,theta_1)
theta_history.pop()
J_history = [i[0] for i in J_history]
params = pd.DataFrame()
params['J']=J_history
for i in range(len(theta_history[0])):
    params['theta_'+str(i)] = [k[i] for k in theta_history]
idx = params[params['J']==min(params['J'])].index
values = params.iloc[idx[0]][1:params.shape[1]].tolist()
print('intercept: {}, coeff: {}'.format(values[0],values[1]))
Using the built-in library:
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDRegressor
Data=pd.DataFrame({'X': list(np.arange(0,10,1)), 'Y': [1,3,2,5,7,8,8,9,10,12]})
Data.head()
sb.scatterplot(x ='X', y = 'Y', data = Data)
plt.show()
model = SGDRegressor(loss = 'squared_loss', learning_rate = 'constant', eta0 = 0.01, max_iter= 1000)
model.fit(Data['X'].values.reshape(-1,1), Data['Y'].values.reshape(-1,1))
print('coeff: {}, intercept: {}'.format(model.coef_, model.intercept_))
First of all, I appreciate your effort to understand and implement the SGD algorithm by yourself.
Now, back to your code. There are some minor errors that need to be corrected:
Your Js are not scalars but numpy arrays, while the way you are using them assumes they are scalars; hence the error raised when your code is executed (a scalar cost is sketched right after these points).
After running your chain, you must take the theta with the lowest error, and this error is actually J^2 and not J, as J may be negative as well.
The scikit-learn SGDRegressor you are using is, as its name suggests, stochastic by definition, and given the small size of your dataset you need to run it many times and average its estimates if you want to get something reliable out of it.
Your learning rate of 0.01 seems to be a little too big.
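For reference, a minimal sketch of a scalar cost per iteration (the usual 1/(2m) sum of squared residuals), reusing the loss, m and J_history names from the loop above; this is only a suggestion on top of the corrected code below, which instead tracks the (scaled) squared first residual:

J = float(np.sum(loss ** 2) / (2 * m))   # one scalar cost value per iteration
J_history.append(J)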
When those changes are made, I get results from your code that are "comparable" with SGDRegressor.
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDRegressor
Data=pd.DataFrame({'X': list(np.arange(0,10,1)), 'Y': [1,3,2,5,7,8,8,9,10,12]})
Data.head()
sb.scatterplot(x ='X', y = 'Y', data = Data)
plt.show()
#generating column of ones
X0 = np.ones(len(Data)).reshape(-1,1)
#print(X0.shape)
X = Data.drop(['Y'], axis = 1).values
X_new = np.concatenate((X0,X), axis = 1)
#print(X_new)
#print(X_new.shape)
Y = Data.loc[:,['Y']].values
#print(Y)
#print(Y.shape)
# initial theta
theta =np.random.randint(low=0, high=1, size= X_new.shape[1]).reshape(-1,1)
#print(theta.shape)
J_history = []
theta_history = [list(theta.flatten())]
#gradient descent implementation
iterations = 2000
alpha = 0.001
m = len(Y)
for iter in range(1, iterations):
    H = X_new.dot(theta)
    loss = (H - Y)
    J = loss/(2*m)
    J_history.append(J[0]**2)
    G = X_new.T.dot(loss)/m
    theta_new = theta - alpha*G
    theta_history.append(list(theta_new.flatten()))
    theta = theta_new
theta_history.pop()
J_history = [i[0] for i in J_history]
# collecting costs (J) and coefficients (theta_0,theta_1)
params = pd.DataFrame()
params['J']=J_history
for i in range(len(theta_history[0])):
    params['theta_'+str(i)] = [k[i] for k in theta_history]
idx = params[params['J']== params['J'].min()].index
values = params.iloc[idx[0]][1:params.shape[1]].tolist()
print('intercept: {}, coeff: {}'.format(values[0],values[1]))
#> intercept: 0.654041555750147, coeff: 1.2625626277290982
Now let's see the scikit-learn model:
from sklearn.linear_model import SGDRegressor
intercepts = []
coefs = []
for _ in range(500):
    model = SGDRegressor(loss='squared_loss', learning_rate='constant', eta0=0.01, max_iter=1000)
    model.fit(Data['X'].values.reshape(-1,1), Data['Y'].values.reshape(-1))
    intercepts.append(model.intercept_)
    coefs.append(model.coef_)
intercept = np.concatenate(intercepts).mean()
coef = np.vstack(coefs).mean(0)
print('intercept: {}, coeff: {}'.format( intercept, coef))
#> intercept: 0.6912403374422401, coeff: [1.24932246]
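As an additional sanity check (not part of the original code, just a suggestion), the exact ordinary least-squares solution can be computed in closed form via the normal equations, and both the gradient descent loop and SGDRegressor should land close to it. Using the X_new and Y arrays defined above:

# closed-form OLS solution for comparison
theta_ols, _, _, _ = np.linalg.lstsq(X_new, Y, rcond=None)
print('intercept: {}, coeff: {}'.format(theta_ols[0][0], theta_ols[1][0]))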
Following the recommendations in this answer, I have used several combinations of values for beta0, including, as shown here, the values from polyfit.
This example is UPDATED to show the effect of the relative scales of the values of X versus Y (the X range is 0.1 to 100 times Y):
from random import random, seed
from scipy import polyfit
from scipy import odr
import numpy as np
from matplotlib import pyplot as plt
seed(1)
X = np.array([random() for i in range(1000)])
Y = np.array([i + random()**2 for i in range(1000)])
for num in range(1, 5):
    plt.subplot(2, 2, num)
    plt.title('X range is %.1f times Y' % (float(100 / max(X))))
    X *= 10
    z = np.polyfit(X, Y, 1)
    plt.plot(X, Y, 'k.', alpha=0.1)

    # Fit using odr
    def f(B, X):
        return B[0]*X + B[1]

    linear = odr.Model(f)
    mydata = odr.RealData(X, Y)
    myodr = odr.ODR(mydata, linear, beta0=z)
    myodr.set_job(fit_type=0)
    myoutput = myodr.run()
    a, b = myoutput.beta
    sa, sb = myoutput.sd_beta
    xp = np.linspace(plt.xlim()[0], plt.xlim()[1], 1000)
    yp = a*xp + b
    plt.plot(xp, yp, label='ODR')
    yp2 = z[0]*xp + z[1]
    plt.plot(xp, yp2, label='polyfit')
    plt.legend()
    plt.ylim(-1000, 2000)
plt.show()
It seems that no combination of beta0 helps... The only way to get polyfit and the ODR fit to be similar is to swap X and Y, or, as shown here, to increase the range of values of X with respect to Y, which is still not really a solution :)
=== EDIT ===
I do not want ODR to be the same as polyfit. I am showing polyfit just to emphasize that the ODR fit is wrong and that it is not a problem with the data.
=== SOLUTION ===
Thanks to norok2's answer, here is the working version when the Y range is 0.001 to 100000 times X:
from random import random, seed
from scipy import polyfit
from scipy import odr
import numpy as np
from matplotlib import pyplot as plt
seed(1)
X = np.array([random() / 1000 for i in range(1000)])
Y = np.array([i + random()**2 for i in range(1000)])
plt.figure(figsize=(12, 12))
for num in range(1, 10):
    plt.subplot(3, 3, num)
    plt.title('Y range is %.1f times X' % (float(100 / max(X))))
    X *= 10
    z = np.polyfit(X, Y, 1)
    plt.plot(X, Y, 'k.', alpha=0.1)

    # Fit using odr
    def f(B, X):
        return B[0]*X + B[1]

    linear = odr.Model(f)
    mydata = odr.RealData(X, Y,
                          sy=min(1/np.var(Y), 1/np.var(X)))  # here the trick!! :)
    myodr = odr.ODR(mydata, linear, beta0=z)
    myodr.set_job(fit_type=0)
    myoutput = myodr.run()
    a, b = myoutput.beta
    sa, sb = myoutput.sd_beta
    xp = np.linspace(plt.xlim()[0], plt.xlim()[1], 1000)
    yp = a*xp + b
    plt.plot(xp, yp, label='ODR')
    yp2 = z[0]*xp + z[1]
    plt.plot(xp, yp2, label='polyfit')
    plt.legend()
    plt.ylim(-1000, 2000)
plt.show()
The key difference between polyfit() and the Orthogonal Distance Regression (ODR) fit is that polyfit works under the assumption that the error on x is negligible. If this assumption is violated, as it is in your data, you cannot expect the two methods to produce similar results.
In particular, ODR() is very sensitive to the errors you specify.
If you do not specify any error/weighting, it will assign a value of 1 for both x and y, meaning that any scale difference between x and y will affect the results (the so-called numerical conditioning).
On the contrary, polyfit(), before computing the fit, applies some sort of pre-whitening to the data (see around line 577 of its source code) for better numerical conditioning.
Therefore, if you want ODR() to match polyfit(), you could simply fine-tune the error on Y to change your numerical conditioning. Change:
mydata = odr.RealData(X, Y)
# equivalent to: odr.RealData(X, Y, sx=1, sy=1)
to:
mydata = odr.RealData(X, Y, sx=1, sy=1/np.var(Y))
I tested that this works for any numerical conditioning of your Y between 1e-10 and 1e10 (in your example it is / 10., i.e. 1e-1).
Note that this would only make sense for well-conditioned fits.
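A different, rather blunt alternative (not used in the answer above) is to ask scipy.odr for an ordinary least-squares fit explicitly via set_job(fit_type=2), which by construction matches what polyfit does. A minimal self-contained sketch with toy data:

import numpy as np
from scipy import odr

def f(B, x):
    return B[0]*x + B[1]

x = np.linspace(0.0, 1.0, 50)
y = 3.0*x + 2.0 + 0.1*np.random.randn(50)   # toy data

z = np.polyfit(x, y, 1)                     # OLS fit, also used as beta0
myodr = odr.ODR(odr.RealData(x, y), odr.Model(f), beta0=z)
myodr.set_job(fit_type=2)                   # fit_type=2 -> ordinary least squares
out = myodr.run()
print(z, out.beta)                          # the two estimates should agree closely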
I cannot format source code in a comment, so I am placing it here. This code uses ODR to calculate the fit statistics; note the line commented "parameter order for odr", where a wrapper function is used so that the ODR call matches the argument order of my "actual" function. Because maxit=0 and fit_type=2 (ordinary least squares), ODR performs no refitting here and simply evaluates the statistics at the curve_fit parameter estimates.
from scipy.optimize import curve_fit
import numpy as np
import scipy.odr
import scipy.stats
x = np.array([5.357, 5.797, 5.936, 6.161, 6.697, 6.731, 6.775, 8.442, 9.861])
y = np.array([0.376, 0.874, 1.049, 1.327, 2.054, 2.077, 2.138, 4.744, 7.104])
def f(x, b0, b1):
    return b0 + (b1 * x)

def f_wrapper_for_odr(beta, x):  # parameter order for odr
    return f(x, *beta)
parameters, cov= curve_fit(f, x, y)
model = scipy.odr.odrpack.Model(f_wrapper_for_odr)
data = scipy.odr.odrpack.Data(x,y)
myodr = scipy.odr.odrpack.ODR(data, model, beta0=parameters, maxit=0)
myodr.set_job(fit_type=2)
parameterStatistics = myodr.run()
df_e = len(x) - len(parameters) # degrees of freedom, error
cov_beta = parameterStatistics.cov_beta # parameter covariance matrix from ODR
sd_beta = parameterStatistics.sd_beta * parameterStatistics.sd_beta
t_df = scipy.stats.t.ppf(0.975, df_e)
ci = []
for i in range(len(parameters)):
    ci.append([parameters[i] - t_df * parameterStatistics.sd_beta[i], parameters[i] + t_df * parameterStatistics.sd_beta[i]])
tstat_beta = parameters / parameterStatistics.sd_beta # coeff t-statistics
pstat_beta = (1.0 - scipy.stats.t.cdf(np.abs(tstat_beta), df_e)) * 2.0 # coef. p-values
for i in range(len(parameters)):
    print('parameter:', parameters[i])
    print('   conf interval:', ci[i][0], ci[i][1])
    print('   tstat:', tstat_beta[i])
    print('   pstat:', pstat_beta[i])
    print()
I am using PyMC to run a Gibbs sampler on a simple model with the data set as a list of 110 elements (55 observations in each dimension).
log y[i,j,k] = alpha[i,k] + beta[j,k] + mu[k]
where log y follows a multivariate normal distribution (because k = 2) with some covariance matrix that is modeled as rho, sigma1 and sigma2.
After taking log-transformation, the data becomes a list of 110 numbers ranging from 6 to 15.
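For reference, assuming sigma1 and sigma2 denote the two variances (as the comments in the code below indicate), the covariance matrix and the precision matrix this parameterisation corresponds to are:

$$
\Sigma = \begin{pmatrix} \sigma_1 & \rho\sqrt{\sigma_1\sigma_2} \\ \rho\sqrt{\sigma_1\sigma_2} & \sigma_2 \end{pmatrix},
\qquad
\Sigma^{-1} = \frac{1}{\sigma_1\sigma_2\,(1-\rho^2)}
\begin{pmatrix} \sigma_2 & -\rho\sqrt{\sigma_1\sigma_2} \\ -\rho\sqrt{\sigma_1\sigma_2} & \sigma_1 \end{pmatrix}.
$$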
This is the piece of code that I have used:
import numpy as np
import pymc as pm
from pymc import Normal, Uniform, MvNormal, Exponential, Gamma, InverseGamma
from pymc import MCMC
mu = np.zeros(2, dtype=object)
alpha = np.zeros([10,2], dtype = object)
beta = np.zeros([10,2], dtype = object)
for k in range(2):
    mu[k] = Normal('mu_{}'.format(k), 0, 1000)
    for i in range(0, 10):
        alpha[i][k] = Normal('alpha_{}_{}'.format(i, k), 0, 1000)
        beta[i][k] = Normal('beta_{}_{}'.format(i, k), 0, 1000)

rho = Uniform('rho', lower=-1, upper=1)
sigma1 = InverseGamma('sigma1', 2.0001, 1)  # sigma squared
sigma2 = InverseGamma('sigma2', 2.0001, 1)

@pm.deterministic
def PRECISION():
    PREC = [[sigma2/(sigma1*sigma2*(1-rho)), (-rho*(sigma1*sigma2)**0.5)/(sigma1*sigma2*(1-rho))],
            [(-rho*(sigma1*sigma2)**0.5)/(sigma1*sigma2*(1-rho)), sigma1/(sigma1*sigma2*(1-rho))]]
    return PREC
mean = np.zeros([10,10,2])
mean_list_1 = []
mean_list_2 = []
for i in range(10):
    for j in range(10):
        mean[i,j,0] = mu[0] + alpha[i][0] + beta[j][0]
        mean_list_1.append(mean[i,j,0])
        mean[i,j,1] = mu[1] + alpha[i][1] + beta[j][1]
        mean_list_2.append(mean[i,j,1])
#Restructure the vector
bi_mean = np.zeros(55, dtype = object)
bi_data = np.zeros(55, dtype = object)
log_Y = np.zeros(55, dtype = object)
for i in range(55):
    bi_mean[i] = [mean_list_1[i], mean_list_2[i]]
    bi_data[i] = [data[i], data[i+55]]

log_Y = [pm.MvNormal('log-Y_{}'.format(i), bi_mean[i], PRECISION, value=bi_data[i], observed=True) for i in range(55)]
monitor_list = [sigma1, sigma2, rho,mu, alpha, beta,log_Y]
model = MCMC([monitor_list],calc_deviance=True)
model.sample(iter=10000, burn=5000, thin=5)
I tried running it in PyMC, but the resulting values of alpha and beta are too small to match the magnitude of the observations. Is there a way I can check where I am going wrong? Thank you.
I am trying to deblend the emission lines of low resolution spectrum in order to get the gaussian components. This plot represents the kind of data I am using:
After searching a bit, the only option I found was the gauest function from the kmpfit package (http://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html#gauest). I have copied their example, but I cannot make it work.
I wonder if anyone could offer me an alternative way to do this, or tell me how to correct my code:
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
def CurveData():
    x = np.array([3963.67285156, 3964.49560547, 3965.31835938, 3966.14111328, 3966.96362305,
                  3967.78637695, 3968.60913086, 3969.43188477, 3970.25463867, 3971.07714844,
                  3971.89990234, 3972.72265625, 3973.54541016, 3974.36791992, 3975.19067383])
    y = np.array([1.75001533e-16, 2.15520995e-16, 2.85030769e-16, 4.10072843e-16, 7.17558032e-16,
                  1.27759917e-15, 1.57074192e-15, 1.40802933e-15, 1.45038722e-15, 1.55195653e-15,
                  1.09280316e-15, 4.96611341e-16, 2.68777266e-16, 1.87075114e-16, 1.64335999e-16])
    return x, y
def FindMaxima(xval, yval):
    xval = np.asarray(xval)
    yval = np.asarray(yval)
    sort_idx = np.argsort(xval)
    yval = yval[sort_idx]
    gradient = np.diff(yval)
    maxima = np.diff((gradient > 0).view(np.int8))
    ListIndeces = np.concatenate((([0],) if gradient[0] < 0 else ()) + (np.where(maxima == -1)[0] + 1,) + (([len(yval)-1],) if gradient[-1] > 0 else ()))
    X_Maxima, Y_Maxima = [], []
    for index in ListIndeces:
        X_Maxima.append(xval[index])
        Y_Maxima.append(yval[index])
    return X_Maxima, Y_Maxima
def GaussianMixture_Model(p, x, ZeroLevel):
    y = 0.0
    N_Comps = int(len(p) / 3)
    for i in range(N_Comps):
        A, mu, sigma = p[i*3:(i+1)*3]
        y += A * np.exp(-(x-mu)*(x-mu)/(2.0*sigma*sigma))
    Output = y + ZeroLevel
    return Output

def Residuals_GaussianMixture(p, x, y, ZeroLevel):
    return GaussianMixture_Model(p, x, ZeroLevel) - y
Wave, Flux = CurveData()
Wave_Maxima, Flux_Maxima = FindMaxima(Wave, Flux)
EmLines_Number = len(Wave_Maxima)
ContinuumLevel = 1.64191e-16
# Define initial values
p_0 = []
for i in range(EmLines_Number):
    p_0.append(Flux_Maxima[i])
    p_0.append(Wave_Maxima[i])
    p_0.append(2.0)
p1, conv = optimize.leastsq(Residuals_GaussianMixture, p_0[:],args=(Wave, Flux, ContinuumLevel))
Fig = plt.figure(figsize = (16, 10))
Axis1 = Fig.add_subplot(111)
Axis1.plot(Wave, Flux, label='Emission line')
Axis1.plot(Wave, GaussianMixture_Model(p1, Wave, ContinuumLevel), 'r', label='Fit with optimize.leastsq')
print p1
Axis1.plot(Wave, GaussianMixture_Model([p1[0],p1[1],p1[2]], Wave, ContinuumLevel), 'g:', label='Gaussian components')
Axis1.plot(Wave, GaussianMixture_Model([p1[3],p1[4],p1[5]], Wave, ContinuumLevel), 'g:')
Axis1.set_xlabel( r'Wavelength $(\AA)$',)
Axis1.set_ylabel('Flux' + r'$(erg\,cm^{-2} s^{-1} \AA^{-1})$')
plt.legend()
plt.show()
A typical simplistic way to fit:
def model(p, x):
    A, x1, sig1, B, x2, sig2 = p
    return A*np.exp(-(x-x1)**2/sig1**2) + B*np.exp(-(x-x2)**2/sig2**2)

def res(p, x, y):
    return model(p, x) - y
from scipy import optimize
p0 = [1e-15, 3968, 2, 1e-15, 3972, 2]
p1, conv = optimize.leastsq(res, p0[:], args=(x, y))

plot(x, y, '+')  # data (assumes a pylab-style namespace, e.g. from pylab import *)
# fitted function
plot(arange(3962, 3976, 0.1), model(p1, arange(3962, 3976, 0.1)), '-')
Where p0 is your initial guess. By the looks of things, you might want to use Lorentzian functions...
If you use full_output=True, you get all kinds of info about the fitting. Also check out curve_fit and the fmin* functions in scipy.optimize. There are plenty of wrappers around these, but often, as here, it's easier to use them directly.
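For completeness, a sketch of the same two-Gaussian model fitted with curve_fit instead of leastsq (assuming the x, y arrays returned by CurveData() in the question; p0 is the same initial guess as above):

from scipy.optimize import curve_fit
import numpy as np

def two_gauss(x, A, x1, sig1, B, x2, sig2):
    return A*np.exp(-(x-x1)**2/sig1**2) + B*np.exp(-(x-x2)**2/sig2**2)

p0 = [1e-15, 3968, 2, 1e-15, 3972, 2]
popt, pcov = curve_fit(two_gauss, x, y, p0=p0)   # fitted amplitudes, centres and widths
print(popt)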
I am working on fitting a 3D distribution function in scipy. I have a numpy array with counts in x- and y-bins, and I am trying to fit it to a rather complicated 3D distribution function. The data are fit with 26 (!) parameters, which describe the shapes of its two constituent populations.
I learned here that I have to pass my x- and y-coordinates as 'args' when I call leastsq. The code presented by unutbu works as written for me, but when I try to apply it to my specific case, I get the error "TypeError: leastsq() got multiple values for keyword argument 'args'".
Here's my code (sorry for the length):
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as spopt
from textwrap import wrap
import collections
cl = 0.5
ch = 3.5
rl = -23.5
rh = -18.5
mbins = 10
cbins = 10
def hist_data(mixed_data, mbins, cbins):
    import numpy as np
    H, xedges, yedges = np.histogram2d(mixed_data[:,1], mixed_data[:,2], bins=(mbins, cbins), weights=mixed_data[:,3])
    x, y = 0.5 * (xedges[:-1] + xedges[1:]), 0.5 * (yedges[:-1] + yedges[1:])
    return H.T, x, y
def gauss(x, s, mu, a):
    import numpy as np
    return a * np.exp(-((x - mu)**2. / (2. * s**2.)))

def tanhlin(x, p0, p1, q0, q1, q2):
    import numpy as np
    return p0 + p1 * (x + 20.) + q0 * np.tanh((x - q1)/q2)
def func3d(p, x, y):
    import numpy as np
    from sys import exit
    rsp0, rsp1, rsq0, rsq1, rsq2, rmp0, rmp1, rmq0, rmq1, rmq2, rs, rm, ra, bsp0, bsp1, bsq0, bsq1, bsq2, bmp0, bmp1, bmq0, bmq1, bmq2, bs, bm, ba = p
    x, y = np.meshgrid(coords[0], coords[1])
    rs = tanhlin(x, rsp0, rsp1, rsq0, rsq1, rsq2)
    rm = tanhlin(x, rmp0, rmp1, rmq0, rmq1, rmq2)
    ra = schechter(x, rap, raa, ram)  # unused
    bs = tanhlin(x, bsp0, bsp1, bsq0, bsq1, bsq2)
    bm = tanhlin(x, bmp0, bmp1, bmq0, bmq1, bmq2)
    ba = schechter(x, bap, baa, bam)  # unused
    red_dist = ra / (rs * np.sqrt(2 * np.pi)) * gauss(y, rs, rm, ra)
    blue_dist = ba / (bs * np.sqrt(2 * np.pi)) * gauss(y, bs, bm, ba)
    result = red_dist + blue_dist
    return result
def residual(p, coords, data):
    import numpy as np
    model = func3d(p, coords)
    res = (model.flatten() - data.flatten())
    # can put parameter restrictions in here
    return res

def poiss_err(data):
    import numpy as np
    return np.where(np.sqrt(H) > 0., np.sqrt(H), 2.)
# =====
H, x, y = hist_data(mixed_data, mbins, cbins)
data = H
coords = x, y
# x and y will be the projected coordinates of the data H onto the plane z = 0
# x has bins of width 0.5, with centers at -23.25, -22.75, ... , -19.25, -18.75
# y has bins of width 0.3, with centers at 0.65, 0.95, ... , 3.05, 3.35
Param = collections.namedtuple('Param', 'rsp0 rsp1 rsq0 rsq1 rsq2 rmp0 rmp1 rmq0 rmq1 rmq2 rs rm ra bsp0 bsp1 bsq0 bsq1 bsq2 bmp0 bmp1 bmq0 bmq1 bmq2 bs bm ba')
p_guess = Param(rsp0 = 0.152, rsp1 = 0.008, rsq0 = 0.044, rsq1 = -19.91, rsq2 = 0.94, rmp0 = 2.279, rmp1 = -0.037, rmq0 = -0.108, rmq1 = -19.81, rmq2 = 0.96, rs = 1., rm = -20.5, ra = 10000., bsp0 = 0.298, bsp1 = 0.014, bsq0 = -0.067, bsq1 = -19.90, bsq2 = 0.58, bmp0 = 1.790, bmp1 = -0.053, bmq0 = -0.363, bmq1 = -20.75, bmq2 = 1.12, bs = 1., bm = -20., ba = 2000.)
opt, cov, infodict, mesg, ier = spopt.leastsq(residual, p_guess, poiss_err(H), args = coords, maxfev = 100000, full_output = True)
Here's my data, just with fewer bins:
[[ 1.00000000e+01 1.10000000e+01 2.10000000e+01 1.90000000e+01
1.70000000e+01 2.10000000e+01 2.40000000e+01 1.90000000e+01
2.80000000e+01 1.90000000e+01]
[ 1.40000000e+01 4.50000000e+01 6.00000000e+01 6.80000000e+01
1.34000000e+02 1.97000000e+02 2.23000000e+02 2.90000000e+02
3.23000000e+02 3.03000000e+02]
[ 3.00000000e+01 1.17000000e+02 3.78000000e+02 9.74000000e+02
1.71900000e+03 2.27700000e+03 2.39000000e+03 2.25500000e+03
1.85600000e+03 1.31000000e+03]
[ 1.52000000e+02 9.32000000e+02 2.89000000e+03 5.23800000e+03
6.66200000e+03 6.19100000e+03 4.54900000e+03 3.14600000e+03
2.09000000e+03 1.33800000e+03]
[ 5.39000000e+02 2.58100000e+03 6.51300000e+03 8.89900000e+03
8.52900000e+03 6.22900000e+03 3.55000000e+03 2.14300000e+03
1.19000000e+03 6.92000000e+02]
[ 1.49600000e+03 4.49200000e+03 8.77200000e+03 1.07610000e+04
9.76700000e+03 7.04900000e+03 4.23200000e+03 2.47200000e+03
1.41500000e+03 7.02000000e+02]
[ 2.31800000e+03 7.01500000e+03 1.28870000e+04 1.50840000e+04
1.35590000e+04 8.55600000e+03 4.15600000e+03 1.77100000e+03
6.57000000e+02 2.55000000e+02]
[ 1.57500000e+03 3.79300000e+03 5.20900000e+03 4.77800000e+03
3.26600000e+03 1.44700000e+03 5.31000000e+02 1.85000000e+02
9.30000000e+01 4.90000000e+01]
[ 7.01000000e+02 1.21600000e+03 1.17600000e+03 7.93000000e+02
4.79000000e+02 2.02000000e+02 8.80000000e+01 3.90000000e+01
2.30000000e+01 1.90000000e+01]
[ 2.93000000e+02 3.93000000e+02 2.90000000e+02 1.97000000e+02
1.18000000e+02 6.40000000e+01 4.10000000e+01 1.20000000e+01
1.10000000e+01 4.00000000e+00]]
Thanks very much!
So what leastsq does is try to:
"Minimize the sum of squares of a set of equations"
-scipy docs
As it says, it minimizes a set of functions, and therefore it doesn't actually take any x or y data inputs in the easiest manner; if you look at the arguments here, you can do it as you like and pass a residual function. However, it's significantly easier to just use curve_fit, which does it for you and creates the necessary equations.
For fitting you should use curve_fit if you are OK with the generic residual it uses, which is actually built from the function you pass itself (res = leastsq(func, p0, args=args, full_output=1, **kw) if you look in the code here).
E.g., if I fit the Rosenbrock function in 2D and guess the y-parameter:
from scipy.optimize import curve_fit
from itertools import imap
import numpy as np
# use only an even number of arguments
def rosen2d(x, a):
    return (1-x)**2 + 100*(a - (x**2))**2
#generate some random data slightly off
datax = np.array([.01*x for x in range(-10,10)])
datay = 2.3
dataz = np.array(map(lambda x: rosen2d(x,datay), datax))
optimalparams, covmatrix = curve_fit(rosen2d, datax, dataz)
print 'opt:',optimalparams
Fitting the Colville function in 4D:
from scipy.optimize import curve_fit
import numpy as np
# 4 dimensional colville function
# definition from http://www.sfu.ca/~ssurjano/colville.html
def colville(x, x3, x4):
    x1, x2 = x[:,0], x[:,1]
    return 100*(x1**2 - x2)**2 + (x1-1)**2 + (x3-1)**2 + \
           90*(x3**2 - x4)**2 + \
           10.1*((x2 - 1)**2 + (x4 - 1)**2) + \
           19.8*(x2 - 1)*(x4 - 1)
#generate some random data slightly off
datax = np.array([[x,x] for x in range(-10,10)])
#add gaussian noise
datax+= np.random.rand(*datax.shape)
#set 2 of the 4 parameters to constants
x3 = 3.5
x4 = 4.5
#calculate the function
dataz = colville(datax, x3, x4)
#fit the function
optimalparams, covmatrix = curve_fit(colville, datax, dataz)
print 'opt:',optimalparams
Using a custom residual function:
from scipy.optimize import leastsq
import numpy as np
# 4 dimensional colville function
# definition from http://www.sfu.ca/~ssurjano/colville.html
def colville(x, x3, x4):
    x1, x2 = x[:,0], x[:,1]
    return 100*(x1**2 - x2)**2 + (x1-1)**2 + (x3-1)**2 + \
           90*(x3**2 - x4)**2 + \
           10.1*((x2 - 1)**2 + (x4 - 1)**2) + \
           19.8*(x2 - 1)*(x4 - 1)
#generate some random data slightly off
datax = np.array([[x,x] for x in range(-10,10)])
#add gaussian noise
datax+= np.random.rand(*datax.shape)
#set 2 of the 4 parameters to constants
x3 = 3.5
x4 = 4.5
def residual(p, x, y):
    return y - colville(x, *p)
#calculate the function
dataz = colville(datax, x3, x4)
#guess some initial parameter values
p0 = [0,0]
#calculate a minimization of the residual
optimalparams = leastsq(residual, p0, args=(datax, dataz))[0]
print 'opt:',optimalparams
Edit: you used both the positional and the keyword argument for args: if you look at the docs, you'll see that args is the third positional argument, but it can also be passed as a keyword argument. You used both, which means the function is, as expected, confused.
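Concretely, for the leastsq call in the question, that means supplying the residual's extra inputs only once, through args (a sketch of the corrected call, assuming the residual(p, coords, data) signature defined in the question; the Poisson errors would have to be folded into the residual itself if you want to keep them):

opt, cov, infodict, mesg, ier = spopt.leastsq(
    residual, p_guess, args=(coords, data),
    maxfev=100000, full_output=True)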