I need to fit data (x axis: sigma, y axis: Mbh) with an exponential model. This is my code:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
#define my data
Mbh = np.array([1.8e6,2.5e6,4.5e7,3.7e7,4.4e7,1.5e7,1.4e7,4.1e7, 1.0e9,2.1e8,1.0e8,1.0e8,1.6e7,1.9e8,3.9e7,5.2e8,3.1e8,3.0e8,7.0e7,1.1e8,3.0e9,5.6e7,7.8e7,2.0e9,1.7e8,1.4e7,2.4e8,5.3e8,3.3e8,3.5e6,2.5e9])
sigma = np.array([103,75,160,209,205,151,175,140,230,205,145,206,143,182,130,315,242,225,186,190,375,162,152,385,177,90,234,290,266,67,340])
#define my model to fit
def Mbh02(alpha, sigma, beta):
    return alpha * np.exp(beta * sigma)
#calculate the fit parameter:
#for second model
popt02, pcov02 = curve_fit(Mbh02, sigma, Mbh, p0=[1, 0.058])
print(f'Parameter of the second function : {popt02}')
sigma_plot = [103,75,160,209,205,151,175,140,230,205,145,206,143,182,130,315,242,225,186,190,375,162,152,385,177,90,234,290,266,67,340]
sigma_plot.sort()
sigma_plot = np.array(sigma_plot)
#plot model with data with
plt.figure(figsize=(6,6))
plt.scatter(sigma, Mbh * 1e-9, marker = '+', color ='black', label = 'Data')
plt.plot(sigma_plot , Mbh02(alpha = popt02[0], sigma = sigma_plot, beta = popt02[1]) * 1e-9, color='orange', ls ='-', label ='2. fit')
plt.ylabel(r'$M_{BH}$ in $M_\odot *10^9$ unit', fontsize=16)
plt.xlabel(r'$\sigma$', fontsize=16)
# plt.ylim(-1,10)
plt.title('Plot of the black hole mass $M_{BH}$ \nagainst the velocity dispersion $\sigma$ \nfor different elliptical galaxies', fontsize=18)
plt.grid(True)
plt.legend()
plt.show()
and I get the following parameters:
print(popt02)  # [16.13278858  0.91788691]
which produce a fit that clearly does not match the data.
If I instead pick the parameters manually and plot them with:
plt.plot(sigma_plot , (1 * np.exp(0.058 * sigma_plot)) * 1e-9, ls ='--', label ='2. fit manual')
I get the following plot, which is much better:
What is the problem? Why is curve_fit not working and giving such parameters?
In the curve_fit documentation, it says
Assumes ydata = f(xdata, *params) + eps
So if you change your function definition so that the x data comes first, it will work:
def Mbh02(sigma, alpha, beta):
    return alpha * np.exp(beta * sigma)
# Rest of code
plt.plot(sigma_plot , Mbh02(sigma_plot, *popt02) * 1e-9, color='orange', ls ='-')
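With the x data first, the rest of your script runs unchanged. As a quick check (a sketch using the same data and starting guess as in the question), the fit call now optimises alpha and beta as intended:
popt02, pcov02 = curve_fit(Mbh02, sigma, Mbh, p0=[1, 0.058])
print(popt02)  # alpha and beta; the resulting curve should now track the data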
Have you tried fitting log(Mbh) with a linear fit instead of fitting the exponential model directly? This usually gives a lot more numerical stability.
import numpy as np
import matplotlib.pyplot as plt
Mbh = np.array([1.8e6,2.5e6,4.5e7,3.7e7,4.4e7,1.5e7,1.4e7,4.1e7, 1.0e9,2.1e8,1.0e8,1.0e8,1.6e7,1.9e8,3.9e7,5.2e8,3.1e8,3.0e8,7.0e7,1.1e8,3.0e9,5.6e7,7.8e7,2.0e9,1.7e8,1.4e7,2.4e8,5.3e8,3.3e8,3.5e6,2.5e9])
sigma = np.array([103,75,160,209,205,151,175,140,230,205,145,206,143,182,130,315,242,225,186,190,375,162,152,385,177,90,234,290,266,67,340])
plt.figure(2)
plt.plot(sigma,Mbh,'.')
lnMbh= np.log(Mbh)
p = np.polyfit(sigma,lnMbh,1)
plt.plot(sigma, np.exp(np.polyval(p,sigma)),'*')
# np.polyfit returns [slope, intercept]; since ln(Mbh) = ln(alpha) + beta*sigma,
# the slope is beta and the intercept is ln(alpha)
beta = p[0]
alpha = np.exp(p[1])
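If you still want the parameters from a direct non-linear fit, the log-space estimates make a good starting guess. A minimal sketch (assuming the corrected Mbh02 signature from the other answer, with sigma as the first argument):
from scipy.optimize import curve_fit

def Mbh02(sigma, alpha, beta):
    return alpha * np.exp(beta * sigma)

# refine the log-space estimates with a non-linear fit on the original data
popt, pcov = curve_fit(Mbh02, sigma, Mbh, p0=[alpha, beta])
print(popt)  # refined alpha and beta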
As mentioned here, scikit-learn's Gaussian process regression (GPR) permits "prediction without prior fitting (based on the GP prior)". But I have an idea of what my prior should be (i.e. it should not simply have a mean of zero; perhaps my output, y, scales linearly with my input, X, i.e. y = X). How could I encode this information into GPR?
Below is a working example, but it assumes a zero-mean prior. I read that "The GaussianProcessRegressor does not allow for the specification of the mean function, always assuming it to be the zero function, highlighting the diminished role of the mean function in calculating the posterior." I believe this is the motivation behind custom kernels (e.g. heteroscedastic) with variable scales at different X, although I'm still trying to better understand what capability they provide. Are there ways to get around the zero-mean prior so that an arbitrary prior can be specified in scikit-learn?
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
def f(x):
    """The function to predict."""
    return 1.5*(1. - np.tanh(100.*(x - 0.96))) + 1.5*x*(x - 0.95) + 0.4 + 1.5*(1. - x)*np.random.random(x.shape)
# Instantiate a Gaussian Process model
kernel = C(10.0, (1e-5, 1e5)) * RBF(10.0, (1e-5, 1e5))
X = np.array([0.803,0.827,0.861,0.875,0.892,0.905,
0.91,0.92,0.925,0.935,0.941,0.947,0.96,
0.974,0.985,0.995,1.0])
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
noise = np.linspace(0.4,0.3,len(X))
y += noise
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel, alpha=noise ** 2,
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
x = np.atleast_2d(np.linspace(0.8, 1.02, 1000)).T
y_pred, sigma = gp.predict(x, return_std=True)
plt.figure()
plt.errorbar(X.ravel(), y, noise, fmt='k.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'k-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.1, fc='k', ec='None', label='95% confidence interval')
plt.xlabel('x')
plt.ylabel('y')
plt.xlim(0.8, 1.02)
plt.ylim(0, 5)
plt.legend(loc='lower left')
plt.show()
Here is an example of how to add a prior mean function to the sklearn GPR model.
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel
A=np.linspace(5,25,num=100)
# prior mean function
prior_beta=12-0.3*A
# true function
true_beta=20-0.7*A
np.random.seed(44)
# Training data
size=15
ind=np.random.randint(0,100,size=size)
# per-point noise variances for the training samples
var_ = np.random.uniform(0.1, 10.0, size=size)
A_ = A[ind][:, np.newaxis]
# subtract the prior mean from the training targets; the GP is fit to the residuals
beta_ = true_beta[ind] - prior_beta[ind]
beta_1 = true_beta[ind]
plt.figure()
kernel = ConstantKernel(4) * RBF(length_scale=2, length_scale_bounds=(1e-3, 1e2))
gp = GaussianProcessRegressor(kernel=kernel,
alpha=var_,optimizer=None).fit(A_, beta_)
X_ = np.linspace(5, 25, 100)
y_mean, y_cov = gp.predict(X_[:, np.newaxis], return_cov=True)
# Now you add the prior mean function back
y_mean=y_mean+12-0.3*X_
plt.plot(X_, y_mean, 'k', lw=3, zorder=9, label='predicted')
plt.fill_between(X_, y_mean - 3*np.sqrt(np.diag(y_cov)),
y_mean + 3*np.sqrt(np.diag(y_cov)),
alpha=0.5, color='k', label='+-3sigma')
plt.plot(A,true_beta, 'r', lw=3, zorder=9,label='truth')
plt.plot(A,prior_beta, 'blue', lw=3, zorder=9,label='prior')
plt.errorbar(A_[:,0], beta_1, yerr=3*np.sqrt(var_), fmt='x',ecolor='g',marker='s',
mfc='g', ms=10,capsize=6,label='training set')
plt.title("Initial: %s\n"% (kernel))
plt.legend()
plt.show()
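The key design choice above is the standard workaround: GaussianProcessRegressor always assumes a zero-mean prior, so you fit the GP to the residuals y - m(X) and add m(X) back to the predictions. A minimal sketch of a reusable wrapper (the class name and interface are my own, not part of scikit-learn):
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor

class MeanShiftedGPR:
    """Hypothetical helper: GPR with an explicit prior mean function m(X)."""
    def __init__(self, mean_fn, **gpr_kwargs):
        self.mean_fn = mean_fn
        self.gpr = GaussianProcessRegressor(**gpr_kwargs)

    def fit(self, X, y):
        # fit the zero-mean GP to the residuals y - m(X)
        self.gpr.fit(X, y - self.mean_fn(X))
        return self

    def predict(self, X, return_std=False):
        # add the prior mean back to the posterior mean; the std is unaffected
        if return_std:
            mu, std = self.gpr.predict(X, return_std=True)
            return mu + self.mean_fn(X), std
        return self.gpr.predict(X) + self.mean_fn(X)

# usage with the data above (fit directly on the raw targets beta_1):
# gp = MeanShiftedGPR(lambda X: 12 - 0.3*X[:, 0], kernel=kernel, alpha=var_, optimizer=None)
# gp.fit(A_, beta_1)
# y_mean, y_std = gp.predict(X_[:, np.newaxis], return_std=True)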
I am trying to fit some sample data in a semilogy plot with the curve_fit function from scipy. My best-fit curve looks okay with the code I am following, but I am having trouble with the 2-sigma curves, which I want to show along with the best-fit curve as a grey-filled band. My code looks like the following:
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import scipy.optimize as optimization
M = np.array([-2, -1, 0, 1, 2, 3,4])
Y_z = np.array([0.05, 0.2, 3, 8, 50, 344, 2400 ])
# curve fit linear function
def line(x, a, b):
    return a*x + b
popt, pcov = curve_fit(line, M, np.log10(Y_z)) # change here
# plotting
plt.semilogy(M , Y_z, 'o')
plt.semilogy(M, 10**line(M, popt[0], popt[1]), ':', label = 'curve-fit')
# plot 1 sigma -error
y1 = 10**(line(M, popt[0] + pcov[0,0]**0.5, popt[1] - pcov[1,1]**0.5))
y2 = 10**(line(M, popt[0] - pcov[0,0]**0.5, popt[1] + pcov[1,1]**0.5))
plt.semilogy(M, y1, ':')
plt.semilogy(M, y2, ':')
plt.fill_between(M, y1, y2, facecolor="gray", alpha=0.15)
plt.xlabel(r"$\log X$")
plt.ylabel('Y')
plt.legend()
plt.show()
Any help with the variance curves is much appreciated.
In principle, a linear fit doesn't need non-linear least-squares curve-fitting at all: linear regression should work.
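For example, a minimal sketch of the plain linear-regression route with np.polyfit (using the same M and log10(Y_z) as in your code):
import numpy as np

# ordinary least-squares line in log space; cov=True also returns the covariance matrix
coeffs, cov = np.polyfit(M, np.log10(Y_z), deg=1, cov=True)
a, b = coeffs  # slope and intercept, same roles as in line(x, a, b)
print(a, b, np.sqrt(np.diag(cov)))  # estimates and their 1-sigma uncertainties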
That said, to address your questions, you might find lmfit (http://lmfit.github.io/lmfit-py/) useful here. It has a slightly higher-level and slightly more Pythonic approach to curve-fitting, and adds many features. One of these is calculating the uncertainty in the result for a selected value of sigma.
To do your fit with lmfit, it would look like this:
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as optimization
import lmfit
M = np.array([-2, -1, 0, 1, 2, 3,4])
Y_z = np.array([0.05, 0.2, 3, 8, 50, 344, 2400 ])
# curve fit linear function
def line(x, a, b):
    return a*x + b
# set up model and create parameters from model function
# note that function argument names are used for parameters
model = lmfit.Model(line)
params = model.make_params(a=1, b=0)
result = model.fit(np.log10(Y_z), params, x=M)
print(result.fit_report())
which will print out a report about the fit like this:
[[Model]]
Model(line)
[[Fit Statistics]]
# fitting method = leastsq
# function evals = 8
# data points = 7
# variables = 2
chi-square = 0.10468256
reduced chi-square = 0.02093651
Akaike info crit = -25.4191304
Bayesian info crit = -25.5273101
[[Variables]]
a: 0.77630819 +/- 0.02734470 (3.52%) (init = 1)
b: 0.22311337 +/- 0.06114460 (27.41%) (init = 0)
[[Correlations]] (unreported correlations are < 0.100)
C(a, b) = -0.447
You can calculate the 2-sigma uncertainty in the best-fit result as
# calculate 2-sigma uncertainty in result
del2 = result.eval_uncertainty(sigma=2, x=M)
and then use this and the fit results to plot the results (slightly modified from your form):
plt.plot(M, np.log10(Y_z), 'o', label='data')
plt.plot(M, result.best_fit, ':', label = 'curve-fit')
plt.fill_between(M, result.best_fit-del2, result.best_fit+del2, facecolor="grey", alpha=0.15)
plt.xlabel(r"$\log X$")
plt.ylabel('Y')
plt.legend()
plt.show()
which should produce a plot like this:
Hope that helps.
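If you prefer to keep your original semilogy axes in linear Y units, a small sketch (reusing result and del2 from above) is to transform the best fit and the band back with 10**(...):
plt.semilogy(M, Y_z, 'o', label='data')
plt.semilogy(M, 10**result.best_fit, ':', label='curve-fit')
plt.fill_between(M, 10**(result.best_fit - del2), 10**(result.best_fit + del2),
                 facecolor='gray', alpha=0.15, label='2-sigma band')
plt.xlabel(r"$\log X$")
plt.ylabel('Y')
plt.legend()
plt.show()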
I would like to find and plot a function f that represents a curve fitted to a set of points that I already know, x and y.
After some research I started experimenting with scipy.optimize and curve_fit, but in the reference guide I found that the program fits the data to a given function instead, and that it assumes ydata = f(xdata, *params) + eps.
So my question is this: what do I have to change in my code to use curve_fit, or any other library, to find the function of the curve through my set of points? (Note: I want to know the function as well, so I can integrate it later for my project and plot it.) I know that it's going to be a decaying exponential function, but I don't know the exact parameters. This is what I tried in my program:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def func(x, a, b, c):
    return a * np.exp(-b * x) + c
xdata = np.array([0.2, 0.5, 0.8, 1])
ydata = np.array([6, 1, 0.5, 0.2])
plt.plot(xdata, ydata, 'b-', label='data')
popt, pcov = curve_fit(func, xdata, ydata)
plt.plot(xdata, func(xdata, *popt), 'r-', label='fit')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
I am currently developing this project on a Raspberry Pi, in case that changes anything. I would like to use the least squares method since it is precise, but any other method that works well is welcome.
Again, this is based on the reference guide of the scipy library. Also, I get the following graph, which is not even a curve: Graph and curve based on set points
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def func(x, a, b, c):
    return a * np.exp(-b * x) + c

# c is a constant so taking the derivative makes it go to zero
def deriv(x, a, b, c):
    return -a * b * np.exp(-b * x)

# Integrating gives you another coefficient (offset); let's call it c1 and set it equal to zero by default
def integ(x, a, b, c, c1=0):
    return -a/b * np.exp(-b * x) + c*x + c1
#There are only 4 (x,y) points here
xdata = np.array([0.2, 0.5, 0.8, 1])
ydata = np.array([6, 1, 0.5, 0.2])
#curve_fit already uses "non-linear least squares to fit a function, f, to data"
popt, pcov = curve_fit(func, xdata, ydata)
a,b,c = popt #these are the optimal parameters for fitting your 4 data points
#Now get more x values to plot the curve along so it looks like a curve
step = 0.01
fit_xs = np.arange(min(xdata),max(xdata),step)
#Plot the results
plt.plot(xdata, ydata, 'bx', label='data')
plt.plot(fit_xs, func(fit_xs,a,b,c), 'r-', label='fit')
plt.plot(fit_xs, deriv(fit_xs,a,b,c), 'g-', label='deriv')
plt.plot(fit_xs, integ(fit_xs,a,b,c), 'm-', label='integ')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
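Since you mentioned integrating later, here is a quick sanity check (a sketch reusing a, b, c and the functions above) comparing the analytic antiderivative against numerical quadrature:
from scipy.integrate import quad

lo, hi = min(xdata), max(xdata)
# definite integral from the analytic antiderivative
analytic = integ(hi, a, b, c) - integ(lo, a, b, c)
# the same integral computed numerically
numeric, _ = quad(func, lo, hi, args=(a, b, c))
print(analytic, numeric)  # the two values should agree closely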
I am trying to understand the results from the scikit-learn gaussian mixture model implementation. Take a look at the following example:
#!/opt/local/bin/python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture
# Define simple gaussian
def gauss_function(x, amp, x0, sigma):
    return amp * np.exp(-(x - x0) ** 2. / (2. * sigma ** 2.))
# Generate sample from three gaussian distributions
samples = np.random.normal(-0.5, 0.2, 2000)
samples = np.append(samples, np.random.normal(-0.1, 0.07, 5000))
samples = np.append(samples, np.random.normal(0.2, 0.13, 10000))
# Fit GMM
gmm = GaussianMixture(n_components=3, covariance_type="full", tol=0.001)
gmm = gmm.fit(X=np.expand_dims(samples, 1))
# Evaluate GMM
gmm_x = np.linspace(-2, 1.5, 5000)
gmm_y = np.exp(gmm.score_samples(gmm_x.reshape(-1, 1)))
# Construct function manually as sum of gaussians
gmm_y_sum = np.full_like(gmm_x, fill_value=0, dtype=np.float32)
for m, c, w in zip(gmm.means_.ravel(), gmm.covariances_.ravel(),
                   gmm.weights_.ravel()):
    gmm_y_sum += gauss_function(x=gmm_x, amp=w, x0=m, sigma=np.sqrt(c))
# Normalize so that integral is 1
gmm_y_sum /= np.trapz(gmm_y_sum, gmm_x)
# Make regular histogram
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[8, 5])
ax.hist(samples, bins=50, density=True, alpha=0.5, color="#0070FF")  # density=True (normed was removed in recent matplotlib)
ax.plot(gmm_x, gmm_y, color="crimson", lw=4, label="GMM")
ax.plot(gmm_x, gmm_y_sum, color="black", lw=4, label="Gauss_sum")
# Annotate diagram
ax.set_ylabel("Probability density")
ax.set_xlabel("Arbitrary units")
# Draw legend
plt.legend()
plt.show()
Here I first generate a sample distribution constructed from Gaussians and then fit a Gaussian mixture model to these data. Next, I want to calculate the probability density for some given input. Conveniently, the scikit implementation offers the score_samples method to do just that. Now I am trying to understand these results. I always thought that I could just take the parameters of the Gaussians from the GMM fit, construct the very same distribution by summing over them, and then normalise the integral to 1. However, as you can see in the plot, the curve obtained from the score_samples method (red line) fits the original data (blue histogram) perfectly, while the manually constructed distribution (black line) does not. I would like to understand where my thinking went wrong and why I can't construct the distribution myself by summing the Gaussians given by the GMM fit. Thanks a lot for any input!
Just in case anyone in the future is wondering about the same thing: One has to normalise the individual components, not the sum:
import numpy as np
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture
# Define simple gaussian
def gauss_function(x, amp, x0, sigma):
    return amp * np.exp(-(x - x0) ** 2. / (2. * sigma ** 2.))
# Generate sample from three gaussian distributions
samples = np.random.normal(-0.5, 0.2, 2000)
samples = np.append(samples, np.random.normal(-0.1, 0.07, 5000))
samples = np.append(samples, np.random.normal(0.2, 0.13, 10000))
# Fit GMM
gmm = GaussianMixture(n_components=3, covariance_type="full", tol=0.001)
gmm = gmm.fit(X=np.expand_dims(samples, 1))
# Evaluate GMM
gmm_x = np.linspace(-2, 1.5, 5000)
gmm_y = np.exp(gmm.score_samples(gmm_x.reshape(-1, 1)))
# Construct function manually as sum of gaussians
gmm_y_sum = np.full_like(gmm_x, fill_value=0, dtype=np.float32)
for m, c, w in zip(gmm.means_.ravel(), gmm.covariances_.ravel(), gmm.weights_.ravel()):
    gauss = gauss_function(x=gmm_x, amp=1, x0=m, sigma=np.sqrt(c))
    gmm_y_sum += gauss / np.trapz(gauss, gmm_x) * w
# Make regular histogram
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[8, 5])
ax.hist(samples, bins=50, density=True, alpha=0.5, color="#0070FF")  # density=True (normed was removed in recent matplotlib)
ax.plot(gmm_x, gmm_y, color="crimson", lw=4, label="GMM")
ax.plot(gmm_x, gmm_y_sum, color="black", lw=4, label="Gauss_sum", linestyle="dashed")
# Annotate diagram
ax.set_ylabel("Probability density")
ax.set_xlabel("Arbitrary units")
# Make legend
plt.legend()
plt.show()
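Equivalently (a small sketch, not part of the original answer), you can skip the numerical normalisation and use the closed-form normal density from scipy.stats, scaling each component by its mixture weight:
from scipy.stats import norm

gmm_y_sum = np.zeros_like(gmm_x)
for m, c, w in zip(gmm.means_.ravel(), gmm.covariances_.ravel(), gmm.weights_.ravel()):
    # each component is a properly normalised Gaussian, weighted by its mixture weight
    gmm_y_sum += w * norm.pdf(gmm_x, loc=m, scale=np.sqrt(c))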