I am trying to find the peak of my data set by fitting it to a Lorentzian (more specifically, I have to find at what value of the B-field the peak occurs). However, what follows the peak is not symmetric and definitely not linear, so I am having trouble getting a good fit. This is what I have tried:
import numpy
import pylab
from scipy.optimize import leastsq  # Levenberg-Marquardt algorithm #
def lorentzian(x, p):
    numerator = p[0]**2
    denominator = (x - p[1])**2 + p[0]**2
    y = p[2]*(numerator/denominator) + p[3]*(x - p[0]) + p[4]
    return y

def residuals(p, y, x):
    err = y - lorentzian(x, p)
    return err
a = numpy.loadtxt('QHE.dat')
x = a[int(len(a)*8.2/10):,0]
y = a[int(len(a)*8.2/10):,1]
# initial values #
p = [0.4, 1.2, 1.5, 1, 1] # [hwhm, peak center, intensity, linear slope, offset] #
pbest = leastsq(residuals,p,args=(y,x),full_output=1)
best_parameters = pbest[0]
# fit to data #
fit = lorentzian(x,best_parameters)
peaks = []
peaks.append(best_parameters)
pylab.figure()
pylab.plot(x,y,'wo')
pylab.plot(x,fit,'r-',lw=2)
pylab.xlabel('B field', fontsize=18)
pylab.ylabel('Resistance', fontsize=18)
pylab.show()
Does anyone have a suggestion how to handle this?
Edit:
Here is the data file I am trying to fit. The goal is to find the minimum.
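One way to handle the asymmetric tail (a minimal sketch, not the asker's code): locate the extremum directly from the data first, then fit only a narrow window around it with a symmetric dip plus constant offset, so the asymmetric part of the curve never enters the fit. The window half-width and starting values below are assumptions to be tuned to the data.

import numpy as np
from scipy.optimize import curve_fit

def lorentz_dip(B, hwhm, B0, depth, offset):
    # inverted Lorentzian: a dip of given depth centred at B0
    return offset - depth * hwhm**2 / ((B - B0)**2 + hwhm**2)

a = np.loadtxt('QHE.dat')
B, R = a[:, 0], a[:, 1]

i_min = np.argmin(R)          # rough location of the minimum straight from the data
B_guess = B[i_min]

window = 0.3                  # half-width in B units -- an assumption, tune to the data
mask = np.abs(B - B_guess) < window
p0 = [0.1, B_guess, R.max() - R.min(), R[mask].max()]
popt, pcov = curve_fit(lorentz_dip, B[mask], R[mask], p0=p0)
print('dip centre at B =', popt[1])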
I am trying to use the scipy curve_fit function to fit a Gaussian function to my data to estimate a theoretical power spectral density. While doing so, the curve_fit function always returns the initial parameters (p0=[1,1,1]), thus telling me that the fitting didn't work.
I don't know where the issue is. I am using Python 3.9 (Spyder 5.1.5) from the Anaconda distribution on Windows 11.
Here is a WeTransfer link to the data file:
https://wetransfer.com/downloads/6097ebe81ee0c29ee95a497128c1c2e420220704110130/86bf2d
Here is my code below. Can someone tell me what the issue is, and how I can solve it?
In the picture of the plot, the blue curve is my experimental PSD and the orange one is the result of the fit.
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import scipy.constants as cst
File = np.loadtxt('test5.dat')
X = File[:, 1]
Y = File[:, 2]
f_sample = 50000
time = []
for i in range(1, len(X)+1):
    t = i*(1/f_sample)
    time = np.append(time, t)
N = X.shape[0] # number of observation
N1=int(N/2)
delta_t = time[2] - time[1]
T_mes = N * delta_t
freq = np.arange(1/T_mes, (N+1)/T_mes, 1/T_mes)
freq=freq[0:N1]
fNyq = f_sample/2 # Nyquist frequency
nb = 350
freq_block = []
# discrete Fourier transform
X_ft = delta_t*np.fft.fft(X, n=N)
X_ft=X_ft[0:N1]
plt.figure()
plt.plot(time, X)
plt.xlabel('t [s]')
plt.ylabel('x [micro m]')
# Experimental power spectrum on both raw and blocked data
PSD_X_exp = (np.abs(X_ft)**2/T_mes)
PSD_X_exp_b = []
STD_PSD_X_exp_b = []
for i in range(0, N1+2, nb):
    freq_b = np.array(freq[i:i+nb])  # i-nb:i
    psd_b = np.array(PSD_X_exp[i:i+nb])
    freq_block = np.append(freq_block, (1/nb)*np.sum(freq_b))
    PSD_X_exp_b = np.append(PSD_X_exp_b, (1/nb)*np.sum(psd_b))
    STD_PSD_X_exp_b = np.append(STD_PSD_X_exp_b, PSD_X_exp_b/np.sqrt(nb))
plt.figure()
plt.loglog(freq, PSD_X_exp)
plt.legend(['Raw Experimental PSD'])
plt.xlabel('f [Hz]')
plt.ylabel('PSD')
plt.figure()
plt.loglog(freq_block, PSD_X_exp_b)
plt.legend(['Experimental PSD after blocking'])
plt.xlabel('f [Hz]')
plt.ylabel('PSD')
kB = cst.k # Boltzmann constant [m^2kg/s^2K]
T = 273.15 + 25 # Temperature [K]
r = (2.8 / 2) * 1e-6 # Particle radius [m]
v = 0.00002414 * 10 ** (247.8 / (-140 + T)) # Water viscosity [Pa*s]
gamma = np.pi * 6 * r * v # [m*Pa*s]
Do = kB*T/gamma # expected value for D
f3db_o = 50000 # expected value for f3db
fc_o = 300 # expected value pour fc
n = np.arange(-10,11)
def theo_spectrum_lorentzian_filter(x, D_, fc_, f3db_):
    PSD_theo = []
    for i in range(0, len(x)):
        # print(i)
        psd_theo = np.sum((((D_*Do)/2*math.pi**2)/((fc_*fc_o)**2+(x[i]+n*f_sample)**2))
                          * (1/(1+((x[i]+n*f_sample)/(f3db_*f3db_o))**2)))
        PSD_theo = np.append(PSD_theo, psd_theo)
    return PSD_theo
popt, pcov = curve_fit(theo_spectrum_lorentzian_filter, freq_block, PSD_X_exp_b, p0=[1, 1, 1], sigma=STD_PSD_X_exp_b, absolute_sigma=True, check_finite=True,bounds=(0.1, 10), method='trf', jac=None)
D_, fc_, f3db_ = popt
D1 = D_*Do
fc1 = fc_*fc_o
f3db1 = f3db_*f3db_o
print('Diffusion constant D = ', D1, ' Corner frequency fc= ',fc1, 'f3db(diode,eff)= ', f3db1)
I believe I've successfully fitted your data. Here's the approach I took.
First, I plotted your model (with popt=[1, 1, 1]) and the data you had. I noticed your data was significantly lower than the model. Then I started fiddling with the parameters. I wanted to push the model upwards. I did that by multiplying popt[0] by increasingly large values. I ended up with 1E13 as a ballpark value. Note that I have no idea if this is physically possible for your model. Then I jury-rigged your fitting function to multiply D_ by 1E13 and ran your code. I got this fit:
So I believe it's a problem of 1) inappropriate starting values and 2) inappropriate bounds. In your position, I would revise this model and check if there are any problems with units and so on.
Here's what I used to try to fit your model:
plt.figure()
plt.loglog(freq_block[:170], PSD_X_exp_b[:170], label='Exp')
plt.loglog(freq_block[:170],
           theo_spectrum_lorentzian_filter(
               freq_block[:170],
               1E13*popt[0], popt[1], popt[2]),
           label='model')
plt.xlabel('f [Hz]')
plt.ylabel('PSD')
plt.legend()
I limited the data to point 170 because there were some weird backwards values that made me uncomfortable. I would recheck them if I were you.
Here's the model code I used. I didn't change the curve_fit call (except to limit x to :170).
def theo_spectrum_lorentzian_filter(x, D_, fc_, f3db_):
    PSD_theo = []
    D_ = 1E13*D_  # I only changed here
    for i in range(0, len(x)):
        psd_theo = np.sum((((D_*Do)/2*math.pi**2)/((fc_*fc_o)**2+(x[i]+n*f_sample)**2))
                          * (1/(1+((x[i]+n*f_sample)/(f3db_*f3db_o))**2)))
        PSD_theo = np.append(PSD_theo, psd_theo)
    return PSD_theo
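If you would rather not hardcode the 1E13 factor inside the model, an alternative is to move the scale into the starting values and bounds passed to curve_fit. This is only a sketch using the original, unmodified model function, and it assumes D_ really does need to be of order 1E13 for this data set:

# let the optimizer reach the large D_ scale itself instead of baking 1E13 into the model
p0 = [1e13, 1, 1]                 # assumed ballpark for [D_, fc_, f3db_]
lower = [1e11, 0.1, 0.1]
upper = [1e15, 10, 10]
popt, pcov = curve_fit(theo_spectrum_lorentzian_filter,
                       freq_block[:170], PSD_X_exp_b[:170],
                       p0=p0, sigma=STD_PSD_X_exp_b[:170],
                       absolute_sigma=True, bounds=(lower, upper), method='trf')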
I have heard that the Cauchy integration formula can be used to interpolate complex-valued functions along a closed boundary of a disk to points inside the disk. For my current project, this sounds rather valuable, so I attempted to give this a shot. Unfortunately, my experiments were not very successful so far, and I am not certain what is going wrong. Some degree of interpolation is certainly going on, but the results do not seem to be correct along the boundaries. Here is what my code returns:
Here is my initial code example:
import scipy.stats
import numpy as np
import scipy.integrate
import scipy.interpolate
import matplotlib.pyplot as plt
plt.close('all')
# This is the interpolation function, which takes as input a position on the
# boundary in radians (x), a complex evaluation point (eval_point), and the
# function which returns the boundary condition
def f(x, eval_point, itp):
    # What is the complex coordinate of this point on the boundary?
    zi = np.cos(x) + 1j*np.sin(x)
    # Get the boundary condition value
    fz = itp(x)
    return fz/(zi - eval_point)
# Complex quadrature for integration, adapted from
# https://stackoverflow.com/questions/57325919/using-scipy-quad-with-i%ce%b5-trick-bad-results
def cquad(func, a, b, **kwargs):
    real_integral = scipy.integrate.quad(lambda x: np.real(func(x, **kwargs)), a, b, limit=200)
    imag_integral = scipy.integrate.quad(lambda x: np.imag(func(x, **kwargs)), a, b, limit=200)
    return (real_integral[0] + 1j*imag_integral[0], real_integral[1:], imag_integral[1:])
# Define the interpolation function for the boundary values
itp = scipy.interpolate.interp1d(
    x = [0, np.pi/2, np.pi, 1.5*np.pi, 2*np.pi],
    y = [0+0j, 0+1j, 1+1j, 1+0j, 0+0j])
# Get some evaluation points
X, Y = np.meshgrid(np.linspace(-1, 1, 51),
                   np.linspace(-1, 1, 51))
XY = X+1j*Y
x = np.ndarray.flatten(XY)
# Throw away all points outside the unit disk; avoid evaluating at radius 1 to
# dodge singularities
x = x[np.where(np.abs(x) <= 0.99)]
# Calculate the result for each evaluation point
res = []
for val in x:
    res.append(cquad(
        func = f,
        a = 0,
        b = 2*np.pi,
        eval_point = val,
        itp = itp)[0]/(2*np.pi*1j))
# Convert the results into an array
res = np.asarray(res)
# Plot the real part of the results
plt.tricontour(
    np.real(x),
    np.imag(x),
    np.real(res),
    cmap = 'jet')
plt.colorbar(label='real part')
# Plot the imaginary part of the results
plt.tricontour(
    np.real(x),
    np.imag(x),
    np.imag(res),
    cmap = 'Greys')
plt.colorbar(label='imaginary part')
Does anybody have an idea what is going wrong?
You can get an easy approximation of that function by employing the FFT. The inverse FFT can be interpreted as polynomial evaluation at the corresponding points on the unit circle, so that the polynomial as a whole is an approximation of the Cauchy formula.
c = np.fft.fft(itp(np.linspace(0,2*np.pi,401)[:-1]))
c=c[::-1]/len(c)
np.polyval(c,[1,1j,-1,-1j])
returns
[5.55111512e-17+5.55111512e-17j, 5.55111512e-17+1.00000000e+00j,
1.00000000e+00+1.00000000e+00j, 1.00000000e+00+5.55111512e-17j]
these are the values that were expected.
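As a quick self-check of that interpretation (a small self-contained sketch repeating the itp definition from the question), the polynomial built from the reversed, normalized FFT coefficients reproduces the sampled boundary values at the sampling points on the unit circle:

import numpy as np
import scipy.interpolate

itp = scipy.interpolate.interp1d(
    x=[0, np.pi/2, np.pi, 1.5*np.pi, 2*np.pi],
    y=[0+0j, 0+1j, 1+1j, 1+0j, 0+0j])

theta = np.linspace(0, 2*np.pi, 401)[:-1]
c = np.fft.fft(itp(theta))
c = c[::-1]/len(c)

z = np.exp(1j*theta)                                # the sampling points on the unit circle
print(np.allclose(np.polyval(c, z), itp(theta)))    # True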
X, Y = np.meshgrid(np.linspace(-1, 1, 151),
                   np.linspace(-1, 1, 151))
Z = (X+1j*Y).flatten()
Z = Z[np.where(np.abs(Z) <= 0.99)]
W = np.polyval(c,Z)
# Plot the real part of the results
plt.tricontour( Z.real, Z.imag, W.real, cmap = 'jet')
plt.colorbar(label='real part')
# Plot the imaginary part of the results
plt.tricontour( Z.real, Z.imag, W.imag, cmap = 'Greys')
plt.colorbar(label='imaginary part')
plt.tight_layout(); plt.show()
This then gives the picture
The dominant terms of the polynomial are
(1+1j)*(0.500000 - 0.045040*z^3 - 0.008279*z^7
        - 0.005012*z^391 - 0.016220*z^395 - 0.405293*z^399)
As far as I could see, the leading degree 3 after the constant term stays the same under refinement of the sampling sequence.
See the edit below for details.
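For reference, here is a small sketch of how such dominant terms can be read off from the coefficient vector c computed above (the 1e-3 cutoff is an arbitrary choice):

deg = np.arange(len(c))[::-1]     # term k of polyval(c, z) has degree len(c)-1-k
big = np.abs(c) > 1e-3            # arbitrary threshold for "dominant"
for d, coef in zip(deg[big], c[big]):
    print(f'z^{d}: {complex(coef):.6f}')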
I have a dataset on which I need to perform an IFFT, cut out the valuable part of it (by multiplying with a Gaussian curve), then FFT back.
The data starts in the angular frequency domain, so an IFFT leads to the time domain. FFT-ing back should then lead to angular frequency again, but I can't seem to work out how to get back the original domain. Of course it's easy for the y-values:
yf = np.fft.ifft(y)
# cut the valuable part here...
np.fft.fft(yf)
For the x-value transforms I'm using np.fft.fftfreq the following way:
# x is in ang. frequency domain, that's the reason for the 2*np.pi division
t = np.fft.fftfreq(len(x), d=(x[1]-x[0])/(2*np.pi))
However, doing
x = np.fft.fftfreq(len(t), d=2*np.pi*(t[1]-t[0]))
does not give me back the original x values at all. Is there something I'm misunderstanding?
The question can be asked in a generalized way, for example:
import numpy as np
x = np.arange(100)
xx = np.fft.fftfreq(len(x), d = x[1]-x[0])
# how to get back the original x from xx? Is it even possible?
I've tried using a temporary variable where I store the original x values, but it's not too elegant. I'm looking for some kind of inverse of fftfreq, and in general the best possible solution for this problem.
Thank you.
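For the generalized example: np.fft.fftfreq only keeps the number of samples and their spacing, so the offset of the original axis is lost and has to be remembered separately, while the spacing itself can be recovered from the frequency bins. A small sketch of that idea:

import numpy as np

x = np.arange(100)
d = x[1] - x[0]
xx = np.fft.fftfreq(len(x), d=d)

# the spacing can be recovered: the frequency bins are 1/(N*d) apart
d_recovered = 1.0 / (len(xx) * (xx[1] - xx[0]))

# the offset cannot -- it has to be stored separately (here x[0])
x_recovered = x[0] + d_recovered * np.arange(len(xx))
print(np.allclose(x_recovered, x))   # True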
EDIT:
I will provide the code at the end.
I have a dataset which has angular frequency on the x axis and intensity on the y axis. I want to perform an IFFT to change to the time domain. Unfortunately the x values are not evenly spaced, so a (linear) interpolation is needed before the IFFT. In the time domain the transform then looks like this:
The next step is to cut out one of the symmetrical spikes with a Gaussian curve, then FFT back to the angular frequency domain (the same one we started in). My problem is that when I transform the x-axis for the IFFT (which I think is correct), I can't get back into the original angular frequency domain. Here is the code, which also includes the generator for the dataset.
import numpy as np
import matplotlib.pyplot as plt
import scipy
from scipy.interpolate import interp1d
C_LIGHT = 299.792
# for easier case, this is zero, so it can be ignored.
def _disp(x, GD=0, GDD=0, TOD=0, FOD=0, QOD=0):
    return x*GD + (GDD/2)*x**2 + (TOD/6)*x**3 + (FOD/24)*x**4 + (QOD/120)*x**5
# the generator to make sample datasets
def generator(start, stop, center, delay, GD=0, GDD=0, TOD=0, FOD=0, QOD=0, resolution=0.1, pulse_duration=15, chirp=0):
    window = (np.sqrt(1+chirp**2)*8*np.log(2))/(pulse_duration**2)
    lamend = (2*np.pi*C_LIGHT)/start
    lamstart = (2*np.pi*C_LIGHT)/stop
    lam = np.arange(lamstart, lamend+resolution, resolution)
    omega = (2*np.pi*C_LIGHT)/lam
    relom = omega - center
    i_r = np.exp(-(relom)**2/(window))
    i_s = np.exp(-(relom)**2/(window))
    i = i_r + i_s + 2*np.sqrt(i_r*i_s)*np.cos(_disp(relom, GD=GD, GDD=GDD, TOD=TOD, FOD=FOD, QOD=QOD)+delay*omega)
    # since the _disp polynomial is set to be zero, it's just cos(delay*omega)
    return omega, i
def interpol(x, y):
    ''' Simple linear interpolation '''
    xs = np.linspace(x[0], x[-1], len(x))
    intp = interp1d(x, y, kind='linear', fill_value='extrapolate')
    ys = intp(xs)
    return xs, ys
def ifft_method(initSpectrumX, initSpectrumY, interpolate=True):
    if len(initSpectrumY) > 0 and len(initSpectrumX) > 0:
        Ydata = initSpectrumY
        Xdata = initSpectrumX
    else:
        raise ValueError
    N = len(Xdata)
    if interpolate:
        Xdata, Ydata = interpol(Xdata, Ydata)
        # the (2*np.pi) division is because we have angular frequency, not frequency
        xf = np.fft.fftfreq(N, d=(Xdata[1]-Xdata[0])/(2*np.pi)) * N * Xdata[-1]/(N-1)
        yf = np.fft.ifft(Ydata)
    else:
        pass  # some irrelevant code here
    return xf, yf
def fft_method(initSpectrumX, initSpectrumY):
    if len(initSpectrumY) > 0 and len(initSpectrumX) > 0:
        Ydata = initSpectrumY
        Xdata = initSpectrumX
    else:
        raise ValueError
    yf = np.fft.fft(Ydata)
    xf = np.fft.fftfreq(len(Xdata), d=(Xdata[1]-Xdata[0])*2*np.pi)
    # the problem is here, where I transform the x values
    xf = np.fft.ifftshift(xf)
    return xf, yf
# the generated data
x, y = generator(1, 3, 2, delay = 1500, resolution = 0.1)
# plt.plot(x,y)
xx, yy = ifft_method(x,y)
# if the x values are correctly scaled, the two symmetrical spikes should appear exactly at the delay value
# plt.plot(xx, np.abs(yy))
# do the cutting here, which is also irrelevant now
# the problem is here, in fft_method: the x values are not the same as before the transforms
xxx, yyy = fft_method(xx, yy)
plt.plot(xxx, np.abs(yyy))
#and it should look like this:
#xs = np.linspace(x[0], x[-1], len(x))
#plt.plot(xs, np.abs(yyy))
plt.grid()
plt.show()
Q-Q plots are used to assess the goodness of fit between a set of data points and a theoretical distribution. The procedure to get the points is as follows:
1. Select the samples to use. Sort the selected samples, with X(i) denoting the i-th sample.
2. Find the model values that correspond to the samples. This is done in two steps:
   a. Associate each sample with the percentile it represents: pi = (i-0.5)/n.
   b. Calculate the model value that would be associated with this percentile. This is done by inverting the model CDF, as is done when generating random variates from the model distribution. Thus the model value corresponding to sample i is Finverse(pi).
3. Draw the Q-Q plot using the n points (X(i), Finverse(pi)), 1 ≤ i ≤ n.
Using this approach I came up with the following python implementation.
_distn_names = ["pareto"]
def fit_to_all_distributions(data):
dist_names = _distn_names
params = {}
for dist_name in dist_names:
try:
dist = getattr(st, dist_name)
param = dist.fit(data)
params[dist_name] = param
except Exception:
print("Error occurred in fitting")
params[dist_name] = "Error"
return params
def get_q_q_plot(values, dist, params):
    values.sort()
    arg = params[:-2]
    loc = params[-2]
    scale = params[-1]
    x = []
    for i in range(len(values)):
        x.append((i-0.5)/len(values))
    y = getattr(st, dist).ppf(x, loc=loc, scale=scale, *arg)
    y = list(y)
    emp_percentiles = values
    dist_percentiles = y
    print("Empirical Percentiles")
    print(emp_percentiles)
    print("Distribution Percentiles")
    print(dist_percentiles)
    plt.figure()
    plt.xlabel('dist_percentiles')
    plt.ylabel('actual_percentiles')
    plt.title('Q Q plot')
    plt.plot(dist_percentiles, emp_percentiles)
    plt.savefig("/path/q-q-plot.png")
b = 2.62
latencies = st.pareto.rvs(b, size=500)
data = pd.Series(latencies)
params = fit_to_all_distributions(data)
pareto_params = params["pareto"]
get_q_q_plot(latencies, "pareto", pareto_params)
Ideally I should get a straight line, but this is what I get.
Why don't I get a straight line? Is there anything wrong in my implementation?
You can get the Q-Q plot for any distribution (there are 82 in scipy stats) using the following code.
import os
import matplotlib.pyplot as plt
import sys
import math
import numpy as np
import scipy.stats as st
from scipy.stats._continuous_distns import _distn_names
from scipy.optimize import curve_fit
def get_q_q_plot(latency_values, distribution):
    distribution = getattr(st, distribution)
    params = distribution.fit(latency_values)
    latency_values.sort()
    arg = params[:-2]
    loc = params[-2]
    scale = params[-1]
    x = []
    for i in range(1, len(latency_values)):
        x.append((i-0.5) / len(latency_values))
    y = distribution.ppf(x, loc=loc, scale=scale, *arg)
    y = list(y)
    emp_percentiles = latency_values[1:]
    dist_percentiles = y
    return emp_percentiles, dist_percentiles
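For example, a minimal usage sketch with the Pareto sample from the question; the y = x reference line is just a visual aid I added, not part of the routine above:

import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st

b = 2.62
latencies = st.pareto.rvs(b, size=500)

emp, dist = get_q_q_plot(list(latencies), "pareto")

plt.figure()
plt.plot(dist, emp, 'o', markersize=3)
lims = [min(dist + emp), max(dist + emp)]   # emp and dist are plain lists here
plt.plot(lims, lims, 'k--')                 # y = x reference line
plt.xlabel('distribution percentiles')
plt.ylabel('empirical percentiles')
plt.title('Q-Q plot')
plt.show()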
Currently, I am trying to solve a problem from astrophysics which can be simplified as follows:
I want to fit a linear model (say y = a + b*x) to observed data, and I wish to use PyMC to characterize the posterior of a and b in a discrete grid parameter space, like in this figure:
I know PyMC has a DiscreteMetropolis class to sample posteriors in discrete space, but that works in integer space, not a custom discrete space. So I am thinking of defining a potential to force PyMC to search in the grid, but it is not working well... Can anyone help with this, or has anyone solved a similar problem? Any thoughts will be greatly appreciated :)
Here is my draft code; the commented-out potential is my idea for forcing PyMC to search in the grid:
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import pymc
#------------------------------------------------------------
# Generate the data
size = 200
slope_true = 12.3
y_intercept_true = 22.4
x = np.linspace(0, 1, size)
# y = a + b*x
y_true = y_intercept_true + slope_true * x
# add noise
y = y_true + np.random.normal(scale=.03, size=size)
# Define searching parameter space
# Note: this is discrete but not in the form of integer
slope_search_space = np.linspace(1,30,51)
y_intercept_search_space = np.linspace(1,30,51)
#------------------------------------------------------------
#Start initializing PyMC

@pymc.stochastic(dtype=int)
def slope(value=5, t_l=1, t_h=30):
    """Uniform prior on the slope over the range [t_l, t_h]."""
    def logp(value, t_l, t_h):
        if value > t_h or value < t_l:
            return -np.inf
        else:
            return -np.log(t_h - t_l + 1)

# @pymc.potential
# def slope_prior(val=slope, t_l=-30, t_h=30):
#     if val not in slope_search_space:
#         return -np.inf
#     return -np.log(t_h - t_l + 1)
# ---

@pymc.stochastic(dtype=int)
def y_intercept(value=4, t_l=1, t_h=30):
    """Uniform prior on the y-intercept over the range [t_l, t_h]."""
    def logp(value, t_l, t_h):
        if value > t_h or value < t_l:
            return -np.inf
        else:
            return -np.log(t_h - t_l + 1)

# @pymc.potential
# def y_intercept_prior(val=y_intercept, t_l=-30, t_h=30):
#     if val not in y_intercept_search_space:
#         return -np.inf
#     return -np.log(t_h - t_l + 1)

# Define observed data
@pymc.deterministic
def mu(x=x, slope=slope, y_intercept=y_intercept):
    # Linear model
    return y_intercept + slope*x

# Sampling distribution of the observations
p = pymc.Poisson('p', mu, value=y, observed=True)
model = dict(slope=slope, y_intercept=y_intercept, mu=mu, p=p)
#-----------------------------------------------------------
# perform the MCMC
M = pymc.MCMC(model)
trace = M.sample(iter=10000,burn=5000)
#Plot
pymc.Matplot.plot(M)
plt.figure()
pymc.Matplot.summary_plot([M.slope,M.y_intercept])
plt.show()
I managed to solve my problem a few days ago. To my surprise, some of my astronomy friends in a Facebook group are also interested in this question, so I think it might be useful to post my solution in case other people are having the same issue. Please note that this solution may not be the best way to tackle this problem; in fact, I believe there is a more elegant way. But for now, this is the best I can come up with. I hope it is helpful to some of you.
The way I solved the problem is very straightforward, and I summarize it as follows:
1> Define the slope and y_intercept stochastic variables in continuous form (PyMC will then use Metropolis to do the sampling).
2> Define a function find_nearest to map the continuous random variables slope and y_intercept onto the grid, e.g. with Grid_slope=np.array([1,2,3,4,…,51]) and slope=4.678, find_nearest(Grid_slope, slope) will return 5, as the slope value is closest to 5 in Grid_slope. Similarly for the y_intercept variable.
3> When computing the likelihood, this is where I do the trick: I apply the find_nearest function to the model inside the likelihood function, i.e. I change model(slope, y_intercept) to model(find_nearest(Grid_slope, slope), find_nearest(Grid_y_intercept, y_intercept)), so the likelihood is only ever computed on the grid parameter space.
4> The trace returned for slope and y_intercept by PyMC may not be strictly grid values; you can use the find_nearest function to map the trace onto grid values and then make any statistical inference from it (see the sketch after the full code below). In my case, I just used the trace directly to get statistics, and the result is nice :)
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import pymc
#------------------------------------------------------------
# Generate the data
size = 200
slope_true = 12.3
y_intercept_true = 22.4
x = np.linspace(0, 1, size)
# y = a + b*x
y_true = y_intercept_true + slope_true * x
# add noise
y = y_true + np.random.normal(scale=.03, size=size)
# Define searching parameter space
# Note: this is discrete but not in the form of integer
slope_search_space = np.linspace(1,30,51)
y_intercept_search_space = np.linspace(1,30,51)
#------------------------------------------------------------
#Start initializing PyMC
from pymc import Normal, Gamma, deterministic, MCMC, Matplot, Uniform
# Constant priors for parameters
slope = Uniform('slope', 1, 30)
y_intercept = Uniform('y_intp', 1, 30)
# Precision of normal distribution of y value
tau = Uniform('tau', 0, 10000)

@deterministic
def mu(x=x, slope=slope, y_intercept=y_intercept):
    def find_nearest(array, value):
        """
        This function maps 'value' to the nearest point in 'array'
        """
        idx = (np.abs(array - value)).argmin()
        return array[idx]
    # Linear model
    iso = find_nearest(y_intercept_search_space, y_intercept) + find_nearest(slope_search_space, slope)*x
    return iso
# Sampling distribution of y
p = Normal('p', mu, tau, value=y, observed=True)
model = dict(slope=slope, y_intercept=y_intercept,tau=tau, mu=mu, p=p)
#-----------------------------------------------------------
# perform the MCMC
M = pymc.MCMC(model)
trace = M.sample(40000,20000)
#Plot
pymc.Matplot.plot(M)
M.slope.summary()
M.y_intercept.summary()
plt.figure()
pymc.Matplot.summary_plot([M.slope,M.y_intercept])
plt.show()
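As mentioned in step 4, the trace can afterwards be snapped onto the grid before summarizing it. A small sketch of that idea; snap_to_grid is my own vectorized variant of find_nearest, not part of the code above, and it reuses the search spaces and the MCMC object M defined there:

# snap the sampled traces onto the grid before summarizing them
def snap_to_grid(grid, samples):
    """Map every sample to the nearest grid point (vectorized find_nearest)."""
    idx = np.abs(grid[None, :] - np.asarray(samples)[:, None]).argmin(axis=1)
    return grid[idx]

slope_trace = M.trace('slope')[:]
y_intp_trace = M.trace('y_intp')[:]

slope_grid_trace = snap_to_grid(slope_search_space, slope_trace)
y_intp_grid_trace = snap_to_grid(y_intercept_search_space, y_intp_trace)

print('slope (grid):', slope_grid_trace.mean())
print('y_intercept (grid):', y_intp_grid_trace.mean())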