Here I aim to estimate the parameters (gamma and omega) of a damped harmonic oscillator given by
d^2X/dt^2 + gamma*dX/dt + (2*pi*omega)^2*X = 0.
(We can add white Gaussian noise to the system.)
import pymc
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from scipy.integrate import odeint
#import data
xdata = sio.loadmat('T.mat')['T'][0] #time
ydata1 = sio.loadmat('V1.mat')['V1'][0] # V2=dV1/dt, (X=V1),
ydata2 = sio.loadmat('V2.mat')['V2'][0] # dV2/dt=-(2pi*omega)^2*V1-gama*V2
#time span for solving the equations
npts= 500
dt=0.01
Tspan=5.0
time = np.linspace(0,Tspan,npts+1)
#initial condition
V0 = [1.0, 1.0]
# Priors for unknown model parameters
sigma = pymc.Uniform('sigma', 0.0, 100.0)
gama= pymc.Uniform('gama', 0.0, 20.0)
omega=pymc.Uniform('omega',0.0, 20.0)
#Solve the equations
@pymc.deterministic
def DHOS(gama=gama, omega=omega):
    V1 = np.zeros(npts+1)
    V2 = np.zeros(npts+1)
    V1[0] = V0[0]
    V2[0] = V0[1]
    for i in range(1, npts+1):
        V1[i] = V1[i-1] + dt*V2[i-1]
        V2[i] = V2[i-1] + dt*(-((2*np.pi*omega)**2)*V1[i-1] - gama*V2[i-1])
    return [V1, V2]
#or we can use odeint
#@pymc.deterministic
#def DHS(gama=gama, omega=omega):
#    def DOS_func(y, time):
#        V1, V2 = y[0], y[1]
#        dV1dt = V2
#        dV2dt = -((2*np.pi*omega)**2)*V1 - gama*V2
#        dydt = [dV1dt, dV2dt]
#        return dydt
#    soln = odeint(DOS_func, V0, time)
#    V1, V2 = soln[:, 0], soln[:, 1]
#    return V1, V2
# value of outcome (observations)
V1 = pymc.Lambda('V1', lambda DHOS=DHOS: DHOS[0])
V2 = pymc.Lambda('V2', lambda DHOS=DHOS: DHOS[1])
# likelihood of observations
Yobs1 = pymc.Normal('Yobs1', mu=V1, tau=1.0/sigma**2, value=ydata1, observed=True)
Yobs2 = pymc.Normal('Yobs2', mu=V2, tau=1.0/sigma**2, value=ydata2, observed=True)
After saving the above code as DampedOscil_model.py, we can run PYMC as follows:
import pymc
import DampedOscil_model
MDL = pymc.MCMC(DampedOscil_model, db='pickle')
MDL.sample(iter=1e4, burn=1e2, thin=2)
gama_trace = MDL.trace('gama')[-1000:]
omega_trace=MDL.trace('omega')[-1000:]
gama=MDL.gama.value
omega=MDL.omega.value
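For reference, the traces and posterior summaries can also be inspected with PyMC's built-in utilities (a minimal sketch, assuming the sampler has finished):

pymc.Matplot.plot(MDL)   # trace and histogram plots for all variables
print(MDL.gama.stats()['mean'], MDL.omega.stats()['mean'])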
And it works well (see the figure below).
Figure: the true signal, constructed with gama_true=2.0 and omega_true=1.5, versus the estimated signal. The estimated parameter values are gama_est=2.04 and omega_est=1.49.
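In case it helps to reproduce this setup, here is a sketch of how such synthetic data could be generated and saved in the format loaded above (the noise level of 0.05 is an assumption; the true parameter values follow the figure caption):

import numpy as np
import scipy.io as sio

gama_true, omega_true = 2.0, 1.5
npts, dt = 500, 0.01
T = np.linspace(0, 5.0, npts+1)
V1_true = np.zeros(npts+1)
V2_true = np.zeros(npts+1)
V1_true[0], V2_true[0] = 1.0, 1.0
for i in range(1, npts+1):
    V1_true[i] = V1_true[i-1] + dt*V2_true[i-1]
    V2_true[i] = V2_true[i-1] + dt*(-((2*np.pi*omega_true)**2)*V1_true[i-1]
                                    - gama_true*V2_true[i-1])

# add white Gaussian noise (assumed level) and save in the expected format
noise = 0.05
sio.savemat('T.mat', {'T': T})
sio.savemat('V1.mat', {'V1': V1_true + np.random.normal(0, noise, npts+1)})
sio.savemat('V2.mat', {'V2': V2_true + np.random.normal(0, noise, npts+1)})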
Now I would like to convert this code to PYMC3, in order to use NUTS and ADVI.
import matplotlib.pyplot as plt
import scipy.io as sio
import pandas as pd
import numpy as np
import pymc3 as pm
import theano.tensor as tt
import theano
from pymc3 import Model, Normal, HalfNormal, Uniform
from pymc3 import NUTS, find_MAP, sample, Slice, traceplot, summary
from pymc3 import Deterministic
from scipy.optimize import fmin_powell
#import data
xdata = sio.loadmat('T.mat')['T'][0] #time
ydata1 = sio.loadmat('V1.mat')['V1'][0] # V2=dV1/dt, (X=V1),
ydata2 = sio.loadmat('V2.mat')['V2'][0] # dV2/dt=-(2pi*omega)^2*V1-gama*V2
#time span for solving the equations
npts= 500
dt=0.01
Tspan=5.0
time = np.linspace(0,Tspan,npts+1)
niter=10000
burn = niter//2
with pm.Model() as model:
    # Priors for unknown model parameters
    sigma = pm.HalfNormal('sigma', sd=1)
    gama = pm.Uniform('gama', 0.0, 20.0)
    omega = pm.Uniform('omega', 0.0, 20.0)
    # initial condition
    V0 = [1.0, 1.0]
    # Solve the equations
    # do I need to use theano.tensor here?!
    @theano.compile.ops.as_op(itypes=[tt.dscalar, tt.dscalar], otypes=[tt.dvector])
    def DHOS(gama=gama, omega=omega):
        V1 = np.zeros(npts+1)
        V2 = np.zeros(npts+1)
        V1[0] = V0[0]
        V2[0] = V0[1]
        for i in range(1, npts+1):
            V1[i] = V1[i-1] + dt*V2[i-1]
            V2[i] = V2[i-1] + dt*(-((2*np.pi*omega)**2)*V1[i-1] - gama*V2[i-1])
        return V1, V2
    V1 = pm.Deterministic('V1', DHOS[0])
    V2 = pm.Deterministic('V2', DHOS[1])
    start = pm.find_MAP(fmin=fmin_powell, disp=True)
    step = pm.NUTS()
    trace = pm.sample(niter, step, start=start, progressbar=False)
    traceplot(trace)
    Summary = pm.df_summary(trace[-1000:])
    gama_trace = trace.get_values('gama', burn)
    omega_trace = trace.get_values('omega', burn)
For this code I get the following error:
V1 = pm.Deterministic('V1', DHOS[0])
TypeError: 'FromFunctionOp' object does not support indexing
Briefly, I would like to know how I can convert the following part of the PYMC code to PYMC3.
@pymc.deterministic
def DOS(gama=gama, omega=omega):
    V1 = np.zeros(npts+1)
    V2 = np.zeros(npts+1)
    V1[0] = V0[0]
    V2[0] = V0[1]
    for i in range(1, npts+1):
        V1[i] = V1[i-1] + dt*V2[i-1]
        V2[i] = V2[i-1] + dt*(-((2*np.pi*omega)**2)*V1[i-1] - gama*V2[i-1])
    return [V1, V2]

V1 = pymc.Lambda('V1', lambda DOS=DOS: DOS[0])
V2 = pymc.Lambda('V2', lambda DOS=DOS: DOS[1])
The problem is that, first, the arguments of the Deterministic function are different in PYMC3 than in PYMC, and second, there is no Lambda function in PYMC3.
I would appreciate your help with solving ODEs in PYMC3 for parameter estimation tasks in biological systems (estimating the equation parameters from data).
Thanks a lot in advance for your help.
Kind Regards,
Meysam
I would suggest, and have successfully implemented, using a 'black box' method for interfacing with PYMC3. In this case, what that means is calculating the log-likelihood yourself and then using PYMC3 to sample it. This requires writing your functions in a way that Theano and PYMC3 can interface with them.
This is outlined in a notebook on the PYMC3 page, which uses cython as an example.
Here is a somewhat shorter sample of what needs to be done.
First you can load your data and set up any parameters you need, such as your time steps etc.
import pymc3 as pm
import numpy as np
import scipy.io as sio
import theano
import theano.tensor as tt
#import data
xdata = sio.loadmat('T.mat')['T'][0] #time
ydata1 = sio.loadmat('V1.mat')['V1'][0] # V2=dV1/dt, (X=V1),
ydata2 = sio.loadmat('V2.mat')['V2'][0] # dV2/dt=-(2pi*omega)^2*V1-gama*V2
#time span for solving the equations
npts= 500
dt=0.01
Tspan=5.0
time = np.linspace(0,Tspan,npts+1)
#initial condition
V0 = [1.0, 1.0]
Then you define a data generating function just as before but you don't need to use any decorators from PYMC for this. The output of this function should be whatever you need to compare to your data to calculate the likelihood.
def DHOS(theta):
    gama, omega = theta
    V1 = np.zeros(npts+1)
    V2 = np.zeros(npts+1)
    V1[0] = V0[0]
    V2[0] = V0[1]
    for i in range(1, npts+1):
        V1[i] = V1[i-1] + dt*V2[i-1]
        V2[i] = V2[i-1] + dt*(-((2*np.pi*omega)**2)*V1[i-1] - gama*V2[i-1])
    return [V1, V2]
Next you write a function that calls the previous function and calculates the likelihood using whatever distribution you want, in this case a normal distribution.
def my_loglike(theta, data, sigma):
    """
    A Gaussian log-likelihood function for a model with parameters given in theta
    """
    model = DHOS(theta)  # V1 and V2 from the DHOS function
    # Here data = [ydata1, ydata2] to compare with model.
    # sigma is either the same shape as model or a scalar,
    # which corresponds to the uncertainty on the data.
    return -0.5*np.sum((np.asarray(data) - np.asarray(model))**2/sigma**2)
From here you now have to define a Theano Op class so that it can interface with PYMC3.
# define a theano Op for our likelihood function
class LogLike(tt.Op):
    """
    Specify what type of object will be passed and returned to the Op when it is
    called. In our case we will be passing it a vector of values (the parameters
    that define our model) and returning a single "scalar" value (the
    log-likelihood)
    """
    itypes = [tt.dvector]  # expects a vector of parameter values when called
    otypes = [tt.dscalar]  # outputs a single scalar value (the log likelihood)

    def __init__(self, loglike, data, sigma):
        """
        Initialise the Op with various things that our log-likelihood function
        requires. Below are the things that are needed in this particular
        example.

        Parameters
        ----------
        loglike:
            The log-likelihood (or whatever) function we've defined
        data:
            The "observed" data that our log-likelihood function takes in
        sigma:
            The noise standard deviation that our function requires.
        """
        # add inputs as class attributes
        self.likelihood = loglike
        self.data = data
        self.sigma = sigma

    def perform(self, node, inputs, outputs):
        # the method that is used when calling the Op
        theta, = inputs  # this will contain my variables
        # call the log-likelihood function
        logl = self.likelihood(theta, self.data, self.sigma)
        outputs[0][0] = np.array(logl)  # output the log-likelihood
Finally you can use PYMC3 to build your model and sample accordingly.
ndraws = 10000  # number of draws from the distribution
nburn = 1000    # number of "burn-in points" (which we'll discard)

# create our Op (here the observed data are ydata1 and ydata2, and 10 is a
# placeholder value for sigma)
logl = LogLike(my_loglike, np.array([ydata1, ydata2]), 10)

# use PyMC3 to sample from the log-likelihood
with pm.Model():
    gama = pm.Uniform('gama', 0.0, 20.0)
    omega = pm.Uniform('omega', 0.0, 20.0)
    # convert gama and omega to a tensor vector
    theta = tt.as_tensor_variable([gama, omega])
    # use a DensityDist (use a lambda function to "call" the Op)
    pm.DensityDist('likelihood', lambda v: logl(v), observed={'v': theta})
    trace = pm.sample(ndraws, tune=nburn, discard_tuned_samples=True)
And you can use the internal plotting to see the results of the sampling
_ = pm.traceplot(trace)
This was just adapted from the example notebook in the link, and as mentioned there, if you want to use NUTS you need gradient information, which you do not have given your custom function. The link discusses how to compute the gradient and construct an Op for it so you can pass it into the sampler, but I have not shown that here.
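A rough sketch of what that could look like, using central finite differences (the class names and the step size are just illustrative):

class LogLikeGrad(tt.Op):
    """Op that numerically estimates d(log-likelihood)/d(theta)."""
    itypes = [tt.dvector]  # parameter vector
    otypes = [tt.dvector]  # gradient vector, same length as theta

    def __init__(self, loglike, data, sigma):
        self.likelihood = loglike
        self.data = data
        self.sigma = sigma

    def perform(self, node, inputs, outputs):
        theta, = inputs
        eps = np.sqrt(np.finfo(float).eps)  # finite-difference step (illustrative)
        grads = np.zeros_like(theta)
        for i in range(len(theta)):
            step = np.zeros_like(theta)
            step[i] = eps
            grads[i] = (self.likelihood(theta + step, self.data, self.sigma) -
                        self.likelihood(theta - step, self.data, self.sigma)) / (2 * eps)
        outputs[0][0] = grads


class LogLikeWithGrad(LogLike):
    """Same as LogLike, but also tells Theano how to compute the gradient."""

    def __init__(self, loglike, data, sigma):
        super().__init__(loglike, data, sigma)
        self.logpgrad = LogLikeGrad(loglike, data, sigma)

    def grad(self, inputs, g):
        # g[0] is the gradient flowing back from the rest of the graph
        theta, = inputs
        return [g[0] * self.logpgrad(theta)]

You would then create the Op as logl = LogLikeWithGrad(my_loglike, data, sigma) and use it exactly as above, and NUTS should be able to use the (approximate) gradients.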
Additionally if you want to use solve_ivp (or odeint or another solver), all you have to do is change the DHOS function as you normally would to invoke the solver. The rest of the code should be portable to whatever problem you, or anyone else, need.
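For example, a sketch of the DHOS function rewritten around solve_ivp (reusing time and V0 defined above; the solver settings are just the defaults):

from scipy.integrate import solve_ivp

def DHOS(theta):
    gama, omega = theta

    def rhs(t, y):
        V1, V2 = y
        return [V2, -((2*np.pi*omega)**2)*V1 - gama*V2]

    sol = solve_ivp(rhs, (time[0], time[-1]), V0, t_eval=time)
    return [sol.y[0], sol.y[1]]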
I have been working with the following link,
Fitting empirical distribution to theoretical ones with Scipy (Python)?
I have run my data through the code from the link and found that the best-fitting distribution for my data is the non-central Student's t distribution. I couldn't find this distribution in the pymc3 package, so I decided to have a look at scipy to understand how the distribution is formed. I created a custom distribution and I have a few questions:
Is my approach to creating the distribution right?
How can I implement the custom distribution into models?
Regarding the prior distributions, do I use the same approach as for normal-distribution priors (mu and sigma), combined with HalfNormal priors for the degrees of freedom and the noncentrality value?
My custom distribution:
import numpy as np
import theano.tensor as tt
from scipy import stats
from scipy.special import hyp1f1, nctdtr
import warnings
from pymc3.theanof import floatX
from pymc3.distributions.dist_math import bound, gammaln
from pymc3.distributions.continuous import assert_negative_support, get_tau_sigma
from pymc3.distributions.distribution import Continuous, draw_values, generate_samples
class NonCentralStudentT(Continuous):
    """
    Parameters
    ----------
    nu: float
        Degrees of freedom, also known as normality parameter (nu > 0).
    nc: float
        Noncentrality parameter.
    mu: float
        Location parameter.
    sigma: float
        Scale parameter (sigma > 0). Converges to the standard deviation as nu increases. (only required if lam is not specified)
    lam: float
        Scale parameter (lam > 0). Converges to the precision as nu increases. (only required if sigma is not specified)
    """

    def __init__(self, nu, nc, mu=0, lam=None, sigma=None, sd=None, *args, **kwargs):
        super(NonCentralStudentT, self).__init__(*args, **kwargs)
        if sd is not None:
            sigma = sd
            warnings.warn("sd is deprecated, use sigma instead", DeprecationWarning)
        self.nu = nu = tt.as_tensor_variable(floatX(nu))
        self.nc = nc = tt.as_tensor_variable(floatX(nc))
        lam, sigma = get_tau_sigma(tau=lam, sigma=sigma)
        self.lam = lam = tt.as_tensor_variable(lam)
        self.sigma = self.sd = sigma = tt.as_tensor_variable(sigma)
        self.mean = self.median = self.mode = self.mu = mu = tt.as_tensor_variable(mu)
        self.variance = tt.switch((nu > 2) * 1, (1 / self.lam) * (nu / (nu - 2)), np.inf)

        assert_negative_support(lam, 'lam (sigma)', 'NonCentralStudentT')
        assert_negative_support(nu, 'nu', 'NonCentralStudentT')
        assert_negative_support(nc, 'nc', 'NonCentralStudentT')

    def random(self, point=None, size=None):
        """
        Draw random values from Non-Central Student's T distribution.

        Parameters
        ----------
        point: dict, optional
            Dict of variable values on which random values are to be
            conditioned (uses default point if not specified).
        size: int, optional
            Desired size of random sample (returns one sample if not
            specified).

        Returns
        -------
        array
        """
        nu, nc, mu, lam = draw_values([self.nu, self.nc, self.mu, self.lam], point=point, size=size)
        return generate_samples(stats.nct.rvs, nu, nc, loc=mu, scale=lam ** -0.5, dist_shape=self.shape, size=size)

    def logp(self, value):
        """
        Calculate log-probability of Non-Central Student's T distribution at specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log-probability is calculated. If the log probabilities for multiple
            values are desired the values must be provided in a numpy array or theano tensor

        Returns
        -------
        TensorVariable
        """
        nu = self.nu
        nc = self.nc
        mu = self.mu
        lam = self.lam
        n = nu * 1.0
        nc = nc * 1.0
        x2 = value * value
        ncx2 = nc * nc * x2
        fac1 = n + x2
        trm1 = n / 2. * tt.log(n) + gammaln(n + 1)
        trm1 -= n * tt.log(2) + nc * nc / 2. + (n / 2.) * tt.log(fac1) + gammaln(n / 2.)
        Px = tt.exp(trm1)
        valF = ncx2 / (2 * fac1)
        trm1 = tt.sqrt(2) * nc * value * hyp1f1(n / 2 + 1, 1.5, valF)
        trm1 /= np.asarray(fac1 * tt.gamma((n + 1) / 2))
        trm2 = hyp1f1((n + 1) / 2, 0.5, valF)
        trm2 /= np.asarray(np.sqrt(fac1) * tt.gamma(n / 2 + 1))
        Px *= trm1 + trm2
        return bound(Px, lam > 0, nu > 0, nc > 0)

    def logcdf(self, value):
        """
        Compute the log of the cumulative distribution function for Non-Central Student's T distribution
        at the specified value.

        Parameters
        ----------
        value: numeric
            Value(s) for which log CDF is calculated. If the log CDF for multiple
            values are desired the values must be provided in a numpy array or theano tensor.

        Returns
        -------
        TensorVariable
        """
        nu = self.nu
        nc = self.nc
        return nctdtr(nu, nc, value)
My Custom model:
with pm.Model() as model:
    # Prior Distributions for unknown model parameters:
    mu = pm.Normal('mu', 0, 10)
    sigma = pm.Normal('sigma', 0, 10)
    nc = pm.HalfNormal('nc', sigma=10)
    nu = pm.HalfNormal('nu', sigma=1)

    # Observed data comes from a likelihood (sampling distribution) of observations:
    # => (input the custom distribution here), e.g.:
    # observed_data = pm.Beta('observed_data', alpha=alpha, beta=beta, observed=data)

    # draw 5000 posterior samples
    trace = pm.sample(draws=5000, tune=2000, chains=3, cores=1)

    # Obtaining Posterior Predictive Sampling:
    post_pred = pm.sample_posterior_predictive(trace, samples=3000)
    print(post_pred['observed_data'].shape)

    print('\nSummary: ')
    print(pm.stats.summary(data=trace))
    print(pm.stats.summary(data=post_pred))
Edit 1:
I redesigned the custom model to include the custom distribution. However, I keep getting errors from the equations used to compute the likelihood, or sometimes the tensor computation locks up and the code just freezes. Find my code below:
with pm.Model() as model:
    # Prior Distributions for unknown model parameters:
    mu = pm.Normal('mu', mu=0, sigma=1)
    sd = pm.HalfNormal('sd', sigma=1)
    nc = pm.HalfNormal('nc', sigma=10)
    nu = pm.HalfNormal('nu', sigma=1)

    # Custom distribution:
    # observed_data = pm.DensityDist('observed_data', NonCentralStudentT, observed=data_list)

    # Observed data comes from a likelihood (sampling distribution) of observations:
    observed_data = NonCentralStudentT('observed_data', mu=mu, sd=sd, nc=nc, nu=nu, observed=data_list)

    # draw 5000 posterior samples
    trace_S = pm.sample(draws=5000, tune=2000, chains=3, cores=1)

    # Obtaining Posterior Predictive Sampling:
    post_pred_S = pm.sample_posterior_predictive(trace_S, samples=3000)
    print(post_pred_S['observed_data'].shape)

    print('\nSummary: ')
    print(pm.stats.summary(data=trace_S))
    print(pm.stats.summary(data=post_pred_S))
Edit 2:
I have been looking online for how to convert the function to Theano; the only thing I found that defines the function is the following GitHub link: hyp1f1 function GitHub.
Will this be enough to convert the function into Theano?
In addition, I have a question: is it okay to use NumPy arrays with Theano?
Also, I thought of another way, but I am not sure if it can be implemented. I looked into the nct function in SciPy, and the documentation says the following:

If Y is a standard normal random variable and V is an independent chi-square random variable (chi2) with k degrees of freedom, then

X = (Y + c) / sqrt(V/k)

has a non-central Student's t distribution on the real line. The degrees of freedom parameter k (denoted df in the implementation) satisfies k > 0 and the noncentrality parameter c (denoted nc in the implementation) is a real number.

The probability density above is defined in the "standardized" form. To shift and/or scale the distribution use the loc and scale parameters. Specifically, nct.pdf(x, df, nc, loc, scale) is identically equivalent to nct.pdf(y, df, nc) / scale with y = (x - loc) / scale.

So, I thought of using only normal and chi-square random variables as priors, plugging them, together with the degrees of freedom variable used earlier in the code, into the equation from SciPy. Will that be enough to get the distribution?
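As a rough sketch of that idea (variable names are illustrative), such a quantity can be built from a Normal and a ChiSquared variable with pm.Deterministic. Note that this gives a derived random variable, not a distribution you can attach observed data to, so it does not by itself replace the custom logp:

import pymc3 as pm
import theano.tensor as tt

with pm.Model() as nct_sketch:
    nu = pm.HalfNormal('nu', sigma=1)       # degrees of freedom k
    nc = pm.HalfNormal('nc', sigma=10)      # noncentrality c
    mu = pm.Normal('mu', mu=0, sigma=1)     # loc
    sd = pm.HalfNormal('sd', sigma=1)       # scale

    y_std = pm.Normal('y_std', mu=0, sigma=1)  # standard normal Y
    v = pm.ChiSquared('v', nu=nu)              # chi-square V with k dof
    # X = (Y + c) / sqrt(V / k), then shifted and scaled
    x = pm.Deterministic('x', mu + sd * (y_std + nc) / tt.sqrt(v / nu))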
Edit 3:
I managed to run the code from the link about fitting an empirical distribution and found that the second-best fit was the Student's t distribution, so I will be using that. Thank you for your help. I just have a side question: I ran my model with the Student's t distribution, but I got these warnings:
There were 52 divergences after tuning. Increase target_accept or reparameterize.
The acceptance probability does not match the target. It is 0.7037574708196309, but should be close to 0.8. Try to increase the number of tuning steps.
The number of effective samples is smaller than 10% for some parameters.
I am just confused by these warnings. Do you have any idea what they mean? I know they won't stop my code from running, but can I reduce the divergences? And regarding the effective samples, do I need to increase the number of samples drawn in the trace?
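These warnings are diagnostics rather than errors. Divergences usually mean the posterior geometry is difficult for NUTS; a common first step is to raise target_accept and the number of tuning steps, for example (the exact numbers are illustrative, and on older PyMC3 versions target_accept may need to be passed via step=pm.NUTS(target_accept=...)):

with model:  # re-use the model defined above
    trace = pm.sample(draws=5000, tune=4000, chains=3, cores=1,
                      target_accept=0.95)  # default target is 0.8

A higher target_accept makes NUTS take smaller steps, which typically reduces divergences at the cost of slower sampling; for the effective-sample-size warning, drawing more samples or reparameterizing the model are the usual remedies.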
I'm fairly new to Python and I'm trying to fit data from college using lmfit. The Y variable has a variable error of 3%. How do I add that error to the fitting process? I am switching over from scipy's curve_fit, where it was really easy to do: just create an array with the error values and specify the error when fitting by adding sigma=[yourarray].
This is my current code:
from lmfit import Minimizer, Parameters, report_fit
import matplotlib.pyplot as plt
import numpy as np

w1, V1, phi1, scal1 = np.loadtxt("./FiltroPasaBajo_1.txt", delimiter="\t", unpack=True)
t = w1
eV = V1*0.03 + 0.01

def funcion(parametros, x, y):
    R = parametros['R'].value
    C = parametros['C'].value
    modelo = 4/((1+(x**2)*(R**2)*(C**2))**0.5)
    return modelo - y

parametros = Parameters()
parametros.add('R', value=1000, min=900, max=1100)
parametros.add('C', value=1E-6, min=1E-7, max=1E-5)

fit = Minimizer(funcion, parametros, fcn_args=(t, V1))
resultado = fit.minimize()
final = V1 + resultado.residual
report_fit(resultado)

try:
    plt.plot(t, V1, 'k+')
    plt.plot(t, final, 'r')
    plt.show()
except ImportError:
    pass
V1 are the values I measured, and eV would be the array of errors. t is the x coordinate.
Thank you for your time
The minimize() function minimizes an array in the least-square sense, adjusting the variable parameters in order to minimize (resid**2).sum() for the resid array returned by your objective function. It really does not know anything about the uncertainties in your data or even about your data. To use the uncertainties in your fit, you need to pass in your array eV just as you pass in t and V1 and then use that in your calculation of the array to be minimized.
One typically wants to minimize Sum[ (data-model)^2/epsilon^2 ], where epsilon is the uncertainty in the data (your eV), so the residual array should be altered from data-model to (data-model)/epsilon. For your fit, you would want
def funcion(parametros, x, y, eps):
    R = parametros['R'].value
    C = parametros['C'].value
    modelo = 4/((1+(x**2)*(R**2)*(C**2))**0.5)
    return (modelo - y)/eps
and then use this with
fit = Minimizer(funcion, parametros, fcn_args=(t, V1, eV))
resultado = fit.minimize()
...
If you use the lmfit.Model interface (designed for curve-fitting), then you could pass in a weights array that multiplies data - model, which would be 1.0/eV to represent the weighting for uncertainties (as above with minimize). Using the lmfit.Model interface and providing uncertainties would then look like this:
from lmfit import Model

# model function, to model the data
def func(t, r, c):
    return 4/((1+(t**2)*(r**2)*(c**2))**0.5)

model = Model(func)

parametros = model.make_params(r=1000, c=1.e-6)
parametros['r'].set(min=900, max=1100)
parametros['c'].set(min=1.e-7, max=1.e-5)

resultado = model.fit(V1, parametros, t=t, weights=1.0/eV)
print(resultado.fit_report())

plt.errorbar(t, V1, eV, fmt='k+', label='data')
plt.plot(t, resultado.best_fit, 'r', label='fit')
plt.legend()
plt.show()
hope that helps....
I think you cannot provide sigma in fit.minimize() directly.
However, I see that fit.minimize() uses scipy's leastsq method (by default), which is the same method used by scipy's curve_fit.
If you look into scipy's curve_fit source, it does the following with sigma (for the 1-D case):
transform = 1.0 / sigma
jac = _wrap_jac(jac, xdata, transform)
res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
Since fit.minimize() allows you to pass keyword arguments (such as Dfun) through to leastsq, you can pass the jac the same way it is done in scipy's curve_fit.
Currently, I am trying to solve a problem from astrophysics which can be simplified as follows:
I want to fit a linear model (say y = a + b*x) to observed data, and I wish to use PyMC to characterize the posterior of a and b in a discrete grid parameter space, like in this figure:
I know PyMC has a DiscreteMetropolis class to find the posterior in discrete space, but that is for integer space, not a custom discrete space. So I am thinking of defining a potential to force PyMC to search in the grid, but it is not working well... Can anyone help with this, or has anyone solved a similar problem? Any thoughts will be greatly appreciated :)
Here is my draft code; the commented-out potential class is my idea for forcing PyMC to search in the grid:
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import pymc
#------------------------------------------------------------
# Generate the data
size = 200
slope_true = 12.3
y_intercept_true = 22.4
x = np.linspace(0, 1, size)
# y = a + b*x
y_true = y_intercept_true + slope_true * x
# add noise
y = y_true + np.random.normal(scale=.03, size=size)
# Define searching parameter space
# Note: this is discrete but not in the form of integer
slope_search_space = np.linspace(1,30,51)
y_intercept_search_space = np.linspace(1,30,51)
#------------------------------------------------------------
#Start initializing PyMC
@pymc.stochastic(dtype=int)
def slope(value=5, t_l=1, t_h=30):
    """The switchpoint for the rate of disaster occurrence."""
    def logp(value, t_l, t_h):
        if value > t_h or value < t_l:
            return -np.inf
        else:
            return -np.log(t_h - t_l + 1)

#@pymc.potential
#def slope_prior(val=slope, t_l=-30, t_h=30):
#    if val not in slope_search_space:
#        return -np.inf
#    return -np.log(t_h - t_l + 1)
#---

@pymc.stochastic(dtype=int)
def y_intercept(value=4, t_l=1, t_h=30):
    """The switchpoint for the rate of disaster occurrence."""
    def logp(value, t_l, t_h):
        if value > t_h or value < t_l:
            return -np.inf
        else:
            return -np.log(t_h - t_l + 1)

#@pymc.potential
#def y_intercept_prior(val=y_intercept, t_l=-30, t_h=30):
#    if val not in y_intercept_search_space:
#        return -np.inf
#    return -np.log(t_h - t_l + 1)

# Define observed data
@pymc.deterministic
def mu(x=x, slope=slope, y_intercept=y_intercept):
    # Linear age-price model
    return y_intercept + slope*x

# Sampling distribution of prices
p = pymc.Poisson('p', mu, value=y, observed=True)
model = dict(slope=slope, y_intercept=y_intercept, mu=mu, p=p)
#-----------------------------------------------------------
# perform the MCMC
M = pymc.MCMC(model)
trace = M.sample(iter=10000,burn=5000)
#Plot
pymc.Matplot.plot(M)
plt.figure()
pymc.Matplot.summary_plot([M.slope,M.y_intercept])
plt.show()
I managed to solve my problem a few days ago. To my surprise, some of my astronomy friends in a Facebook group are also interested in this question, so I think it might be useful to post my solution in case other people are having the same issue. Please note, this solution may not be the best way to tackle this problem; in fact, I believe there is a more elegant way. But for now, this is the best I can come up with. Hope this is helpful to some of you.
The way I solved the problem is very straightforward, and I summarize it as follows:
1> Define the slope and y_intercept stochastic variables in continuous form (PyMC will then use Metropolis to sample them).
2> Define a function find_nearest that maps the continuous random variables slope and y_intercept to the grid, e.g. Grid_slope=np.array([1,2,3,4,…51]), slope=4.678; then find_nearest(Grid_slope, slope) returns 5, as the slope value is closest to 5 in Grid_slope. Similarly for the y_intercept variable.
3> When computing the likelihood, this is where I do the trick: I apply the find_nearest function to the model inside the likelihood function, i.e. I change model(slope, y_intercept) to model(find_nearest(Grid_slope, slope), find_nearest(Grid_y_intercept, y_intercept)), which computes the likelihood only on the grid parameter space.
4> The trace returned for slope and y_intercept by PyMC may not be strictly grid values; you can use the find_nearest function to map the trace onto grid values and then make any statistical inference from it. In my case, I just use the trace straight away to get statistics, and the result is nice :)
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import pymc
#------------------------------------------------------------
# Generate the data
size = 200
slope_true = 12.3
y_intercept_true = 22.4
x = np.linspace(0, 1, size)
# y = a + b*x
y_true = y_intercept_true + slope_true * x
# add noise
y = y_true + np.random.normal(scale=.03, size=size)
# Define searching parameter space
# Note: this is discrete but not in the form of integer
slope_search_space = np.linspace(1,30,51)
y_intercept_search_space = np.linspace(1,30,51)
#------------------------------------------------------------
#Start initializing PyMC
from pymc import Normal, Gamma, deterministic, MCMC, Matplot, Uniform
# Constant priors for parameters
slope = Uniform('slope', 1, 30)
y_intercept = Uniform('y_intp', 1, 30)
# Precision of normal distribution of y value
tau = Uniform('tau',0,10000 )
@deterministic
def mu(x=x, slope=slope, y_intercept=y_intercept):
    def find_nearest(array, value):
        """
        This function maps 'value' to the nearest point in 'array'
        """
        idx = (np.abs(array - value)).argmin()
        return array[idx]
    # Linear model
    iso = find_nearest(y_intercept_search_space, y_intercept) + find_nearest(slope_search_space, slope)*x
    return iso

# Sampling distribution of y
p = Normal('p', mu, tau, value=y, observed=True)

model = dict(slope=slope, y_intercept=y_intercept, tau=tau, mu=mu, p=p)
#-----------------------------------------------------------
# perform the MCMC
M = pymc.MCMC(model)
trace = M.sample(40000,20000)
#Plot
pymc.Matplot.plot(M)
M.slope.summary()
M.y_intercept.summary()
plt.figure()
pymc.Matplot.summary_plot([M.slope,M.y_intercept])
plt.show()
I would like to fit ellipsometric data to a complex model using lmfit. Two measured parameters, psi and delta, are variables in a complex function rho.
I could try separating the problem into real and imaginary parts with shared parameters, or a piecewise approach, but is there any way to do it directly with a complex function?
Fitting only the real part of the function works beautifully, but when I define a complex residual function I get:
TypeError: no ordering relation is defined for complex numbers.
Below is my code for real function fitting and my attempt at tackling complex fit problem:
from __future__ import division
from __future__ import print_function
import numpy as np
from pylab import *
from lmfit import minimize, Parameters, Parameter, report_errors
#=================================================================
# MODEL
def r01_p(eps2, th):
    c = cos(th)
    s = (sin(th))**2
    stev = sqrt(eps2) * c - sqrt(1-(s / eps2))
    imen = sqrt(eps2) * c + sqrt(1-(s / eps2))
    return stev/imen

def r01_s(eps2, th):
    c = cos(th)
    s = (sin(th))**2
    stev = c - sqrt(eps2) * sqrt(1-(s/eps2))
    imen = c + sqrt(eps2) * sqrt(1-(s/eps2))
    return stev/imen

def rho(eps2, th):
    return r01_p(eps2, th)/r01_s(eps2, th)

def psi(eps2, th):
    x1 = abs(r01_p(eps2, th))
    x2 = abs(r01_s(eps2, th))
    return np.arctan2(x1, x2)
#=================================================================
# REAL FIT
#
#%%
# generate data from model
th=linspace(deg2rad(45),deg2rad(70),70-45)
error=0.01
var_re=np.random.normal(size=len(th), scale=error)
data = psi(2,th) + var_re
# residual function
def residuals(params, th, data):
    eps2 = params['eps2'].value
    diff = psi(eps2, th) - data
    return diff
# create a set of Parameters
params = Parameters()
params.add('eps2', value= 1.0, min=1.5, max=3.0)
# do fit, here with leastsq model
result = minimize(residuals, params, args=(th, data),method="leastsq")
# calculate final result
final = data + result.residual
# write error report
report_errors(params)
# try to plot results
th, data, final=rad2deg([th, data, final])
try:
    import pylab
    clf()
    fig = plot(th, data, 'r o',
               th, final, 'b')
    setp(fig, lw=2.)
    xlabel(r'$\theta$ $(^{\circ})$', size=20)
    ylabel(r'$\psi$ $(^{\circ})$', size=20)
    show()
except:
    pass
#%%
#=================================================================
# COMPLEX FIT
# TypeError: no ordering relation is defined for complex numbers
"""
# data from model with added noise
th=linspace(deg2rad(45),deg2rad(70),70-45)
error=0.001
var_re=np.random.normal(size=len(th), scale=error)
var_im=np.random.normal(size=len(th), scale=error) * 1j
data = rho(4-1j,th) + var_re + var_im
# residual function
def residuals(params, th, data):
    eps2 = params['eps2'].value
    diff = rho(eps2, th) - data
    return np.abs(diff)
# create a set of Parameters
params = Parameters()
params.add('eps2', value= 1.5+1j, min=1+1j, max=3+3j)
# do fit, here with leastsq model
result = minimize(residuals, params, args=(th, data),method="leastsq")
# calculate final result
final = data + result.residual
# write error report
report_errors(params)
"""
#=================================================================
Edit:
I solved the problem with separate variables for the imaginary and real parts. The data should be shaped as [[imaginary_data],[real_data]], and the objective function must return a 1-D array.
def objective(params, th_data, data):
    eps_re = params['eps_re'].value
    eps_im = params['eps_im'].value
    d = params['d'].value
    residual_delta = data[0,:] - delta(eps_re - eps_im*1j, d, frac, lambd, th_data)
    residual_psi = data[1,:] - psi(eps_re - eps_im*1j, d, frac, lambd, th_data)
    return np.append(residual_delta, residual_psi)

# create a set of Parameters
params = Parameters()
params.add('eps_re', value=1.5, min=1.0, max=5)
params.add('eps_im', value=1.0, min=0.0, max=5)
params.add('d', value=10.0, min=5.0, max=100.0)

# All available methods
methods = ['leastsq', 'nelder', 'lbfgsb', 'anneal', 'powell', 'cobyla', 'slsqp']
# Chosen method
metoda = 'leastsq'

# run the global fit to all the data sets
result = minimize(objective, params, args=(th_data, data), method=metoda)
....
return ...
The lmfit FAQ suggests simply taking both real and imaginary parts by using numpy.ndarray.view, which means you don't need to go through the separation of the real and imaginary parts manually.
def residuals(params, th, data):
    eps2 = params['eps2'].value
    diff = rho(eps2, th) - data
    # The only change required is to view the complex residuals
    # as floats instead of taking abs.
    return diff.view(float)
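For concreteness, this is what the view does to a complex residual array; the fitter then sees the real and imaginary parts as separate residual entries (a small standalone illustration):

import numpy as np

diff = np.array([1.0 + 2.0j, 3.0 - 4.0j])  # complex residuals
print(diff.view(float))                     # [ 1.  2.  3. -4.]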
I am having some trouble translating my MATLAB code into Python via SciPy and NumPy. I am stuck on how to find optimal parameter values (k0 and k1) for my system of ODEs to fit my ten observed data points. I currently have an initial guess for k0 and k1. In MATLAB, I can use something called 'fminsearch', which is a function that takes the system of ODEs, the observed data points, and the initial values of the system of ODEs. It will then calculate a new pair of parameters k0 and k1 that fit the observed data. I have included my code to see if you can help me implement some kind of 'fminsearch' to find the optimal parameter values k0 and k1 that fit my data. I want to add whatever code is needed to do this to my lsqtest.py file.
I have three .py files - ode.py, lsq.py, and lsqtest.py
ode.py:
def f(y, t, k):
    return (-k[0]*y[0],
            k[0]*y[0]-k[1]*y[1],
            k[1]*y[1])
lsq.py:
import pylab as py
import numpy as np
from scipy import integrate
from scipy import optimize
import ode
def lsq(teta, y0, data):
    # INPUT teta, the unknowns k0, k1
    #       data, observed
    #       y0, initial values needed by the ODE
    # OUTPUT lsq value
    t = np.linspace(0, 9, 10)
    y_obs = data  # data points
    k = [0, 0]
    k[0] = teta[0]
    k[1] = teta[1]
    # call the ODE solver to get the states
    # (the ODE system is in ode.py):
    r = integrate.odeint(ode.f, y0, t, args=(k,))
    # at each row (time point), y_cal has
    # the values of the components [A, B, C]
    y_cal = r[:, 1]  # separate the measured B
    # compute the expression to be minimized:
    return sum((y_obs - y_cal)**2)
lsqtest.py:
import pylab as py
import numpy as np
from scipy import integrate
from scipy import optimize
import lsq
if __name__ == '__main__':
    teta = [0.2, 0.3]   # guess for parameter values k0 and k1
    y0 = [1, 0, 0]      # initial conditions for system
    y = [0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309]  # observed data points
    data = y
    resid = lsq.lsq(teta, y0, data)
    print(resid)
For these kinds of fitting tasks you could use the package lmfit. The outcome of the fit would look like this; as you can see, the data are reproduced very well:
For now, I fixed the initial concentrations; you could also set them as variables if you like (just remove the vary=False in the code below). The parameters you obtain are:
[[Variables]]
x10: 5 (fixed)
x20: 0 (fixed)
x30: 0 (fixed)
k0: 0.12183301 +/- 0.005909 (4.85%) (init= 0.2)
k1: 0.77583946 +/- 0.026639 (3.43%) (init= 0.3)
[[Correlations]] (unreported correlations are < 0.100)
C(k0, k1) = 0.809
The code that reproduces the plot looks like this (some explanation can be found in the inline comments):
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from lmfit import minimize, Parameters, Parameter, report_fit


def f(y, t, paras):
    """
    Your system of differential equations
    """
    x1 = y[0]
    x2 = y[1]
    x3 = y[2]

    try:
        k0 = paras['k0'].value
        k1 = paras['k1'].value
    except KeyError:
        k0, k1 = paras

    # the model equations
    f0 = -k0 * x1
    f1 = k0 * x1 - k1 * x2
    f2 = k1 * x2
    return [f0, f1, f2]


def g(t, x0, paras):
    """
    Solution to the ODE x'(t) = f(t,x,k) with initial condition x(0) = x0
    """
    x = odeint(f, x0, t, args=(paras,))
    return x


def residual(paras, t, data):
    """
    compute the residual between actual data and fitted data
    """
    x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
    model = g(t, x0, paras)
    # you only have data for one of your variables
    x2_model = model[:, 1]
    return (x2_model - data).ravel()
# initial conditions
x10 = 5.
x20 = 0
x30 = 0
y0 = [x10, x20, x30]
# measured data
t_measured = np.linspace(0, 9, 10)
x2_measured = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])
plt.figure()
plt.scatter(t_measured, x2_measured, marker='o', color='b', label='measured data', s=75)
# set parameters including bounds; you can also fix parameters (use vary=False)
params = Parameters()
params.add('x10', value=x10, vary=False)
params.add('x20', value=x20, vary=False)
params.add('x30', value=x30, vary=False)
params.add('k0', value=0.2, min=0.0001, max=2.)
params.add('k1', value=0.3, min=0.0001, max=2.)
# fit model
result = minimize(residual, params, args=(t_measured, x2_measured), method='leastsq') # leastsq nelder
# check results of the fit
data_fitted = g(np.linspace(0., 9., 100), y0, result.params)
# plot fitted data
plt.plot(np.linspace(0., 9., 100), data_fitted[:, 1], '-', linewidth=2, color='red', label='fitted data')
plt.legend()
plt.xlim([0, max(t_measured)])
plt.ylim([0, 1.1 * max(data_fitted[:, 1])])
# display fitted statistics
report_fit(result)
plt.show()
If you have data for additional variables, you can simply update the function residual.
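For example, if a second species were also measured, the residual could be extended roughly like this (data_x3 here is a hypothetical measurement array for x3):

def residual_all(paras, t, data_x2, data_x3):
    """sketch: residuals when both x2 and x3 are measured"""
    x0 = paras['x10'].value, paras['x20'].value, paras['x30'].value
    model = g(t, x0, paras)
    res_x2 = model[:, 1] - data_x2
    res_x3 = model[:, 2] - data_x3
    # lmfit minimizes the concatenated 1-D residual vector
    return np.concatenate([res_x2.ravel(), res_x3.ravel()])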
The following worked for me:
import pylab as pp
import numpy as np
from scipy import integrate, interpolate
from scipy import optimize
##initialize the data
x_data = np.linspace(0,9,10)
y_data = np.array([0.000,0.416,0.489,0.595,0.506,0.493,0.458,0.394,0.335,0.309])
def f(y, t, k):
    """define the ODE system in terms of
       dependent variable y,
       independent variable t, and
       optional parameters, in this case a single variable k"""
    return (-k[0]*y[0],
            k[0]*y[0]-k[1]*y[1],
            k[1]*y[1])

def my_ls_func(x, teta):
    """definition of function for LS fit
       x gives evaluation points,
       teta is an array of parameters to be varied for fit"""
    # create an alias to f which passes the optional params
    f2 = lambda y, t: f(y, t, teta)
    # calculate ode solution, return values for each entry of "x"
    r = integrate.odeint(f2, y0, x)
    # in this case, we only need one of the dependent variable values
    return r[:, 1]

def f_resid(p):
    """function to pass to optimize.leastsq
       The routine will square and sum the values returned by
       this function"""
    return y_data - my_ls_func(x_data, p)

# solve the system - the solution is in variable c
guess = [0.2, 0.3]   # initial guess for params
y0 = [1, 0, 0]       # initial conditions for ODEs
(c, kvg) = optimize.leastsq(f_resid, guess)  # get params
print("parameter values are ", c)
# fit ODE results to interpolating spline just for fun
xeval=np.linspace(min(x_data), max(x_data),30)
gls = interpolate.UnivariateSpline(xeval, my_ls_func(xeval,c), k=3, s=0)
#pick a few more points for a very smooth curve, then plot
# data and curve fit
xeval=np.linspace(min(x_data), max(x_data),200)
#Plot of the data as red dots and fit as blue line
pp.plot(x_data, y_data,'.r',xeval,gls(xeval),'-b')
pp.xlabel('xlabel',{"fontsize":16})
pp.ylabel("ylabel",{"fontsize":16})
pp.legend(('data','fit'),loc=0)
pp.show()
Look at the scipy.optimize module. The minimize function looks fairly similar to fminsearch, and I believe that both basically use a simplex algorithm for optimization.
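For instance, a minimal sketch of lsqtest.py built around scipy.optimize.minimize with the Nelder-Mead (simplex) method, reusing the lsq function from the question:

import numpy as np
from scipy import optimize
import lsq

teta0 = [0.2, 0.3]   # initial guess for k0 and k1
y0 = [1, 0, 0]       # initial conditions for the ODE system
data = np.array([0.000, 0.416, 0.489, 0.595, 0.506,
                 0.493, 0.458, 0.394, 0.335, 0.309])

# lsq.lsq already returns the sum of squared residuals,
# so it can be minimized directly, much like MATLAB's fminsearch
res = optimize.minimize(lambda teta: lsq.lsq(teta, y0, data),
                        teta0, method='Nelder-Mead')
print(res.x)  # fitted k0 and k1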
# cleaned up a bit to get my head around it - thanks for sharing
import pylab as pp
import numpy as np
from scipy import integrate, optimize

class Parameterize_ODE():
    def __init__(self):
        self.X = np.linspace(0, 9, 10)
        self.y = np.array([0.000, 0.416, 0.489, 0.595, 0.506, 0.493, 0.458, 0.394, 0.335, 0.309])
        self.y0 = [1, 0, 0]  # initial conditions for the ODEs

    def ode(self, y, X, p):
        return (-p[0]*y[0],
                p[0]*y[0]-p[1]*y[1],
                p[1]*y[1])

    def model(self, X, p):
        return integrate.odeint(self.ode, self.y0, X, args=(p,))

    def f_resid(self, p):
        return self.y - self.model(self.X, p)[:, 1]

    def optim(self, p_guess):
        return optimize.leastsq(self.f_resid, p_guess)  # fit params

po = Parameterize_ODE()
p_guess = [0.2, 0.3]
c, kvg = po.optim(p_guess)

# --- show ---
print("parameter values are ", c, kvg)
x = np.linspace(min(po.X), max(po.X), 2000)
pp.plot(po.X, po.y, '.r', x, po.model(x, c)[:, 1], '-b')
pp.xlabel('X', {"fontsize": 16})
pp.ylabel("y", {"fontsize": 16})
pp.legend(('data', 'fit'), loc=0)
pp.show()