CAR model from pymc2 to PyMC3 - python

I'm still a noob with PyMC3, so the question might be naive, but I don't know how to translate this PyMC2 code into PyMC3. In particular, it's not clear to me how to translate the R function (the intrinsic CAR stochastic below).
beta = pymc.Normal('beta', mu=0, tau=1.0e-4)
s = pymc.Uniform('s', lower=0, upper=1.0e+4)
tau = pymc.Lambda('tau', lambda s=s: s**(-2))

### Intrinsic CAR
@pymc.stochastic
def R(tau=tau, value=np.zeros(N)):
    # Calculate mu based on average of neighbors
    mu = np.array([sum(W[i]*value[A[i]])/Wplus[i] for i in xrange(N)])
    # Scale precision to the number of neighbors
    taux = tau*Wplus
    return pymc.normal_like(value, mu, taux)

@pymc.deterministic
def M(beta=beta, R=R):
    return [np.exp(beta + R[i]) for i in xrange(N)]

obsvd = pymc.Poisson("obsvd", mu=M, value=Y, observed=True)
model = pymc.Model([s, beta, obsvd])
Code from https://github.com/Youki/statistical-modeling-for-data-analysis-with-python/blob/945c13549a872d869e33bc48082c42efc022a07b/Chapter11/Chapter11.rst, and http://glau.ca/?p=340
Can you help me? Thanks

In PyMC3, you can implement the CAR model using Theano's scan function. There is sample code in the PyMC3 documentation, which contains two implementations of CAR. Here is the first one [Source]:
import numpy as np
import pymc3 as pm
import theano.tensor as tt
from theano import scan
from pymc3.distributions import continuous
from pymc3.distributions import distribution

floatX = "float32"

class CAR(distribution.Continuous):
    """
    Conditional Autoregressive (CAR) distribution

    Parameters
    ----------
    a : list of adjacency information
    w : list of weight information
    tau : precision at each location
    """
    def __init__(self, w, a, tau, *args, **kwargs):
        super(CAR, self).__init__(*args, **kwargs)
        self.a = a = tt.as_tensor_variable(a)
        self.w = w = tt.as_tensor_variable(w)
        self.tau = tau*tt.sum(w, axis=1)
        self.mode = 0.

    def get_mu(self, x):
        def weight_mu(w, a):
            a1 = tt.cast(a, 'int32')
            return tt.sum(w*x[a1])/tt.sum(w)

        mu_w, _ = scan(fn=weight_mu,
                       sequences=[self.w, self.a])
        return mu_w

    def logp(self, x):
        mu_w = self.get_mu(x)
        tau = self.tau
        return tt.sum(continuous.Normal.dist(mu=mu_w, tau=tau).logp(x))
with pm.Model() as model1:
    # Vague prior on intercept
    beta0 = pm.Normal('beta0', mu=0.0, tau=1.0e-5)
    # Vague prior on covariate effect
    beta1 = pm.Normal('beta1', mu=0.0, tau=1.0e-5)

    # Random effects (hierarchical) prior
    tau_h = pm.Gamma('tau_h', alpha=3.2761, beta=1.81)
    # Spatial clustering prior
    tau_c = pm.Gamma('tau_c', alpha=1.0, beta=1.0)

    # Regional random effects
    theta = pm.Normal('theta', mu=0.0, tau=tau_h, shape=N)
    mu_phi = CAR('mu_phi', w=wmat, a=amat, tau=tau_c, shape=N)

    # Zero-centre phi
    phi = pm.Deterministic('phi', mu_phi - tt.mean(mu_phi))

    # Mean model
    mu = pm.Deterministic('mu', tt.exp(logE + beta0 + beta1*aff + theta + phi))

    # Likelihood
    Yi = pm.Poisson('Yi', mu=mu, observed=O)

    # Marginal SD of heterogeneity effects
    sd_h = pm.Deterministic('sd_h', tt.std(theta))
    # Marginal SD of clustering (spatial) effects
    sd_c = pm.Deterministic('sd_c', tt.std(phi))
    # Proportion of spatial variance
    alpha = pm.Deterministic('alpha', sd_c/(sd_h + sd_c))

    trace1 = pm.sample(1000, tune=500, cores=4,
                       init='advi',
                       nuts_kwargs={"target_accept": 0.9,
                                    "max_treedepth": 15})
Your M function corresponds here to the mean-model line:
mu = pm.Deterministic('mu', tt.exp(logE + beta0 + beta1*aff + theta + phi))
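For orientation, here is a rough, untested sketch of how the pieces of your PyMC2 model could map onto this pattern, assuming your W, A, Y and N from the question can be passed in the rectangular-array form the CAR class above expects:
# Hedged sketch only: W (weights), A (adjacency), Y (counts) and N are assumed
# to be defined as in the question and convertible to rectangular arrays.
with pm.Model() as model:
    beta = pm.Normal('beta', mu=0.0, tau=1.0e-4)
    s = pm.Uniform('s', lower=0, upper=1.0e+4)
    tau = pm.Deterministic('tau', s**-2)          # replaces pymc.Lambda
    R = CAR('R', w=W, a=A, tau=tau, shape=N)      # replaces the @pymc.stochastic R
    M = pm.Deterministic('M', tt.exp(beta + R))   # replaces the @pymc.deterministic M
    obsvd = pm.Poisson('obsvd', mu=M, observed=Y)
    trace = pm.sample(1000, tune=500)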

Related

What does the locality linear coding function do?

I got this code for spectral clustering.
https://github.com/BirdYin/scllc/blob/master/scllc.py
This is a landmark-based spectral clustering code.
What does the locality_linear_coding function do in this code?
class Scllc:
    def __locality_linear_coding(self, data, neighbors):
        indicator = np.ones([neighbors.shape[0], 1])
        penalty = np.eye(self.n_neighbors)

        # Get the weights of every neighbors
        z = neighbors - indicator.dot(data.reshape(-1,1).T)
        local_variance = z.dot(z.T)
        local_variance = local_variance + self.lambda_val * penalty
        weights = scipy.linalg.solve(local_variance, indicator)

        weights = weights / np.sum(weights)
        weights = weights / np.sum(np.abs(weights))
        weights = np.abs(weights)

        return weights.reshape(self.n_neighbors)

    def fit(self, X):
        [n_data, n_dim] = X.shape
        # Select landmarks
        if self.func_landmark == 'kmeans':
            landmarks, centers, unknown = k_means(X, self.n_landmarks, n_init=1, max_iter=100)
        nbrs = NearestNeighbors(metric='euclidean').fit(landmarks)

        # Create properties of the sparse matrix Z
        [dist, indy] = nbrs.kneighbors(X, n_neighbors=self.n_neighbors)
        indx = np.ones([n_data, self.n_neighbors]) * np.asarray(range(n_data))[:, None]
        valx = np.zeros([n_data, self.n_neighbors])
        self.delta = np.mean(valx)

        # Compute all the coded data
        for index in range(n_data):
            # Compute the weights of its neighbors
            localmarks = landmarks[indy[index,:], :]
            weights = self.__locality_linear_coding(X[index,:], localmarks)
            # Compute the coded data
            valx[index] = weights

        # Construct sparse matrix
        indx = indx.reshape(n_data * self.n_neighbors)
        indy = indy.reshape(n_data * self.n_neighbors)
        valx = valx.reshape(n_data * self.n_neighbors)

        Z = sparse.coo_matrix((valx, (indx, indy)), shape=(n_data, self.n_landmarks))
        Z = Z / np.sqrt(np.sum(Z, 0))

        # Get first k eigenvectors
        [U, Sigma, V] = svds(Z, k=self.n_clusters + 1)
        U = U[:, 0:self.n_clusters]
        embedded_data = U / np.sqrt(np.sum(U * U, 0))
You can look at the documentation of the numpy module for working with n-dimensional arrays; for example, the dot method computes the matrix product. The code also uses the scipy module (here, scipy.linalg.solve solves a linear system), whose documentation is likewise available online. The first method of a class is usually the initializer, which is called when an object is created; it is where the instance variables the class relies on (such as n_neighbors and lambda_val here) are defined and stored.
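For intuition about what __locality_linear_coding itself computes: it reconstructs a single data point as a weighted combination of its nearest landmarks by solving a small regularized linear system. Here is a self-contained sketch of that step (the data values and lambda are made up for illustration, and the normalization is slightly simplified compared with the original code):
import numpy as np
import scipy.linalg

point = np.array([1.0, 2.0])                  # one data point
neighbors = np.array([[0.5, 1.5],             # its 3 nearest landmarks
                      [1.5, 2.5],
                      [1.0, 1.0]])
lambda_val = 1e-3                              # regularization (self.lambda_val)

z = neighbors - point                          # neighbors centered on the point
G = z.dot(z.T) + lambda_val * np.eye(len(neighbors))   # regularized local Gram matrix
w = scipy.linalg.solve(G, np.ones(len(neighbors)))     # solve G w = 1
w = np.abs(w) / np.abs(w).sum()                # normalize to get the coding weights
print(w)   # these weights become the point's row in the sparse matrix Z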

Bayesian Calibration with PyMC3, Kennedy O'Hagan

I'm quite new to probabilistic programming and pymc3...
Currently, I want to implement the Kennedy-O’Hagan framework in pymc3.
The setup, following the paper of Kennedy and O'Hagan, is as follows:
We have n observations zi of the form
zi = f(xi, theta) + g(xi) + ei,
where the xi are known inputs, theta are the unknown calibration parameters, and the ei are iid error terms. We also have m model evaluations yj of the form
yj = f(x'j, theta_j),
where both x'j (different from the xi above) and theta_j are known. The data therefore consist of all the zi and yj. In the paper, Kennedy and O'Hagan model f and g using Gaussian processes:
f ~ GP{m1 (.,.), Sigma1[(.,.),(.,.)] }
g ~ GP{m2 (.), Sigma2[(.),(.)] }
Among other things, the goal is to obtain posterior samples for the unknown calibration parameters theta.
What I've done so far:
import pymc3 as pm
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing import freeze_support
import sys
import theano
import theano.tensor as tt
from mpl_toolkits.mplot3d import Axes3D
import pyDOE
from scipy.stats.distributions import uniform

def physical_system(x):
    return 0.65 * x / (1 + x / 5)

def observation(x):
    return physical_system(x[:]) + np.random.normal(0, 0.01, len(x))

def computational_system(input):
    return input[:,0]*input[:,1]

if __name__ == "__main__":
    freeze_support()
    # observations with noise
    x_obs = np.linspace(0, 4, 10)
    y_real = physical_system(x_obs[:])
    y_obs = observation(x_obs[:])

    # computation model
    N = 60
    design = pyDOE.lhs(2, samples=N, criterion='center')
    left = [-0.2, -0.2]; right = [4.2, 1.2]
    for i in range(2):
        design[:,i] = uniform(loc=left[i], scale=right[i]-left[i]).ppf(design[:,i])

    x_comp = design[:,0][:,None]; t_comp = design[:,1][:,None]
    input_comp = np.hstack((x_comp, t_comp))
    y_comp = computational_system(input_comp)

    x_obs_shared = theano.shared(x_obs[:, None])

    with pm.Model() as model:
        noise = pm.HalfCauchy('noise', beta=5)
        ls_1 = pm.Gamma('ls_1', alpha=1, beta=1, shape=2)
        cov = pm.gp.cov.ExpQuad(2, ls=ls_1)
        f = pm.gp.Marginal(cov_func=cov)

        # train the gp f with data from computer model:
        f_0 = f.marginal_likelihood('f_0', X=input_comp, y=y_comp, noise=noise)
        trace = pm.sample(500, pm.Metropolis(), chains=4)
        burned_trace = trace[300:]
Up to here, everything is fine: the GP f is trained on the data from the computer model.
Now I want to test whether I can fit this trained GP to my observed data:
# gp f is now trained to data from computer model
# now I want to fit this trained gp to observed data and find posterior for theta
with model:
    sd = pm.Gamma('eta', alpha=1, beta=1)
    theta = pm.Normal('theta', mu=0, sd=sd)
    sigma = pm.Gamma('sigma', alpha=1, beta=1)

    input_1 = tt.concatenate([x_obs_shared, tt.tile(theta, len(x_obs[:,None]), ndim=2).T], axis=1)
    f_1 = f.conditional('f_1', Xnew=input_1, shape=(10,))
    y_ = pm.Normal('y_', mu=f_1, sd=sigma, observed=y_obs)

    step = pm.Metropolis()
    trace_ = pm.sample(30000, step, start=pm.find_MAP(), chains=4)
Is this formulation correct? I get very unstable results...
The full formulation according to KOH should be something like this:
with pm.Model() as model:
    theta = pm.Normal('theta', mu=0, sd=10)
    noise = pm.HalfCauchy('noise', beta=5)
    ls_1 = pm.Gamma('ls_1', alpha=1, beta=1, shape=2)
    cov = pm.gp.cov.ExpQuad(2, ls=ls_1)

    gp1 = pm.gp.Marginal(cov_func=cov)
    gp2 = pm.gp.Marginal(cov_func=cov)
    gp = gp1 + gp2

    input_1 = tt.concatenate([x_obs_shared, tt.tile(theta, len(x_obs), ndim=2).T], axis=1)

    f_0 = gp1.marginal_likelihood('f_0', X=input_comp, y=y_comp, noise=noise)
    f_1 = gp1.marginal_likelihood('f_1', X=input_1, y=y_obs, noise=noise)
    f = gp.marginal_likelihood('f', X=input_1, y=y_obs, noise=noise)
Could somebody give me some advice on how to formulate KOH properly with pymc3? I am desperate... I would appreciate any help. Thank you!
You might have found a solution already, but if not, this reference is a good one: "Guidelines for the Bayesian calibration of building energy models".
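If it helps as a starting point, here is one possible sketch (untested and simplified) of the formulation in pymc3. The main changes compared with your attempt are that the discrepancy GP only sees the x column (via active_dims) and that the field observations get the sum of the two GPs; note that it still treats the computer runs and the field data as two separate marginal likelihoods rather than one joint covariance, so it is only an approximation of the full KOH setup:
# Hedged sketch, not a verified KOH implementation. Assumes input_comp, y_comp,
# x_obs_shared and y_obs are defined as in the question.
with pm.Model() as koh_model:
    theta = pm.Normal('theta', mu=0, sd=10)              # calibration parameter
    noise = pm.HalfCauchy('noise', beta=5)
    ls_f = pm.Gamma('ls_f', alpha=1, beta=1, shape=2)    # emulator lengthscales
    ls_g = pm.Gamma('ls_g', alpha=1, beta=1)             # discrepancy lengthscale

    cov_f = pm.gp.cov.ExpQuad(2, ls=ls_f)                    # f(x, theta)
    cov_g = pm.gp.cov.ExpQuad(2, ls=ls_g, active_dims=[0])   # g(x) depends on x only
    gp_f = pm.gp.Marginal(cov_func=cov_f)
    gp_g = pm.gp.Marginal(cov_func=cov_g)
    gp = gp_f + gp_g

    # emulator is informed by the computer runs
    f_0 = gp_f.marginal_likelihood('f_0', X=input_comp, y=y_comp, noise=noise)

    # field observations see emulator + discrepancy, evaluated at (x_obs, theta)
    X_field = tt.concatenate([x_obs_shared, tt.fill(x_obs_shared, theta)], axis=1)
    z = gp.marginal_likelihood('z', X=X_field, y=y_obs, noise=noise)

    trace = pm.sample(1000, tune=1000)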

Pymc reading observations

I am using PyMC to run a Gibbs sampler on a simple model where the data set is a list with 110 elements (55 observations in each of two dimensions).
log y[i,j,k] = alpha[i,k] + beta[j,k] + mu[k]
where log y follows a bivariate normal distribution (k = 2) with a covariance matrix parameterized by rho, sigma1 and sigma2.
After taking log-transformation, the data becomes a list of 110 numbers ranging from 6 to 15.
This is the piece of code that I have used:
import numpy as np
import pymc as pm
from pymc import Normal, Uniform, MvNormal, Exponential, Gamma, InverseGamma
from pymc import MCMC

mu = np.zeros(2, dtype=object)
alpha = np.zeros([10,2], dtype=object)
beta = np.zeros([10,2], dtype=object)

for k in range(2):
    mu[k] = Normal('mu_{}'.format(k), 0, 1000)
    for i in range(0,10):
        alpha[i][k] = Normal('alpha_{}_{}'.format(i,k), 0, 1000)
        beta[i][k] = Normal('beta_{}_{}'.format(i,k), 0, 1000)

rho = Uniform('rho', lower=-1, upper=1)
sigma1 = InverseGamma('sigma1', 2.0001, 1)  # sigma squared
sigma2 = InverseGamma('sigma2', 2.0001, 1)

@pm.deterministic
def PRECISION():
    PREC = [[sigma2/(sigma1*sigma2*(1-rho)), (-rho*(sigma1*sigma2)**0.5)/(sigma1*sigma2*(1-rho))],
            [(-rho*(sigma1*sigma2)**0.5)/(sigma1*sigma2*(1-rho)), sigma1/(sigma1*sigma2*(1-rho))]]
    return PREC

mean = np.zeros([10,10,2])
mean_list_1 = []
mean_list_2 = []
for i in range(10):
    for j in range(10):
        mean[i,j,0] = mu[0] + alpha[i][0] + beta[j][0]
        mean_list_1.append(mean[i,j,0])
        mean[i,j,1] = mu[1] + alpha[i][1] + beta[j][1]
        mean_list_2.append(mean[i,j,1])

# Restructure the vector
bi_mean = np.zeros(55, dtype=object)
bi_data = np.zeros(55, dtype=object)
log_Y = np.zeros(55, dtype=object)
for i in range(55):
    bi_mean[i] = [mean_list_1[i], mean_list_2[i]]
    bi_data[i] = [data[i], data[i+55]]

log_Y = [pm.MvNormal('log-Y_{}'.format(i), bi_mean[i], PRECISION, value=bi_data[i], observed=True)
         for i in range(55)]

monitor_list = [sigma1, sigma2, rho, mu, alpha, beta, log_Y]
model = MCMC([monitor_list], calc_deviance=True)
model.sample(iter=10000, burn=5000, thin=5)
I tried running it in PyMC, but the resulting values of alpha and beta are too small to match the magnitude of the observations. Is there a way I can check where I went wrong? Thank you.
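One standard first check (a sketch using PyMC2's built-in diagnostics, not from the original post) is to look at posterior summaries and trace plots for a few nodes after sampling:
# Hedged sketch of PyMC2 diagnostics for the model above.
print(model.stats()['mu_0'])   # posterior summary (mean, sd, HPD) for one node
pm.Matplot.plot(model)         # trace, autocorrelation and histogram per node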

Bayesian Correlation with PyMC3

I'm trying to convert this example of Bayesian correlation for PyMC2 to PyMC3, but get completely different results. Most importantly, the mean of the multivariate Normal distribution quickly goes to zero, whereas it should be around 400 (as it is for PyMC2). Consequently, the estimated correlation quickly goes towards 1, which is wrong as well.
The full code is available in this notebook for PyMC2 and in this notebook for PyMC3.
The relevant code for PyMC2 is
def analyze(data):
    # priors might be adapted here to be less flat
    mu = pymc.Normal('mu', 0, 0.000001, size=2)
    sigma = pymc.Uniform('sigma', 0, 1000, size=2)
    rho = pymc.Uniform('r', -1, 1)

    @pymc.deterministic
    def precision(sigma=sigma, rho=rho):
        ss1 = float(sigma[0] * sigma[0])
        ss2 = float(sigma[1] * sigma[1])
        rss = float(rho * sigma[0] * sigma[1])
        return np.linalg.inv(np.mat([[ss1, rss], [rss, ss2]]))

    mult_n = pymc.MvNormal('mult_n', mu=mu, tau=precision, value=data.T, observed=True)

    model = pymc.MCMC(locals())
    model.sample(50000, 25000)
My port of the above code to PyMC3 is as follows:
def precision(sigma, rho):
    C = T.alloc(rho, 2, 2)
    C = T.fill_diagonal(C, 1.)
    S = T.diag(sigma)
    return T.nlinalg.matrix_inverse(T.nlinalg.matrix_dot(S, C, S))

def analyze(data):
    with pm.Model() as model:
        # priors might be adapted here to be less flat
        mu = pm.Normal('mu', mu=0., sd=0.000001, shape=2, testval=np.mean(data, axis=1))
        sigma = pm.Uniform('sigma', lower=1e-6, upper=1000., shape=2, testval=np.std(data, axis=1))
        rho = pm.Uniform('r', lower=-1., upper=1., testval=0)

        prec = pm.Deterministic('prec', precision(sigma, rho))
        mult_n = pm.MvNormal('mult_n', mu=mu, tau=prec, observed=data.T)

    return model

model = analyze(data)
with model:
    trace = pm.sample(50000, tune=25000, step=pm.Metropolis())
The PyMC3 version runs, but clearly does not return the expected result. Any help would be highly appreciated.
The call signature of pymc.Normal is
In [125]: pymc.Normal?
Init signature: pymc.Normal(self, *args, **kwds)
Docstring:
N = Normal(name, mu, tau, value=None, observed=False, size=1, trace=True, rseed=True, doc=None, verbose=-1, debug=False)
Notice that the third positional argument of pymc.Normal is tau, not the standard deviation, sd.
Therefore, since the pymc code uses
mu = Normal('mu', 0, 0.000001, size=2)
The corresponding pymc3 code should use
mu = pm.Normal('mu', mu=0., tau=0.000001, shape=2, ...)
or
mu = pm.Normal('mu', mu=0., sd=math.sqrt(1/0.000001), shape=2, ...)
since tau = 1/sigma**2.
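As a quick numerical check of that relationship:
import math
tau = 0.000001
sd = math.sqrt(1 / tau)
print(sd)   # 1000.0 -- so tau=1e-6 corresponds to sd=1000, not sd=0.000001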
With this one change, your pymc3 code should produce results comparable to the PyMC2 version.

Simplest linear model with PyMC

Say I try to estimate the slope of a simple y= m * x problem using the following data:
x_data = np.array([0,1,2,3])
y_data = np.array([0,1,2,3])
Clearly the slope is 1. However, when I run this in PyMC I get 10:
slope = pm.Uniform('slope', lower=0, upper=20)

@pm.deterministic
def y_gen(value=y_data, x=x_data, slope=slope, observed=True):
    return slope * x

model = pm.Model([slope])
mcmc = pm.MCMC(model)
mcmc.sample(100000, 5000)

# This returns 10
final_guess = mcmc.trace('slope')[:].mean()
but it should be 1!
Note: The above is with PyMC2.
You need to define a likelihood; try this:
import pymc as pm
import numpy as np

x_data = np.linspace(0, 1, 100)
y_data = np.linspace(0, 1, 100)

slope = pm.Normal('slope', mu=0, tau=10**-2)
tau = pm.Uniform('tau', lower=0, upper=20)

@pm.deterministic
def y_gen(x=x_data, slope=slope):
    return slope * x

like = pm.Normal('likelihood', mu=y_gen, tau=tau, observed=True, value=y_data)

model = pm.Model([slope, y_gen, like, tau])
mcmc = pm.MCMC(model)
mcmc.sample(100000, 5000)

# This should now recover a slope close to 1
final_guess = mcmc.trace('slope')[:].mean()
Your original code returns 10 because you're just sampling from your Uniform(0, 20) prior, and 10 is its expected value.
You need to set value=y_data, observed=True on the likelihood. Also, a minor point: you don't need to instantiate a Model object; just pass your nodes (or a call to locals()) to MCMC.
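For example, a minimal variant of the setup above without the explicit Model object:
# Equivalent sketch: pass the nodes (or locals()) straight to MCMC.
mcmc = pm.MCMC([slope, y_gen, like, tau])   # or pm.MCMC(locals())
mcmc.sample(100000, 5000)
print(mcmc.trace('slope')[:].mean())        # should now be close to 1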
