Where is the value in the lambda function coming from? - python

Below is the part of the code from this GitHub repo that I am confused about:
full_promp.py:
.....
......
class ProbInvKinematics:
    #params:
    #fwd_k: A forward kinematics object

    def __laplace_cost_and_grad(self, theta, mu_theta, inv_sigma_theta, mu_x, inv_sigma_x):
        print("theta ", theta)
        f_th, jac_th, ori = self.fwd_k.position_and_jac(theta)
        jac_th = jac_th[0:3,:]
        diff1 = theta - mu_theta
        tmp1 = np.dot(inv_sigma_theta, diff1)
        diff2 = f_th - mu_x
        tmp2 = np.dot(inv_sigma_x, diff2)
        nll = 0.5*(np.dot(diff1,tmp1) + np.dot(diff2,tmp2))
        grad_nll = tmp1 + np.dot(jac_th.T,tmp2)
        return nll, grad_nll

    def __init__(self, fwd_kinematics):
        self.fwd_k = fwd_kinematics

    def inv_kin(self, mu_theta, sig_theta, mu_x, sig_x):
        inv_sig_theta = np.linalg.inv(sig_theta)
        inv_sig_x = np.linalg.inv(sig_x)
        cost_grad = lambda theta: self.__laplace_cost_and_grad(theta, mu_theta, inv_sig_theta, mu_x, inv_sig_x)
        cost = lambda theta: cost_grad(theta)[0]
        grad = lambda theta: cost_grad(theta)[1]
        res = opt.minimize(cost, mu_theta, method='BFGS', jac=grad)
        post_mean = res.x
        post_cov = res.hess_inv
        return post_mean, post_cov
The class ProbInvKinematics is used as follows:
import robpy.full_promp as promp
prob_inv_kin = promp.ProbInvKinematics(fwd_kin)
mu_cartesian = np.array([-0.62, -0.44, -0.34])
Sigma_cartesian = 0.02**2*np.eye(3)
mu_q, Sigma_q = prob_inv_kin.inv_kin(mu_theta=prior_mu_q, sig_theta=prior_Sigma_q,
                                     mu_x=mu_cartesian, sig_x=Sigma_cartesian)
I see that the parameter theta is defined nowhere, but somehow, when I print theta inside __laplace_cost_and_grad(), a value is there... What is the logic behind this theta?
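Here is a minimal toy example of my own (not from the repo) of what I believe is happening: scipy.optimize.minimize calls the cost and jac callables itself, passing in the current candidate parameter vector at every iteration, starting from the initial guess.

# Toy example (mine, not from the repo): the optimizer supplies theta itself.
import numpy as np
import scipy.optimize as opt

def cost(theta):             # theta is provided by the optimizer at each step
    print("theta ", theta)   # same kind of print as in __laplace_cost_and_grad
    return np.sum((theta - 1.0) ** 2)

def grad(theta):
    return 2.0 * (theta - 1.0)

res = opt.minimize(cost, x0=np.zeros(3), method='BFGS', jac=grad)
print(res.x)                 # approximately [1. 1. 1.]

So the lambdas in inv_kin only capture mu_theta, inv_sig_theta, mu_x and inv_sig_x from the enclosing scope; theta is whatever vector the optimizer passes in on each evaluation, beginning with the start value mu_theta.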

Related

Nlopt from R to Python

This is my first time using Python. I am having trouble moving from R to Python with the nlopt package.
I am using a maximum likelihood estimator to estimate 4 parameters. In R I programmed two functions: the log-likelihood and its gradient.
Both of my R functions look like this:
loglik <- function(par){
  g_h <- par[1]; g_c <- par[2]; a_bar <- par[3]; sigma_e <- par[4]
  d <- mutate(obs_data,
              num = h + ((I-g_c)/w),
              den = g_h + ((I-g_c)/w),
              eps = (num/den)-a_bar,
              arg_1 = 0.5*(eps/sigma_e)^2,
              arg_2 = log(abs(den)),
              opt = log(sigma_e * sqrt(2*pi)) + arg_1 + arg_2)
  LL <- -sum(d$opt)
  return(-LL)
}

grad_loglik <- function(par){
  g_h <- par[1]; g_c <- par[2]; a_bar <- par[3]; sigma_e <- par[4]
  d <- obs_data %>%
    mutate(num = h + ((I-g_c)/w),
           den = g_h + ((I-g_c)/w),
           eps = num/den - a_bar,
           eps_gh = -num/(den^2),
           eps_gc = (h-g_h)/(w*(den^2)),
           ll_gh = -(eps/(sigma_e^2))*eps_gh - 1/den,
           ll_gc = -(eps/(sigma_e^2))*eps_gc + 1/(w*den),
           ll_abar = eps/(sigma_e^2),
           ll_se = -1/sigma_e + (eps^2)/(sigma_e^3))
  return(c(-sum(d$ll_gh),
           -sum(d$ll_gc),
           -sum(d$ll_abar),
           -sum(d$ll_se)))
}
So I am trying to minimize the loglik function.
The R code calling nloptr:
opt_parr <- nloptr(x0 = val_i,
                   eval_f = loglik,
                   eval_grad_f = grad_loglik,
                   lb = c(0, 0, 0, 0),
                   ub = c(24, 100, 1, 1),
                   opts = list("algorithm" = "NLOPT_LD_LBFGS",
                               "xtol_rel" = xtol,
                               "maxeval" = maxev,
                               "print_level" = 0))
So I translated both of my functions into Python:
def loglik(par):
    g_h = par[0]; g_c = par[1]; a_bar = par[2]; sigma_e = par[3]
    d = obs_data
    # within a single assign call, later columns refer to the earlier ones
    # through the lambda argument x (d is still the original frame here)
    d = d.assign(num = lambda x: x.h + ((x.I - g_c)/x.w),
                 den = lambda x: g_h + ((x.I - g_c)/x.w),
                 eps = lambda x: (x.num/x.den) - a_bar,
                 arg1 = lambda x: 0.5*(x.eps/sigma_e)**2,
                 arg2 = lambda x: np.log(np.absolute(x.den)),
                 opt = lambda x: np.log(sigma_e * np.sqrt(2*np.pi)) + x.arg1 + x.arg2)
    LL = -sum(d.opt)
    return -LL

def grad_loglik(par):
    g_h = par[0]; g_c = par[1]; a_bar = par[2]; sigma_e = par[3]
    d = obs_data
    d = d.assign(num = lambda x: x.h + ((x.I - g_c)/x.w),
                 den = lambda x: g_h + ((x.I - g_c)/x.w),
                 eps = lambda x: (x.num/x.den) - a_bar,
                 eps_gh = lambda x: -x.num/(x.den**2),
                 eps_gc = lambda x: (x.h - g_h)/(x.w*(x.den**2)),
                 ll_gh = lambda x: -(x.eps/(sigma_e**2))*x.eps_gh - 1/x.den,
                 ll_gc = lambda x: -(x.eps/(sigma_e**2))*x.eps_gc + 1/(x.w*x.den),
                 ll_abar = lambda x: x.eps/(sigma_e**2),
                 ll_se = lambda x: -1/sigma_e + (x.eps**2)/(sigma_e**3))
    G1 = -sum(d.ll_gh)
    G2 = -sum(d.ll_gc)
    G3 = -sum(d.ll_abar)
    G4 = -sum(d.ll_se)
    G = [G1, G2, G3, G4]
    return G
But I don't understand how to set up the optimizer. So far this is my best attempt:
#%% Find optimal parameters
opt = nlopt.opt(nlopt.LD_LBFGS, 4)
opt.set_lower_bounds([0]*4)
opt.set_upper_bounds([24, 100, 1, 1])
opt.set_min_objective(loglik)
opt.set_xtol_rel(1e-64)
x = opt.optimize([1e-4]*4)
minf = opt.last_optimum_value()
print("optimum at ", x[0], x[1], x[2], x[3])
print("minimum value = ", minf)
print("result code = ", opt.last_optimize_result())
I don't know where to pass the gradient function to make this work; in R it was fairly clear.
This page tells me that:
But since I am new to Python this doesn't tell me much. Am I programming the gradient function incorrectly? Where does it have to go? A sketch of what I think the wrapper should look like is below, after the data block.
Thanks in advance!
Data
Use the code below to generate obs_data:
import numpy as np
import pandas as pd
import nlopt

# α_bar, γ_h, γ_c and time are constants that I define elsewhere
N = 100_000
np.random.seed(1)
wage = np.exp(np.random.normal(loc = 4, scale = 0.1, size = N))
nlincome = np.exp(np.random.normal(loc = 3, scale = 0.5, size = N))
eps_ = np.random.normal(loc = 0, scale = 0.01, size = N)
data = pd.DataFrame({'wages': wage, 'non_labor_income': nlincome,
                     'epsilon': eps_})
data = data.assign(alpha_bar = lambda x: α_bar + data['epsilon'])
check = data.assign(h = lambda x: (data['alpha_bar'] + data['epsilon'])*γ_h
                    - (((1 - data['alpha_bar'] - data['epsilon'])*
                        (data['non_labor_income'] - γ_c))/(data['wages'])))
check = check.assign(l = lambda x: time - check.h,
                     c = lambda x: (check.wages*check.h) + check.non_labor_income,
                     total_income = lambda x: check.wages*check.h)
obs_data = check[['wages', 'h', 'non_labor_income']]
obs_data = obs_data.rename(columns = {"wages": "w", "non_labor_income": "I"})
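And here is a sketch of what I think the wrapper has to look like (my own untested guess, based on the nlopt Python tutorial): as far as I can tell, nlopt expects a single objective of the form f(x, grad), where grad is a NumPy array that must be filled in place whenever its size is nonzero, so the gradient goes inside the objective instead of being registered separately.

# Untested sketch: wrap loglik and grad_loglik into the f(x, grad) form that
# nlopt's Python interface expects; the gradient is written into grad in place.
def objective(x, grad):
    if grad.size > 0:
        grad[:] = grad_loglik(x)   # fill nlopt's gradient buffer in place
    return float(loglik(x))        # return the scalar objective value

opt = nlopt.opt(nlopt.LD_LBFGS, 4)
opt.set_lower_bounds([0.0]*4)
opt.set_upper_bounds([24.0, 100.0, 1.0, 1.0])
opt.set_min_objective(objective)
opt.set_xtol_rel(1e-8)
x_opt = opt.optimize([1e-4]*4)
print("optimum at ", x_opt)
print("minimum value = ", opt.last_optimum_value())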

Represent multiple normal priors with a single call in Bayesian logistic regression with Tensorflow

I am trying to properly represent multiple normal priors with a single call in the joint distribution and run mcmc.sample_chain with a NUTS kernel. I used the excellent blog post here as a sketch for the following MCMC implementation.
def trace_fn(_, pkr):
    return (
        pkr.inner_results.inner_results.target_log_prob,
        pkr.inner_results.inner_results.leapfrogs_taken,
        pkr.inner_results.inner_results.has_divergence,
        pkr.inner_results.inner_results.energy,
        pkr.inner_results.inner_results.log_accept_ratio
    )

def run_nuts(
        target_log_prob_fn,
        inits,
        trace_fn=trace_fn,
        bijectors_list=None,
        num_steps=5000,
        num_burnin=500,
        n_chains=n_chains):
    step_size = np.random.rand(n_chains, 1)*.5 + 1.
    if not isinstance(inits, list):
        inits = [inits]
    if bijectors_list is None:
        bijectors_list = [tfb.Identity()]*len(inits)
    kernel = tfp.mcmc.DualAveragingStepSizeAdaptation(
        tfp.mcmc.TransformedTransitionKernel(
            inner_kernel=tfp.mcmc.NoUTurnSampler(
                target_log_prob_fn,
                step_size=[step_size]*len(inits)
            ),
            bijector=bijectors_list
        ),
        target_accept_prob=.8,
        num_adaptation_steps=int(0.8*num_burnin),
        step_size_setter_fn=lambda pkr, new_step_size: pkr._replace(
            inner_results=pkr.inner_results._replace(step_size=new_step_size)
        ),
        step_size_getter_fn=lambda pkr: pkr.inner_results.step_size,
        log_accept_prob_getter_fn=lambda pkr: pkr.inner_results.log_accept_ratio,
    )
    res = tfp.mcmc.sample_chain(
        num_results=num_steps,
        num_burnin_steps=num_burnin,
        current_state=inits,
        kernel=kernel,
        trace_fn=trace_fn
    )
    return res
I can get the MCMC working when individually specifying the priors but not when declaring them as a batch.
This works
dtype=tf.float32
root = tfd.JointDistributionCoroutine.Root
def basic_logistic(data_df):
    def _generator():
        a = yield root(tfd.Sample(tfd.Normal(0, 10), 1, name='a'))
        b = yield root(tfd.Sample(tfd.Normal(0, 10), 1, name='b'))
        c = yield root(tfd.Sample(tfd.Normal(0, 10), 1, name='c'))
        l = a + tf.cast(data_df['x1'], dtype)*b + tf.cast(data_df['x2'], dtype)*c
        print(l)
        y = yield tfd.Independent(
            tfd.Bernoulli(
                logits=l,
                name='success'
            ),
            reinterpreted_batch_ndims=1
        )
    return tfd.JointDistributionCoroutine(_generator)
arm_0_test = basic_logistic(arm_0_test_df)
arm_0_log_prob = lambda *args: arm_0_test.log_prob(args + (tf.cast(arm_0_test_df['y'],dtype),))
n_chains=3
arm0_res = run_nuts(arm_0_log_prob, [tf.ones((n_chains,1)), tf.ones((n_chains,1)), tf.ones((n_chains,1))])
This does not
dtype=tf.float32
root = tfd.JointDistributionCoroutine.Root
def basic_logistic_multiple(X_df):
    X_df_copy = X_df.copy()
    n_features = X_df_copy.shape[1] + 1  # have to include intercept term
    prior_means = [0 for i in range(n_features)]  # list of prior means
    print(prior_means)
    prior_sds = [10 for i in range(n_features)]  # list of prior sds
    X_df_copy.insert(0, 'intercept', np.ones(X_df_copy.shape[0]))  # Add column of 1s for intercept
    X = tf.convert_to_tensor(X_df_copy, dtype=dtype)

    def _generator():
        beta = yield root(tfd.Sample(
            tfd.Normal(prior_means, prior_sds, name='beta')
        ))
        print(beta)
        l = tf.tensordot(X, beta, axes=1)
        # l = tf.reshape(l, (l.shape[0], ))
        print(l)
        y = yield tfd.Independent(
            tfd.Bernoulli(
                logits=l,
                name='success'
            ),
            reinterpreted_batch_ndims=1
        )
    return tfd.JointDistributionCoroutine(_generator)
arm_0_test = basic_logistic_multiple(arm_0_test_df)
arm_0_log_prob = lambda *args: arm_0_test.log_prob(args + (tf.cast(arm_0_test_df['y'],dtype),))
n_chains=3
init_beta, _ = arm_0_test.sample(n_chains)
init_beta = tf.zeros_like(init_beta)
arm0_res = run_nuts(arm_0_log_prob, [init_beta,])
I get the following error
ValueError: Dimensions must be equal, but are 3 and 1000000 for '{{node mcmc_sample_chain/dual_averaging_step_size_adaptation___init__/_bootstrap_results/transformed_kernel_bootstrap_results/NoUTurnSampler/.bootstrap_results/process_args/maybe_call_fn_and_grads/value_and_gradients/value_and_gradient/JointDistributionCoroutine_CONSTRUCTED_AT_top_level/log_prob/add_1}} = AddV2[T=DT_FLOAT](mcmc_sample_chain/dual_averaging_step_size_adaptation___init__/_bootstrap_results/transformed_kernel_bootstrap_results/NoUTurnSampler/.bootstrap_results/process_args/maybe_call_fn_and_grads/value_and_gradients/value_and_gradient/JointDistributionCoroutine_CONSTRUCTED_AT_top_level/log_prob/add, mcmc_sample_chain/dual_averaging_step_size_adaptation___init__/_bootstrap_results/transformed_kernel_bootstrap_results/NoUTurnSampler/.bootstrap_results/process_args/maybe_call_fn_and_grads/value_and_gradients/value_and_gradient/JointDistributionCoroutine_CONSTRUCTED_AT_top_level/log_prob/Independentsuccess/log_prob/Sum)' with input shapes: [3,3], [1000000].
I can sample from both JointDistributions fine, so I believe something is clashing inside the sample_chain call. Possibly my initial-state declaration?
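In case it is useful, this is roughly the change I suspect is needed (an untested sketch of mine, not a confirmed fix): give beta event_shape [n_features] via the sample_shape argument of tfd.Sample, and compute the logits with a matrix-vector product that broadcasts over the leading chain dimension.

# Untested sketch: declare beta as one vector-valued prior instead of a
# batch of scalars, and broadcast the logits over the chain dimension.
def _generator():
    beta = yield root(tfd.Sample(
        tfd.Normal(0., 10.),
        sample_shape=n_features,       # event_shape becomes [n_features]
        name='beta'
    ))
    l = tf.linalg.matvec(X, beta)      # shape [..., n_observations]
    y = yield tfd.Independent(
        tfd.Bernoulli(logits=l, name='success'),
        reinterpreted_batch_ndims=1
    )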
Thanks for any help!

Fitting model to data using scipy differential evolution: "RuntimeError: The map-like callable must be of the form f(func, iterable)..."

I am trying to fit a model to data (extracted from an Excel file and imported using pandas) using a likelihood method. However, when running the code I get a "RuntimeError: The map-like callable must be of the form f(func, iterable), returning a sequence of numbers the same length as 'iterable'" error, which occurs at the "result_simul_G = minimize(negLogLike, params, method = 'differential_evolution', args=(x, y),)" line. Below is my code; it is very interconnected, so I couldn't find a way to illustrate what's happening without showing most of it.
#================================================================================
import numpy as np
import pandas as pd
import os
from lmfit import minimize, Parameters, Parameter, report_fit
params = Parameters()
params.add('gamma', value=.45, min=0, max=1, vary = True)
params.add('n', value = 1, min=0, max=3, vary = True)
filename = 'data.xlsx'
#================================================================================
def negLogLike(params, xData, yData):
    new_xData = []
    new_yData = []
    for i in range(len(yData)):
        if ((yData[i] != 0) and (xData[i] != 0)):
            new_xData.append(xData[i])
            new_yData.append(yData[i])
    model_result = model(new_xData, params)
    nll = 0
    epsilon = 10**-10
    for i in range(len(new_yData)):
        if (model_result[i] < epsilon):
            model_result[i] = epsilon
        if (model_result[i] > 1 - epsilon):
            model_result[i] = 1 - epsilon
        nll += new_yData[i] * np.log(model_result[i]) + (1 - new_yData[i]) * np.log(1 - model_result[i])
    return -nll
#================================================================================
def model(x, params):
    try:  # Get parameters
        g = params['gamma'].value
        n = params['n'].value
    except KeyError:
        g, n = params
    x = np.asarray(x)  # x may arrive as a plain list
    y = 1 - np.exp(-g * x**n)
    return y
#================================================================================
def GetFits(DataFrame):
    cell_count = 2300000
    GFP_GC_SIMUL = np.ones(DataFrame.shape[0], float)
    GFP_IC_SIMUL = np.ones(DataFrame.shape[0], float)
    # Data
    for i in range(DataFrame.shape[0]):
        GFP_GC_SIMUL[i] = DataFrame.loc[i, 'GFP genomes'] / cell_count
        GFP_IC_SIMUL[i] = DataFrame.loc[i, 'GFP IU'] / cell_count
    x = np.array(GFP_GC_SIMUL[10:-10])
    y = np.array(GFP_IC_SIMUL[10:-10])
    print('len=', len(x), x.dtype, ', x=', x)
    print('------------------------')
    print('len=', len(y), y.dtype, ', y=', y)
    result_simul_G = minimize(negLogLike, params, method='differential_evolution', args=(x, y),)
#================================================================================
DataFrame = pd.read_excel('data.xlsx', engine='openpyxl')
GetFits(DataFrame)
When debugging on my own, I used print statements to see what x and y data were being supplied to the minimizer, and this is what they showed:
len= 34 float64 , x= [0.14478261 0.28695652 0.28695652 0.28695652 0.57391304 0.57391304
0.57391304 0.8738913 0.8738913 0.8738913 1.16086957 1.16086957
1.16086957 1.44780435 1.44780435 1.44780435 1.73478261 1.73478261
1.73478261 2.03476087 2.03476087 2.03476087 2.32173913 2.32173913
2.32173913 2.60869565 2.60869565 2.60869565 2.86956522 2.86956522
2.86956522 7.17391304 7.17391304 7.17391304]
------------------------
len= 34 float64 , y= [0.005 0.01180435 0.01226087 0.01158696 0.036 0.03704348
0.03467391 0.07030435 0.06556522 0.07567391 0.1001087 0.09852174
0.0986087 0.13626087 0.13978261 0.13956522 0.16847826 0.16408696
0.19391304 0.1945 0.21319565 0.19052174 0.32204348 0.23330435
0.25028261 0.28136957 0.26293478 0.25893478 0.28273913 0.29717391
0.273 0.60826087 0.60834783 0.59482609]
I know this is quite a lot but I would appreciate any and all help.
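If it helps narrow things down, here is a sketch of mine (untested) of the same likelihood passed straight to scipy.optimize.differential_evolution with explicit bounds, bypassing lmfit, to check whether the map-like error comes from the lmfit wrapper. It assumes the x and y arrays printed above; nll_plain is a hypothetical helper that takes a plain parameter array instead of a Parameters object.

# Untested sketch: same negative log-likelihood, but as a plain function of a
# parameter array, fed directly to scipy's differential_evolution.
from scipy.optimize import differential_evolution

def nll_plain(p, xData, yData):
    g, n = p
    xd, yd = np.asarray(xData), np.asarray(yData)
    mask = (xd != 0) & (yd != 0)
    pred = np.clip(1 - np.exp(-g * xd[mask]**n), 1e-10, 1 - 1e-10)
    return -np.sum(yd[mask]*np.log(pred) + (1 - yd[mask])*np.log(1 - pred))

res = differential_evolution(nll_plain, bounds=[(0, 1), (0, 3)], args=(x, y))
print(res.x, res.fun)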

Python class with functions that call and minimize other functions

I am trying to write a method in a class that minimises another method of the same class.
What is the correct way to write and call it? I am stuck and not sure how to do this.
import numpy as np
import pandas as pd
import scipy.optimize as sco

class Optimisation:
    def __init__(self, rf, expected_return, cov):
        self.rf = rf
        self.expected_return = expected_return
        self.cov = cov

    def calculate_negative_sharpe(self):
        self.portfolio_return = np.sum(expected_returns * weights) * 252
        self.portfolio_std = np.sqrt(np.dot(self.weights.T, np.dot(self.cov, self.weights))) * np.sqrt(252)
        self.sharpe_ratio = (self.portfolio_return - self.rf) / self.portfolio_std
        return -self.sharpe_ratio

    def max_sharpe_ratio(expected_returns, cov, rf):
        num_assets = len(expected_returns)
        args = (expected_returns, cov, rf)
        constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
        bound = (0.0, 1.0)
        bounds = tuple(bound for asset in range(num_assets))
        result = sco.minimize(calculate_negative_sharpe, num_assets*[1./num_assets,], args=args, method='SLSQP', bounds=bounds, constraints=constraints)
        return result

opt = Optimisation(rf, er, cov)
result = opt.max_sharpe_ratio()
print(result)
Or should it be like this:
import numpy as np
import pandas as pd
import scipy.optimize as sco

class Optimisation:
    def __init__(self, rf, expected_return, cov):
        self.rf = rf
        self.expected_return = expected_return
        self.cov = cov

    def calculate_negative_sharpe(self):
        self.portfolio_return = np.sum(expected_returns * weights) * 252
        self.portfolio_std = np.sqrt(np.dot(self.weights.T, np.dot(self.cov, self.weights))) * np.sqrt(252)
        self.sharpe_ratio = (self.portfolio_return - self.rf) / self.portfolio_std
        return -self.sharpe_ratio

    def max_sharpe_ratio(self):
        self.num_assets = len(self.expected_returns)
        self.args = (self.expected_returns, self.cov, self.rf)
        self.constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1})
        self.bound = (0.0, 1.0)
        self.bounds = tuple(self.bound for asset in range(self.num_assets))
        self.result = sco.minimize(self.calculate_negative_sharpe, self.num_assets*[1./self.num_assets,], args=self.args, method='SLSQP', bounds=self.bounds, constraints=self.constraints)
        return self.result

opt = Optimisation(rf, er, cov)
result = opt.max_sharpe_ratio()
print(result)
My goal is to return a result dataframe which I can print or save.
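For what it's worth, here is a minimal sketch of what I think the second version should look like (my own untested attempt, not a confirmed answer): the weights are the variable the optimizer searches over, so they should be a parameter of calculate_negative_sharpe rather than attributes of self, and no extra args are needed because the other inputs already live on the instance.

# Untested sketch: weights come in as the optimisation variable, everything
# else is read from the instance. The optimizer returns an OptimizeResult.
import numpy as np
import scipy.optimize as sco

class Optimisation:
    def __init__(self, rf, expected_returns, cov):
        self.rf = rf
        self.expected_returns = expected_returns
        self.cov = cov

    def calculate_negative_sharpe(self, weights):
        port_return = np.sum(self.expected_returns * weights) * 252
        port_std = np.sqrt(weights.T @ self.cov @ weights) * np.sqrt(252)
        return -(port_return - self.rf) / port_std

    def max_sharpe_ratio(self):
        num_assets = len(self.expected_returns)
        constraints = ({'type': 'eq', 'fun': lambda x: np.sum(x) - 1},)
        bounds = tuple((0.0, 1.0) for _ in range(num_assets))
        return sco.minimize(self.calculate_negative_sharpe,
                            np.repeat(1.0/num_assets, num_assets),
                            method='SLSQP', bounds=bounds,
                            constraints=constraints)

opt = Optimisation(rf, er, cov)
result = opt.max_sharpe_ratio()
print(result)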

Porting pymc2 code to pymc3: custom likelihood function

I am trying to implement the censored-data example in Lee & Wagenmakers' book (Chapter 5.5, page 70). In pymc2, I have the following model:
nattempts = 950
nfails = 949
n = 50  # Number of questions
y = np.zeros(nattempts)
y[nattempts-1] = 1
z = 30
unobsmin = 15
unobsmax = 25
unobsrange = np.arange(unobsmin, unobsmax+1)

theta = pymc.Uniform("theta", lower=.25, upper=1)

@pymc.observed
def Ylike(value=z, theta=theta, n=n, censorn=nfails, unobs=unobsrange):
    ylikeobs = pymc.binomial_like(x=value, n=n, p=theta)
    ylikeunobs = np.array([])
    for i in unobs:
        ylikeunobs = np.append(pymc.binomial_like(x=i, n=n, p=theta), ylikeunobs)
    return ylikeobs + sum(ylikeunobs)*censorn

testmodel = pymc.Model([theta, Ylike])
mcmc = pymc.MCMC(testmodel)
mcmc.sample(iter=20000, burn=50, thin=2)
which involves the @pymc.observed decorator.
I think I need to express the likelihood using pm.DensityDist; however, I could not figure out how to do it.
OK, I found out how to do it:
with pm.Model():
    theta = pm.Uniform("theta", lower=.25, upper=1)

    def logp(value, n, p):
        return pm.dist_math.bound(
            pm.dist_math.binomln(n, value)
            + pm.dist_math.logpow(p, value)
            + pm.dist_math.logpow(1 - p, n - value),
            0 <= value, value <= n,
            0 <= p, p <= 1)

    def Censorlike(value=z, n=n, censorn=nfails, unobs=unobsrange):
        ylikeobs = logp(value=value, n=n, p=theta)
        ylikeunobs = 0
        for i in unobs:
            ylikeunobs += logp(value=i, n=n, p=theta)
        return ylikeobs + ylikeunobs*censorn

    ylike = pm.DensityDist('ylike', Censorlike, observed={'value': z, 'n': n, 'censorn': nfails, 'unobs': unobsrange})
    trace = pm.sample(3e3)
