Matrix and vector shape in TVP-VAR with statespace MLEModel - Python

Thanks to everyone in advance for their time!
I am trying to run a TVP-VAR (time-varying parameter VAR) for a panel using the statespace MLEModel in statsmodels. I get an error while trying to fit the model; my understanding is that it mostly concerns the dimensions of the state covariance matrix. I suspect I will get the same error later when I deal with the column shape.
The error is:
ValueError: Invalid dimensions for state covariance matrix matrix: requires 702 rows, got 3
How can I solve this? The full error and traceback are shown below:
preliminary = tvppanelvarmodel.fit(maxiter=1000)
Traceback (most recent call last):
File "/var/folders/m6/68zljfsj2t9_dzgpwwslj29r0000gp/T/ipykernel_6232/3038987883.py", line 1, in <module>
preliminary = tvppanelvarmodel.fit(maxiter=1000)
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 704, in fit
mlefit = super(MLEModel, self).fit(start_params, method=method,
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/base/model.py", line 563, in fit
xopt, retvals, optim_settings = optimizer._fit(f, score, start_params,
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/base/optimizer.py", line 241, in _fit
xopt, retvals = func(objective, gradient, start_params, fargs, kwargs,
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/base/optimizer.py", line 651, in _fit_lbfgs
retvals = optimize.fmin_l_bfgs_b(func, start_params, maxiter=maxiter,
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py", line 197, in fmin_l_bfgs_b
res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py", line 306, in _minimize_lbfgsb
sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/optimize.py", line 261, in _prepare_scalar_function
sf = ScalarFunction(fun, x0, args, grad, hess,
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py", line 140, in __init__
self._update_fun()
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py", line 233, in _update_fun
self._update_fun_impl()
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py", line 137, in update_fun
self.f = fun_wrapped(self.x)
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py", line 134, in fun_wrapped
return fun(np.copy(x), *args)
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/base/model.py", line 531, in f
return -self.loglike(params, *args) / nobs
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 933, in loglike
self.update(params, transformed=True, includes_fixed=True,
File "/var/folders/m6/68zljfsj2t9_dzgpwwslj29r0000gp/T/ipykernel_6232/3786466608.py", line 104, in update
self['state_cov'] = np.diag([params[2]**2, params[3]**2, params[4]**2]) # W
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 239, in __setitem__
return self.ssm.__setitem__(key, value)
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/representation.py", line 420, in __setitem__
setattr(self, key, value)
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/representation.py", line 54, in __set__
value = self._set_matrix(obj, value, shape)
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/representation.py", line 68, in _set_matrix
validate_matrix_shape(
File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/tools.py", line 1474, in validate_matrix_shape
raise ValueError('Invalid dimensions for %s matrix: requires %d'
ValueError: Invalid dimensions for state covariance matrix matrix: requires 702 rows, got 3
When I defined the class initially, I did the following
class TVPVAR(sm.tsa.statespace.MLEModel):
    def __init__(self, y):
        # Create a matrix with [y_t' : y_{t-1}'] for t = 2, ..., T
        augmented = sm.tsa.lagmat(y, 1, trim='both', original='in', use_pandas=True)
        # Separate into y_t and z_t = [1 : y_{t-1}']
        p = y.shape[1]
        y_t = augmented.iloc[:, :p]
        z_t = sm.add_constant(augmented.iloc[:, p:])
        nobs = y.shape[0]
        T = y.shape[0]

        # Recall that the length of the state vector is p * (p + 1)
        k_states = p * (p + 1)
        super(TVPVAR, self).__init__(y_t, exog=None, k_states=k_states, k_posdef=k_states)

        self.k_y = p
        self.k_states = p * (p + 1)
        self.nobs = T

        self['design'] = np.zeros((self.k_y, self.k_states, 1))
        self['transition'] = np.eye(k_states)  # G
        self['selection'] = np.eye(k_states)   # R = 1

    def update_variances(self, obs_cov, state_cov_diag):
        self['obs_cov'] = obs_cov
        self['state_cov'] = np.diag(state_cov_diag)  # W

        init = initialization.Initialization(self.k_states)
        init.set((0, 2), 'diffuse')
        init.set((2, 4), 'stationary')
        self.ssm.initialize(init)
OTHER CODE
    def update(self, params, **kwargs):
        params = super().update(params, **kwargs)

        self['transition', 2, 2] = params[0]
        self['transition', 3, 2] = params[1]

        self['state_cov'] = np.diag([params[2]**2, params[3]**2, params[4]**2])  # W
How can I define the dimensions for the state covariance matrix and the vector shape? Thanks for your inputs.
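For context on the shapes: in the statsmodels state-space representation, state_cov must have shape (k_posdef, k_posdef) and selection must have shape (k_states, k_posdef). Because the model above is constructed with k_posdef = k_states = 702, a 3 x 3 diagonal cannot be assigned to state_cov. A minimal sketch of one way to reconcile the shapes, assuming only three state elements are meant to be stochastic (which rows of selection carry the shocks is a guess here):

# In __init__: declare only 3 shocks in the positive-definite block.
super(TVPVAR, self).__init__(y_t, exog=None, k_states=k_states, k_posdef=3)
selection = np.zeros((k_states, 3))
selection[:3, :] = np.eye(3)  # route the 3 shocks into the first 3 states (assumption)
self['selection'] = selection

# In update: a 3 x 3 state_cov now matches k_posdef.
self['state_cov'] = np.diag([params[2]**2, params[3]**2, params[4]**2])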

Related

Pandas Array Exception: Data must be 1-Dimensional

This is my Python script for using the Markov Blanket algorithm on my dataset:
from numpy.random import seed
from pandas import read_csv
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
from sklearn.preprocessing import minmax_scale
from PyImpetus import PPIMBC

df1 = read_csv("input-binary-120-training.csv")
Y1 = df1[df1.CategoryL == 1].CategoryL
X1 = minmax_scale(df1[df1.CategoryL == 1].ix[:, 1:24], axis=0)
y_train = Y1.values

df2 = read_csv("input-binary-120-test.csv")
Y2 = df2[df2.CategoryL == 1].CategoryL
X2 = minmax_scale(df2[df2.CategoryL == 1].ix[:, 1:24], axis=0)
y_test = Y2.values
x_test = X2.reshape(X2.shape[0], X2.shape[1], 1)

seed(2017)
kfold = KFold(n_splits=5, random_state=27, shuffle=True)
scores = list()

# Create a PyImpetus classification object and initialize with required parameters
model = PPIMBC(LogisticRegression(random_state=27, max_iter=1000, class_weight="balanced"), cv=0, num_simul=20, simul_type=0, simul_size=0.2, random_state=27, sig_test_type="non-parametric", verbose=2, p_val_thresh=0.05)
x_train = model.fit_transform(X1, Y1)
x_test = model.transform(x_test)
print("Markov Blanket: ", model.MB)
But at the line x_train = model.fit_transform(X1, Y1) I get the exception:
Data must be 1-Dimensional.
I tried X1.flatten(), but it doesn't work. Could you please advise me about this issue?
Full error:
x_train = model.fit_transform(X1, Y1)
File "/home/osboxes/Downloads/Thesis/PyImpetus.py", line 326, in fit_transform
self.fit(data, Y)
File "/home/osboxes/Downloads/Thesis/PyImpetus.py", line 299, in fit
final_MB, final_feat_imp = self._find_MB(data.copy(), Y)
File "/home/osboxes/Downloads/Thesis/PyImpetus.py", line 221, in _find_MB
Y = np.reshape(Y, (-1, 1))
File "<__array_function__ internals>", line 6, in reshape
File "/home/osboxes/venv/lib/python3.6/site-packages/numpy/core/fromnumeric.py", line 299, in reshape
return _wrapfunc(a, 'reshape', newshape, order=order)
File "/home/osboxes/venv/lib/python3.6/site-packages/numpy/core/fromnumeric.py", line 55, in _wrapfunc
return _wrapit(obj, method, *args, **kwds)
File "/home/osboxes/venv/lib/python3.6/site-packages/numpy/core/fromnumeric.py", line 48, in _wrapit
result = wrap(result)
File "/home/osboxes/venv/lib/python3.6/site-packages/pandas/core/generic.py", line 1999, in __array_wrap__
return self._constructor(result, **d).__finalize__(self)
File "/home/osboxes/venv/lib/python3.6/site-packages/pandas/core/series.py", line 311, in __init__
data = sanitize_array(data, index, dtype, copy, raise_cast_failure=True)
File "/home/osboxes/venv/lib/python3.6/site-packages/pandas/core/internals/construction.py", line 729, in sanitize_array
raise Exception("Data must be 1-dimensional")
Exception: Data must be 1-dimensional
Try reshaping Y1 to one dimension, either with Y1 = Y1[:, 0] (if Y1 is a 2-D array) or Y1 = Y1.ravel().
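A minimal sketch of that fix (Y1 is a pandas Series here, so .values yields a 1-D numpy array and .ravel() is a harmless guard):
y_train = Y1.values.ravel()                 # 1-D target vector
x_train = model.fit_transform(X1, y_train)  # instead of passing the Series Y1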

PyTorch error in trying to backward through the graph a second time

I'm trying to run this code: https://github.com/aitorzip/PyTorch-CycleGAN
I modified only the dataloader and transforms to be compatible with my data.
When trying to run it I get this error:
Traceback (most recent call last):
File "models/CycleGANs/train", line 150, in <module>
loss_D_A.backward()
File "/opt/conda/lib/python3.8/site-packages/torch/tensor.py", line 221, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py", line 130, in backward
Variable._execution_engine.run_backward(
RuntimeError: Trying to backward through the graph a second time, but the saved intermediate results have already been freed. Specify retain_graph=True when calling backward the first time.
This is the train loop up to the point of error:
for epoch in range(opt.epoch, opt.n_epochs):
    for i, batch in enumerate(dataloader):
        # Set model input
        real_A = Variable(input_A.copy_(batch['A']))
        real_B = Variable(input_B.copy_(batch['B']))

        ##### Generators A2B and B2A #####
        optimizer_G.zero_grad()

        # Identity loss
        # G_A2B(B) should equal B if real B is fed
        same_B = netG_A2B(real_B)
        loss_identity_B = criterion_identity(same_B, real_B) * 5.0
        # G_B2A(A) should equal A if real A is fed
        same_A = netG_B2A(real_A)
        loss_identity_A = criterion_identity(same_A, real_A) * 5.0

        # GAN loss
        fake_B = netG_A2B(real_A)
        pred_fake = netD_B(fake_B)
        loss_GAN_A2B = criterion_GAN(pred_fake, target_real)

        fake_A = netG_B2A(real_B)
        pred_fake = netD_A(fake_A)
        loss_GAN_B2A = criterion_GAN(pred_fake, target_real)

        # Cycle loss
        # TODO: cycle loss doesn't allow for multimodality. I leave it for now but it needs to be thrown out later
        recovered_A = netG_B2A(fake_B)
        loss_cycle_ABA = criterion_cycle(recovered_A, real_A) * 10.0

        recovered_B = netG_A2B(fake_A)
        loss_cycle_BAB = criterion_cycle(recovered_B, real_B) * 10.0

        # Total loss
        loss_G = loss_identity_A + loss_identity_B + loss_GAN_A2B + loss_GAN_B2A + loss_cycle_ABA + loss_cycle_BAB
        loss_G.backward()
        optimizer_G.step()

        ##### Discriminator A #####
        optimizer_D_A.zero_grad()

        # Real loss
        pred_real = netD_A(real_A)
        loss_D_real = criterion_GAN(pred_real, target_real)

        # Fake loss
        fake_A = fake_A_buffer.push_and_pop(fake_A)
        pred_fake = netD_A(fake_A.detach())
        loss_D_fake = criterion_GAN(pred_fake, target_fake)

        # Total loss
        loss_D_A = (loss_D_real + loss_D_fake) * 0.5
        loss_D_A.backward()
I am not familiar at all with what this means. My guess is that it has something to do with fake_A_buffer, which is just fake_A_buffer = ReplayBuffer():
class ReplayBuffer():
    def __init__(self, max_size=50):
        assert (max_size > 0), 'Empty buffer or trying to create a black hole. Be careful.'
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, data):
        to_return = []
        for element in data.data:
            element = torch.unsqueeze(element, 0)
            if len(self.data) < self.max_size:
                self.data.append(element)
                to_return.append(element)
            else:
                if random.uniform(0, 1) > 0.5:
                    i = random.randint(0, self.max_size - 1)
                    to_return.append(self.data[i].clone())
                    self.data[i] = element
                else:
                    to_return.append(element)
        return Variable(torch.cat(to_return))
Error after setting loss_G.backward(retain_graph=True):
Traceback (most recent call last):
File "models/CycleGANs/train", line 150, in <module>
loss_D_A.backward()
File "/opt/conda/lib/python3.8/site-packages/torch/tensor.py", line 221, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py", line 130, in backward
Variable._execution_engine.run_backward(
RuntimeError: one of the variables needed for gradient computation has been modified by an inplace operation: [torch.FloatTensor [3, 64, 7, 7]] is at version 2; expected version 1 instead. Hint: enable anomaly detection to find the operation that failed to compute its gradient, with torch.autograd.set_detect_anomaly(True).
And after setting torch.autograd.set_detect_anomaly(True):
/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py:130: UserWarning: Error detected in MkldnnConvolutionBackward. Traceback of forward call that caused the error:
File "models/CycleGANs/train", line 115, in <module>
fake_B = netG_A2B(real_A)
File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/Histology-Style-Transfer-Research/models/CycleGANs/models.py", line 67, in forward
return self.model(x)
File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/container.py", line 117, in forward
input = module(input)
File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/home/Histology-Style-Transfer-Research/models/CycleGANs/models.py", line 19, in forward
return x + self.conv_block(x)
File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/container.py", line 117, in forward
input = module(input)
File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
result = self.forward(*input, **kwargs)
File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 423, in forward
return self._conv_forward(input, self.weight)
File "/opt/conda/lib/python3.8/site-packages/torch/nn/modules/conv.py", line 419, in _conv_forward
return F.conv2d(input, weight, self.bias, self.stride, (Triggered internally at /opt/conda/conda-bld/pytorch_1603729096996/work/torch/csrc/autograd/python_anomaly_mode.cpp:104.)
Variable._execution_engine.run_backward(
Traceback (most recent call last):
File "models/CycleGANs/train", line 133, in <module>
loss_G.backward(retain_graph=True)
File "/opt/conda/lib/python3.8/site-packages/torch/tensor.py", line 221, in backward
torch.autograd.backward(self, gradient, retain_graph, create_graph)
File "/opt/conda/lib/python3.8/site-packages/torch/autograd/__init__.py", line 130, in backward
Variable._execution_engine.run_backward(
RuntimeError: Function 'MkldnnConvolutionBackward' returned nan values in its 2th output.
loss_G.backward() should be loss_G.backward(retain_graph=True). By default, backward frees the intermediate buffers saved during the forward pass once it has used them; retain_graph=True tells it to keep them, so the graph can be backpropagated through a second time.
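For reference, a minimal sketch of an ordering that avoids retaining the graph at all, reusing the names from the question (optimizer_D_B is assumed by symmetry with optimizer_D_A; the key point is that the discriminator sees a detached fake, so its backward never re-enters the generator's graph):

# Generator step: one backward pass over G's graph, then free it.
optimizer_G.zero_grad()
fake_B = netG_A2B(real_A)
loss_G = criterion_GAN(netD_B(fake_B), target_real)  # plus identity/cycle terms
loss_G.backward()
optimizer_G.step()

# Discriminator step: detach the fake sample so backward stays inside D.
optimizer_D_B.zero_grad()
loss_D_real = criterion_GAN(netD_B(real_B), target_real)
loss_D_fake = criterion_GAN(netD_B(fake_B.detach()), target_fake)
loss_D_B = 0.5 * (loss_D_real + loss_D_fake)
loss_D_B.backward()
optimizer_D_B.step()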

Bayesian analysis of chemical network

I am trying to model a simple reaction network and study the sensitivity of the reaction rates k1 and k2. My reaction goes A --> B --> C with rates k1 and k2, respectively.
import numpy as np
import pymc3 as pm
from matplotlib.pyplot import figure, scatter, legend, plot
from scipy.integrate import solve_ivp
from sys import exit
Nt = 11
time = 10
tt = np.linspace(0,time,Nt+1)
y0 = [1,0,0]
k1_0, k2_0 = 1, 0.5
def equat(t, c):
    da_dt = -k1_0*c[0]
    db_dt = k1_0*c[0] - k2_0*c[1]
    dc_dt = k2_0*c[1]
    return da_dt, db_dt, dc_dt

c_est = solve_ivp(equat, t_span=[0, time], t_eval=tt, y0=y0)
niter = 10
with pm.Model() as reak_model:
    k1 = pm.Normal('k1', mu=0, sd=100)
    k2 = pm.Normal('k2', mu=0, sd=100, shape=1)
    sigma = pm.HalfNormal('sigma', sd=1)

    def equat_2(t, c):
        da_dt = -k1*c[0]
        db_dt = k1*c[0] - k2*c[1]
        dc_dt = k2*c[1]
        return da_dt, db_dt, dc_dt

    c = solve_ivp(equat_2, t_span=[0, time], t_eval=tt, y0=y0)

    likelihood = pm.Normal('y', mu=c.y, sd=sigma, observed=c_est.y)
    trace = pm.sample(niter, chains=1)

pm.traceplot(trace, varnames=['k1', 'k2'])
I'm getting this error:
ValueError: setting an array element with a sequence.
Since I am new to Bayesian methods, I am wondering if it's something to do with the distributions?
Traceback (most recent call last):
File "<ipython-input-95-26ff8f25faea>", line 1, in <module>
runfile('/Daft regresion.py', wdir='/Final code')
File "/python3.7/site-packages/spyder_kernels/customize/spydercustomize.py", line 786, in runfile
execfile(filename, namespace)
File "python3.7/site-packages/spyder_kernels/customize/spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "Daft regression.py", line 53, in <module>
c = solve_ivp(equat_2, t_span = [0,time], t_eval = tt, y0 = y0)
File "/python3.7/site-packages/scipy/integrate/_ivp/ivp.py", line 454, in solve_ivp
solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)
File "/python3.7/site-packages/scipy/integrate/_ivp/rk.py", line 99, in __init__
self.f = self.fun(self.t, self.y)
File "/python3.7/site-packages/scipy/integrate/_ivp/base.py", line 139, in fun
return self.fun_single(t, y)
File "/python3.7/site-packages/scipy/integrate/_ivp/base.py", line 21, in fun_wrapped
return np.asarray(fun(t, y), dtype=dtype)
File "/python3.7/site-packages/numpy/core/numeric.py", line 501, in asarray
return array(a, dtype, copy=False, order=order)
ValueError: setting an array element with a sequence.
Edit: apparently I need to use the Theano package instead of solve_ivp. Help is still appreciated.
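Following up on that edit: pymc3 ships an ODE wrapper, pm.ode.DifferentialEquation (available from pymc3 3.7 on), which builds a Theano op around the solver so the rates can be sampled. A minimal sketch under that assumption, reusing tt, y0 and c_est from above (the prior choices here are illustrative, not a recommendation):

from pymc3.ode import DifferentialEquation

def rhs(y, t, p):
    # y = [A, B, C], p = [k1, k2]
    return [-p[0]*y[0], p[0]*y[0] - p[1]*y[1], p[1]*y[1]]

# times must lie strictly after t0, so drop the t = 0 point
ode_model = DifferentialEquation(func=rhs, times=tt[1:], n_states=3, n_theta=2, t0=0)

with pm.Model():
    k1 = pm.HalfNormal('k1', sd=1)         # rates are positive
    k2 = pm.HalfNormal('k2', sd=1)
    sigma = pm.HalfNormal('sigma', sd=1)
    mu = ode_model(y0=y0, theta=[k1, k2])  # shape (len(tt) - 1, 3)
    pm.Normal('y', mu=mu, sd=sigma, observed=c_est.y[:, 1:].T)
    trace = pm.sample(500, chains=1)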

Python MemoryError when applying ARIMA.fit with high MA order q=367

Hi all StackOverflow forum experts,
I am using PyCharm 2018.1.1. I have tried to build an ARIMA model in Python; the model has been identified with the parameters (p=0, d=0, q=367). Here is the code:
from math import sqrt
from sklearn.metrics import mean_squared_error
from statsmodels.tsa.arima_model import ARIMA

def arima_Model_Static_PlotErrorAC_PAC(series):
    train, expctd = series, series
    arima_orders = (0, 0, 367)
    model = ARIMA(series, order=arima_orders)
    results_MA = model.fit(disp=-1, start_params=[.1 for i in range(1 + arima_orders[2])])
    yhatList = results_MA.fittedvalues
    residuals = [expctd[i] - yhatList[i] for i in range(len(expctd))]
    mse = mean_squared_error(expctd, yhatList)
    rmse = sqrt(mse)
    print(results_MA.summary())
    print(rmse)
The model is called as follows:
series = DataSetDiff  # DataSetDiff is a series with a length of 3652 values
outputResidualError = arima_Model_Static_PlotErrorAC_PAC(series)
With this high q order, the following error is raised:
C:\109_personel\112_pyCharmArima\venv\Scripts\python.exe C:/109_personel/112_pyCharmArima/Presentation_Vers2_ModelOneFunct_3_5.py
Traceback (most recent call last):
File "C:/109_personel/112_pyCharmArima/Presentation_Vers2_ModelOneFunct_3_5.py", line 243, in arima_Model_Static_PlotErrorAC_PAC
results_MA = model.fit(disp=-1, start_params=[.1 for i in range(1 + arima_orders[2])], solver='bfgs')
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tsa\arima_model.py", line 959, in fit
callback=callback, **kwargs)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\base\model.py", line 466, in fit
full_output=full_output)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\base\optimizer.py", line 191, in _fit
hess=hessian)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\base\optimizer.py", line 327, in _fit_bfgs
disp=disp, retall=retall, callback=callback)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\scipy\optimize\optimize.py", line 916, in fmin_bfgs
res = _minimize_bfgs(f, x0, args, fprime, callback=callback, **opts)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\scipy\optimize\optimize.py", line 970, in _minimize_bfgs
gfk = myfprime(x0)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\scipy\optimize\optimize.py", line 300, in function_wrapper
return function(*(wrapper_args + args))
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\base\model.py", line 451, in score
return -self.score(params, *args) / nobs
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tsa\arima_model.py", line 583, in score
return approx_fprime_cs(params, self.loglike, args=(False,))
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tools\numdiff.py", line 202, in approx_fprime_cs
for i, ih in enumerate(increments)]
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tools\numdiff.py", line 202, in
for i, ih in enumerate(increments)]
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tsa\arima_model.py", line 780, in loglike
return self.loglike_kalman(params, set_sigma2)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tsa\arima_model.py", line 790, in loglike_kalman
return KalmanFilter.loglike(params, self, set_sigma2)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\statsmodels\tsa\kalmanf\kalmanfilter.py", line 654, in loglike
R_mat, T_mat)
File "kalman_loglike.pyx", line 359, in statsmodels.tsa.kalmanf.kalman_loglike.kalman_loglike_complex
File "kalman_loglike.pyx", line 228, in statsmodels.tsa.kalmanf.kalman_loglike.kalman_filter_complex
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\numpy\core\numeric.py", line 2200, in identity
return eye(n, dtype=dtype)
File "C:\109_personel\112_pyCharmArima\venv\lib\site-packages\numpy\lib\twodim_base.py", line 186, in eye
m = zeros((N, M), dtype=dtype, order=order)
MemoryError
Process finished with exit code 1
The model runs and forecasts fine up to an MA order of q=150, i.e. (0, 0, 150); the MemoryError is raised once q=367 is selected.
Can anyone help me solve this error? I have googled it many times and have not found a suitable solution.
Thank you for any help.
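Not a verified fix, but one avenue worth trying: the legacy statsmodels.tsa.arima_model.ARIMA has since been deprecated in favour of the state-space SARIMAX implementation, which uses a different Kalman filter and score computation. A sketch of the swap (whether it changes the memory behaviour is an assumption; fitting 368 MA coefficients will in any case be very slow):

from statsmodels.tsa.statespace.sarimax import SARIMAX

model = SARIMAX(series, order=(0, 0, 367))
results_MA = model.fit(disp=False)
print(results_MA.summary())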

scipy.minimize error: setting an array element with a sequence

I want to estimate the parameters k, ru, sigma that maximize the function func (ru means r upper bar).
The formula for func is complex, so I wanted to upload an image of it, but I do not have enough reputation.
import numpy as np
sigma, k, ru = 0.01, 0.001, 5
p0 = np.array([[0.01, 0.01, 6]])
p = np.array([[sigma, k, ru]])

def func(p, r):
    T = 91/365
    y = 1/(np.sqrt(2*(np.pi)*p[0]**2/(2*p[1])*(1-np.exp(-(2*p[1]*T)))))*np.exp((r-p[2]-np.exp(-(p[1]*T))*(r-p[2]))**2/(p[0]**2/((-4)*p[1])*(1-np.exp(-(2*p[1]*T)))))
    return -y

from scipy.optimize import minimize
r = np.array([[1.45, 2.5, 2.6, 1.67, 1.2]])
# r has 1350 values like this
res = minimize(func, p0, args=(r))
Traceback (most recent call last):
File "<ipython-input-9-b94a05d2ede8>", line 1, in <module>
res=minimize(func,p0,args=(r))
File "C:\Users\hyun su\Anaconda3\lib\site-packages\scipy\optimize\_minimize.py", line 419, in minimize
return _minimize_bfgs(fun, x0, args, jac, callback, **options)
File "C:\Users\hyun su\Anaconda3\lib\site-packages\scipy\optimize\optimize.py", line 837, in _minimize_bfgs
gfk = myfprime(x0)
File "C:\Users\hyun su\Anaconda3\lib\site-packages\scipy\optimize\optimize.py", line 282, in function_wrapper
return function(*(wrapper_args + args))
File "C:\Users\hyun su\Anaconda3\lib\site-packages\scipy\optimize\optimize.py", line 616, in approx_fprime
return _approx_fprime_helper(xk, f, epsilon, args=args)
File "C:\Users\hyun su\Anaconda3\lib\site-packages\scipy\optimize\optimize.py", line 556, in _approx_fprime_helper
grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
ValueError: setting an array element with a sequence.
How can I solve this?
func here returns an array (one value per element of r), but scipy.optimize.minimize requires the objective to be a scalar function of one or more variables, as indicated in its documentation.
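A minimal sketch of that reduction, keeping the posted density but summing its log over the data so the objective returns a scalar (whether a log-likelihood sum is the intended objective is an assumption):

import numpy as np
from scipy.optimize import minimize

r = np.array([1.45, 2.5, 2.6, 1.67, 1.2])  # 1-D data vector

def neg_log_lik(p, r):
    T = 91/365
    y = 1/(np.sqrt(2*np.pi*p[0]**2/(2*p[1])*(1 - np.exp(-2*p[1]*T)))) \
        * np.exp((r - p[2] - np.exp(-p[1]*T)*(r - p[2]))**2
                 / (p[0]**2/(-4*p[1])*(1 - np.exp(-2*p[1]*T))))
    return -np.sum(np.log(y))  # scalar, as minimize requires

res = minimize(neg_log_lik, x0=np.array([0.01, 0.01, 6]), args=(r,))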
