I have a question about how to use the Levenberg-Marquardt optimization method in Python. The SciPy library offers many optimization methods.
I tried two of them (Nelder-Mead and basin-hopping) and both work well with the following calls:
# Nelder-Mead
res0_10 = optimize.minimize(f0_10, x0, method='Nelder-Mead', options={'disp': True, 'maxiter': 2000})
# Basin-hopping
res0_10 = optimize.basinhopping(f0_10, x0, niter=100, disp=True)
The problem emerges when I use Levenberg-Marquardt (I copy only the part that produces the error, because the program is long):
def f0_10(x):
    # B, b0_10, alt_min and alt_max are defined elsewhere in the program
    m, u, z, s = x
    for i in range(alt_max):
        if i == alt_min:
            suma = 0
        if i > alt_min:
            suma = suma + (B(x, i) - b0_10(x, i))**2
    return np.sqrt(suma / alt_max)

x0 = np.array([40., 0., 500., 50.])
res0_10 = root(f0_10, x0, jac=True, method='lm')
I only changed the last statement (res0_10 = root...). The program compiles fine, but when I execute it I get:
Exception in Tkinter callback
Traceback (most recent call last):
File "C:\Users\Quini SB\AppData\Local\Enthought\Canopy\App\appdata\canopy-1.7.4.3348.win-x86_64\lib\lib-tk\Tkinter.py", line 1536, in __call__
return self.func(*args)
File "C:\Users\Quini SB\Desktop\tfg\Steyn - levmar.py", line 384, in askopenfilename
res0_10 = root(f0_10, x0, jac=True, method='lm')
File "C:\Users\Quini SB\AppData\Local\Enthought\Canopy\User\lib\site-packages\scipy\optimize\_root.py", line 188, in root
sol = _root_leastsq(fun, x0, args=args, jac=jac, **options)
File "C:\Users\Quini SB\AppData\Local\Enthought\Canopy\User\lib\site-packages\scipy\optimize\_root.py", line 251, in _root_leastsq
factor=factor, diag=diag)
File "C:\Users\Quini SB\AppData\Local\Enthought\Canopy\User\lib\site-packages\scipy\optimize\minpack.py", line 377, in leastsq
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
File "C:\Users\Quini SB\AppData\Local\Enthought\Canopy\User\lib\site-packages\scipy\optimize\minpack.py", line 26, in _check_func
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
File "C:\Users\Quini SB\AppData\Local\Enthought\Canopy\User\lib\site-packages\scipy\optimize\optimize.py", line 64, in __call__
self.jac = fg[1]
IndexError: invalid index to scalar variable.
Why does this error happen?
From the documentation:
jac : bool or callable, optional
If jac is a Boolean and is True, fun is assumed to return the value
of Jacobian along with the objective function. If False, the
Jacobian will be estimated numerically. jac can also be a callable
returning the Jacobian of fun. In this case, it must accept the
same arguments as fun.
So, because you set jac to True, your function f0_10 needs to return two values: the objective value and the Jacobian.
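A minimal sketch of the two options, reusing the f0_10 and x0 defined in the question:

# Option 1: do not pass jac=True, so SciPy estimates the Jacobian numerically.
res0_10 = root(f0_10, x0, method='lm')

# Option 2: keep jac=True, but then f0_10 itself must return a pair
# (value, jacobian), with the Jacobian computed analytically inside it.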
Related
I want to solve the following optimization problem with scipy (python3):
where I[.,.] is the "incomplete gamma function" (scipy.special.gammainc)
I followed the scipy optimization guide and came up with this code:
from scipy.special import gammainc
from scipy.optimize import NonlinearConstraint
from scipy.optimize import BFGS
from scipy.optimize import minimize
import numpy as np

q = 200
k = 1.5625
alpha = 0.03125

def func(s):
    return s

def constraints(s):
    return q - k/alpha * (gammainc(k+1, s*alpha) + gammainc(k+1, (s+q)*alpha)) + s*gammainc(k, s*alpha) + (s+q)*gammainc(k, (s+q)*alpha)

nonlinear_constraint = NonlinearConstraint(constraints, -np.inf, 10.0, jac='2-point', hess=BFGS())
s0 = 1000
res = minimize(func, s0, method='trust-constr', jac='2-point', hess=BFGS(), constraints=[nonlinear_constraint], options={'verbose': 1})
print(res.x)
When I run the code, I get this error output:
/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/optimize/_hessian_update_strategy.py:187: UserWarning: delta_grad == 0.0. Check if the approximated function is linear. If the function is linear better results can be obtained by defining the Hessian as zero instead of using quasi-Newton approximations.
'approximations.', UserWarning)
Traceback (most recent call last):
File "s_opt.py", line 23, in <module>
res = minimize(func, s0, method='trust-constr', jac='2-point', hess=BFGS(), constraints=[nonlinear_constraint], options={'verbose': 1})
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/optimize/_minimize.py", line 622, in minimize
callback=callback, **options)
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/optimize/_trustregion_constr/minimize_trustregion_constr.py", line 519, in _minimize_trustregion_constr
factorization_method)
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/optimize/_trustregion_constr/tr_interior_point.py", line 329, in tr_interior_point
factorization_method, trust_lb, trust_ub, subprob.scaling)
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/optimize/_trustregion_constr/equality_constrained_sqp.py", line 121, in equality_constrained_sqp
lb_t, ub_t)
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/optimize/_trustregion_constr/qp_subproblem.py", line 499, in projected_cg
r = Z.dot(H.dot(x) + c)
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/sparse/linalg/interface.py", line 415, in dot
return self.matvec(x)
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/sparse/linalg/interface.py", line 229, in matvec
y = self._matvec(x)
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/sparse/linalg/interface.py", line 527, in _matvec
return self.__matvec_impl(x)
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/optimize/_trustregion_constr/projections.py", line 193, in null_space
aux2 = scipy.linalg.solve_triangular(R, aux1, lower=False)
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/linalg/basic.py", line 336, in solve_triangular
b1 = _asarray_validated(b, check_finite=check_finite)
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/scipy/_lib/_util.py", line 246, in _asarray_validated
a = toarray(a)
File "/home/andreasziegler/miniconda3/lib/python3.7/site-packages/numpy/lib/function_base.py", line 499, in asarray_chkfinite
"array must not contain infs or NaNs")
ValueError: array must not contain infs or NaNs
which tells me that I did something wrong. This is the first time I've used the optimizer in SciPy, so chances are high that I've misunderstood something essential. I'd be glad for any hint.
I'm trying to estimate a maximum likelihood model in Python. I set up both the likelihood function and the analytic Jacobian. When I run SciPy's minimize, I get a bizarre error (displayed below). This error doesn't seem to occur when I omit the Jacobian, but I can't figure out why.
import numpy as np
from numpy import log, sum, var
from numba import njit
from scipy.optimize import minimize

@njit
def log_likelihood(params, surg_fx, surg_fx_ses):
    mu_var = params[0]
    exp_var = mu_var + surg_fx_ses**2
    log_lik = -((surg_fx)**2 / (2*exp_var)) - .5*log(exp_var)
    neg_sum_log_lik = -sum(log_lik)
    print(mu_var)
    print(neg_sum_log_lik)
    if np.isnan(neg_sum_log_lik):
        return 1e20
    else:
        return neg_sum_log_lik

@njit
def log_lik_jac(params, surg_fx, surg_fx_ses):
    mu_var = params[0]
    exp_var = mu_var + surg_fx_ses**2
    jc = -sum(((surg_fx)**2 / (2*(exp_var**2))) - (.5/exp_var))
    print(mu_var)
    print(jc)
    return jc

x0 = [np.var(cost_params3)]
shrinkage_est = minimize(log_likelihood, x0, args=(cost_params3, cost_SEs3), jac=log_lik_jac, options={'disp': True}, method='BFGS')
cost_params3 and cost_SEs3 are (205,)-shaped numpy arrays.
And the output is:
0.10423462356390442
-580.1534424527905
0.10423462356390442
-67.02947836460727
[ 1.11423462]
26.84532144252225
[ 1.11423462]
77.95606471086792
[ 0.3741784]
-54.28224588483895
[ 0.3741784]
150.90730570822998
[ 0.19152581]
-79.19268133113846
[ 0.19152581]
68.81484893304786
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/anaconda3/lib/python3.5/site-packages/scipy/optimize/_minimize.py", line 444, in minimize
return _minimize_bfgs(fun, x0, args, jac, callback, **options)
File "/usr/local/anaconda3/lib/python3.5/site-packages/scipy/optimize/optimize.py", line 973, in _minimize_bfgs
A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
TypeError: 'float' object is not subscriptable
I'm not really sure why this runs for a few iterations and then fails, especially given that nothing is being subscripted here. I'm also not sure why the value seems to become a list after the first iteration. I tried running it without numba, but it stopped at the same place with a different error:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/anaconda3/lib/python3.5/site-packages/scipy/optimize/_minimize.py", line 444, in minimize
return _minimize_bfgs(fun, x0, args, jac, callback, **options)
File "/usr/local/anaconda3/lib/python3.5/site-packages/scipy/optimize/optimize.py", line 973, in _minimize_bfgs
A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok
IndexError: invalid index to scalar variable.
Any help would be much appreciated!
I am using SciPy to minimize the variance:
port_returns = []
port_variance = []
for p in range(4000):
    weights = np.random.random(5)
    weights /= np.sum(weights)
    port_returns.append(np.sum(returns.mean()*245*weights))
    port_variance.append(np.sqrt(np.dot(weights.T, np.dot(returns.cov()*245, weights))))

def min_variance(weights):
    return np.array(port_variance)

cons = {'type': 'eq', 'fun': lambda x: np.sum(x) - 1}
bnds = tuple((0, 1) for x in range(245))
optv = sco.minimize(min_variance, 245*[1.0/245,], method='SLSQP',
                    bounds=bnds, constraints=cons)
I tried to run this function but got the error below:
File "D:\Python27\lib\site-packages\scipy\optimize\_minimize.py", line 458, in minimize
constraints, callback=callback, **options)
File "D:\Python27\lib\site-packages\scipy\optimize\slsqp.py", line 370, in _minimize_slsqp
raise ValueError("Objective function must return a scalar")
ValueError: Objective function must return a scalar
How can I return a scalar value?
Upgrade SciPy to v1.0; it was a bug in 0.19.
import math
from scipy.optimize import fsolve

def sigma(s, Bpu):
    return s - math.sin(s) - math.pi * Bpu

def jac_sigma(s):
    return 1 - math.cos(s)

if __name__ == '__main__':
    Bpu = 0.5
    sig_r = fsolve(sigma, x0=[math.pi], args=(Bpu), fprime=jac_sigma)
Running the above script throws the following error:
Traceback (most recent call last):
File "C:\Users\RP12808\Desktop\_test_fsolve.py", line 12, in <module>
sig_r = fsolve(sigma, x0=[math.pi], args=(Bpu), fprime=jac_sigma)
File "C:\Users\RP12808\AppData\Local\Programs\Python\Python36\lib\site-packages\scipy\optimize\minpack.py", line 146, in fsolve
res = _root_hybr(func, x0, args, jac=fprime, **options)
File "C:\Users\RP12808\AppData\Local\Programs\Python\Python36\lib\site-packages\scipy\optimize\minpack.py", line 226, in _root_hybr
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
File "C:\Users\RP12808\AppData\Local\Programs\Python\Python36\lib\site-packages\scipy\optimize\minpack.py", line 26, in _check_func
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
TypeError: jac_sigma() takes 1 positional argument but 2 were given
I am unsure how to pass the Jacobian to the fsolve function... how do I solve this?
Thanks in advance. RP
The function that computes the Jacobian matrix must take the same arguments as the function to be solved, and it must return an array:
def jac_sigma(s, Bpu):
    return np.array([1 - math.cos(s)])
In general, the Jacobian matrix is a two-dimensional array, but
when the variable is a scalar (as it is here) and the Jacobian "matrix" is 1x1, the code accepts a one- or two-dimensional value. (It might be nice if it also accepted a scalar in this case, but it doesn't.)
Actually, it is sufficient that the return value be "array-like"; e.g. a list is also acceptable:
def jac_sigma(s, Bpu):
    return [1 - math.cos(s)]
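Putting it together, a minimal usage sketch of the corrected call (reusing sigma from the question and the jac_sigma defined above; args is written as a one-element tuple for clarity):

import math
from scipy.optimize import fsolve

Bpu = 0.5
sig_r = fsolve(sigma, x0=[math.pi], args=(Bpu,), fprime=jac_sigma)
print(sig_r)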
I have the code:
import numpy as np
import scipy.optimize
from scipy.integrate import odeint
Basic variables:
eee = 0.289
nn = 0.63
E1k = 0.0935
pp = 1.25
B1k = 0.12
v1k = 0.126
Bkk = 3.14
VKb = 0.76
rKb = 1.754
Motion model; these equations are integrated numerically:
def D2(y, t):
    Vr, Vn, r, v, Q, dQ_dt = y
    dVr_dt = (aob2*np.sin(Q))/(1 - (aob2*t/wb)) - 1/(r**2) + (Vn**2)/r
    dVn_dt = (aob2*np.cos(Q))/(1 - ((aob2*t)/wb)) - (Vr*Vn)/r
    dr_dt = Vr
    dB_dt = Vn/r
    Qt = 0
    dQ_dtt = 0
    return [dVr_dt, dVn_dt, dr_dt, dB_dt, Qt, dQ_dtt]
The function whose roots we seek:
#-----------------------------------------------------------------
def VrVnRB(x):
    v2oA = 2*np.arctan(np.sqrt((1+eee)/(1-eee))*np.tan(x[0]/2))
    if v2oA > v1k:
        v2o = v2oA
    else:
        v2o = v2oA + 2*np.pi
    t2 = np.linspace(0, x[3], 500)
    tpas = 1/nn*(x[0] - E1k + eee*(np.sin(E1k) - np.sin(x[0])))
    Vro2 = eee*np.sin(v2o)/np.sqrt(pp)
    Vno2 = (1 + eee*np.cos(v2o))/np.sqrt(pp)
    ro2 = pp/(1 + eee*np.cos(v2o))
    Bo2 = B1k + v2o - v1k
    yo2 = [Vro2, Vno2, ro2, Bo2, x[1], x[2]]
    EE2 = odeint(D2, yo2, t2)
    Vrk = EE2[:, 0]
    Vrk = Vrk[-1]
    Vnk = EE2[:, 1]
    Vnk = Vnk[-1]
    rk = EE2[:, 2]
    rk = rk[-1]
    Bk = EE2[:, 3]
    Bk = Bk[-1]
    return [Vrk, Vnk - VKb, rk - rKb, Bk - Bkk]

x0 = [E20, Qo2, dQ_dt2, t2b]
#-----------------------------------------------------------------
t1k = scipy.optimize.fsolve(VrVnRB, x0)
But the interpreter complains:
File "<tmp 2>", line 36, in <module>
t1k=scipy.optimize.fsolve(VrVnRB,x0)
File "C:\pyzo2013c\lib\site-packages\scipy\optimize\minpack.py", line 139, in fsolve
res = _root_hybr(func, x0, args, jac=fprime, **options)
File "C:\pyzo2013c\lib\site-packages\scipy\optimize\minpack.py", line 208, in _root_hybr
ml, mu, epsfcn, factor, diag)
TypeError: Cannot cast array data from dtype('O') to dtype('float64') according to the rule 'safe'
I do not understand what my mistake is. Please help.