Scipy minimize erroring for large array of variables - python

Hi, I am trying to use scipy for optimization. The minimize function with method='COBYLA' works fine for small arrays but errors out for larger ones. I tried the 'COBYLA' and 'SLSQP' methods because I have a constrained optimization problem with non-linear functions.
Code snippet:
import random

import scipy as sp
import scipy.optimize  # 'import scipy as sp' alone does not load the optimize submodule

def mytest7obj(x, x_d, y_d, z_d, power):
    # Reject candidate points with negative components.
    for x_i in x:
        if x_i < 0:
            return 0.
    total = 0.
    for i in range(x_d):
        for j in range(z_d):
            term = 1.
            for k in range(y_d):
                term *= (x[i*(y_d*z_d) + j*y_d + k] ** power[k])
            total += term
    # Negated so that minimize() maximizes the sum.
    return 0. - total

def mytest7():
    x_d = 30
    y_d = 10
    z_d = 100
    goal = 1000000.
    constraints = []
    power = []
    for i in range(y_d):
        power.append(random.uniform(0., 0.3))
    constraints.append({'type': 'ineq', 'fun': lambda x: goal - sum(x)})
    print 'power: %s\n' % (power,)
    result = sp.optimize.minimize(fun = mytest7obj, x0 = [30.] * (x_d*y_d*z_d), method = 'COBYLA', args = (x_d, y_d, z_d, power), jac=False, constraints=constraints, options={'disp':True, 'rhobeg':3., 'maxiter': 10000})
    print 'goal attained: %s' % (sum(result.x),)

if __name__ == '__main__':
    mytest7()
The traceback of the error with method 'COBYLA' is:
Traceback (most recent call last):
  File "opt_test.py", line 584, in <module>
    print 'mytest7'; mytest7()
  File "opt_test.py", line 571, in mytest7
    result = sp.optimize.minimize(fun = mytest7obj, x0 = [30.] * (x_d*y_d*z_d), method = 'COBYLA', args = (x_d, y_d, z_d, power), jac=False, constraints=constraints, options={'disp':True, 'rhobeg':3., 'maxiter': 10000})
  File "/usr/lib64/python2.7/site-packages/scipy/optimize/_minimize.py", line 385, in minimize
    return _minimize_cobyla(fun, x0, args, constraints, **options)
  File "/usr/lib64/python2.7/site-packages/scipy/optimize/cobyla.py", line 238, in _minimize_cobyla
    dinfo=info)
ValueError: failed to create intent(cache|hide)|optional array-- must have defined dimensions but got (-1594577286,)
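For reference, the negative dimension in that message looks like a 32-bit integer overflow in the size COBYLA requests for its Fortran workspace. With n = 30000 variables and m = 1 constraint, the workspace length n*(3*n + 2*m + 11) + 4*m + 6 (treat the formula as an assumption; it matches scipy's cobyla wrapper of this era) wraps to exactly the number in the error:
n = 30 * 10 * 100                    # x_d * y_d * z_d = 30000 variables
m = 1                                # one inequality constraint
w = n * (3*n + 2*m + 11) + 4*m + 6   # COBYLA double-precision workspace length
print w                              # 2700390010, exceeds 2**31 - 1
print w - 2**32                      # -1594577286, the value in the ValueError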
With 'SLSQP', the error is:
File "opt_test.py", line 586, in <module>
print 'test'; test()
File "opt_test.py", line 454, in test
x = get_optimal(base, budget, initial_values, x_elas, y_elas, x_history, y_history, constraint_coeffs, opt_method = 'SLSQP')
File "opt_test.py", line 355, in get_optimal
constraints=constraints, options=opts)
File "/usr/lib64/python2.7/site-packages/scipy/optimize/_minimize.py", line 388, in minimize
constraints, **options)
File "/usr/lib64/python2.7/site-packages/scipy/optimize/slsqp.py", line 316, in _minimize_slsqp
w = zeros(len_w)
MemoryError
I am using Python 2.7.5, scipy 0.14.0rc1, and numpy 1.8.1.
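The SLSQP failure looks like the same scaling wall in a different form: _minimize_slsqp allocates a dense workspace whose length grows quadratically with the number of variables, so zeros(len_w) raises MemoryError. A back-of-the-envelope check (assumption: treating the workspace as at least n**2 doubles, a lower bound):
n = 30000
approx_bytes = n * n * 8                   # lower bound; the workspace has several O(n**2) terms
print '%.1f GB at minimum' % (approx_bytes / 1e9,)   # ~7.2 GB, hence the MemoryError
So with 30,000 variables these dense-workspace methods run out of room by construction, independent of the objective function.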

Related

Matrix and vector shape in TVP-VAR in the statespace mlemodels

Thanks to everyone in advance for their time!
I am trying to run a TVP-VAR (time varying parameter VAR) for a panel using the statespace MLEModel classes in statsmodels. I get an error while trying to fit the model. My understanding is that it mostly concerns the dimensions of the state covariance matrix. I suspect I will get the same error later when I deal with the column shape.
The error is :
ValueError: Invalid dimensions for state covariance matrix matrix: requires 702 rows, got 3
How could I solve that? The error and the traceback are shown below:
preliminary = tvppanelvarmodel.fit(maxiter=1000)

Traceback (most recent call last):
  File "/var/folders/m6/68zljfsj2t9_dzgpwwslj29r0000gp/T/ipykernel_6232/3038987883.py", line 1, in <module>
    preliminary = tvppanelvarmodel.fit(maxiter=1000)
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 704, in fit
    mlefit = super(MLEModel, self).fit(start_params, method=method,
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/base/model.py", line 563, in fit
    xopt, retvals, optim_settings = optimizer._fit(f, score, start_params,
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/base/optimizer.py", line 241, in _fit
    xopt, retvals = func(objective, gradient, start_params, fargs, kwargs,
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/base/optimizer.py", line 651, in _fit_lbfgs
    retvals = optimize.fmin_l_bfgs_b(func, start_params, maxiter=maxiter,
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py", line 197, in fmin_l_bfgs_b
    res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/lbfgsb.py", line 306, in _minimize_lbfgsb
    sf = _prepare_scalar_function(fun, x0, jac=jac, args=args, epsilon=eps,
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/optimize.py", line 261, in _prepare_scalar_function
    sf = ScalarFunction(fun, x0, args, grad, hess,
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py", line 140, in __init__
    self._update_fun()
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py", line 233, in _update_fun
    self._update_fun_impl()
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py", line 137, in update_fun
    self.f = fun_wrapped(self.x)
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/scipy/optimize/_differentiable_functions.py", line 134, in fun_wrapped
    return fun(np.copy(x), *args)
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/base/model.py", line 531, in f
    return -self.loglike(params, *args) / nobs
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 933, in loglike
    self.update(params, transformed=True, includes_fixed=True,
  File "/var/folders/m6/68zljfsj2t9_dzgpwwslj29r0000gp/T/ipykernel_6232/3786466608.py", line 104, in update
    self['state_cov'] = np.diag([params[2]**2, params[3]**2, params[4]**2]) # W
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 239, in __setitem__
    return self.ssm.__setitem__(key, value)
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/representation.py", line 420, in __setitem__
    setattr(self, key, value)
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/representation.py", line 54, in __set__
    value = self._set_matrix(obj, value, shape)
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/representation.py", line 68, in _set_matrix
    validate_matrix_shape(
  File "/opt/anaconda3/envs/spyder-env/lib/python3.10/site-packages/statsmodels/tsa/statespace/tools.py", line 1474, in validate_matrix_shape
    raise ValueError('Invalid dimensions for %s matrix: requires %d'
ValueError: Invalid dimensions for state covariance matrix matrix: requires 702 rows, got 3
When I defined the class initially, I did the following:
class TVPVAR(sm.tsa.statespace.MLEModel):
    def __init__(self, y):
        # Create a matrix with [y_t' : y_{t-1}'] for t = 2, ..., T
        augmented = sm.tsa.lagmat(y, 1, trim='both', original='in', use_pandas=True)
        # Separate into y_t and z_t = [1 : y_{t-1}']
        p = y.shape[1]
        y_t = augmented.iloc[:, :p]
        z_t = sm.add_constant(augmented.iloc[:, p:])
        nobs = y.shape[0]
        T = y.shape[0]
        # Recall that the length of the state vector is p * (p + 1)
        k_states = p * (p + 1)
        super(TVPVAR, self).__init__(y_t, exog=None, k_states=k_states, k_posdef=k_states)
        self.k_y = p
        self.k_states = p * (p + 1)
        self.nobs = T
        self['design'] = np.zeros((self.k_y, self.k_states, 1))
        self['transition'] = np.eye(k_states)  # G
        self['selection'] = np.eye(k_states)   # R = 1

    def update_variances(self, obs_cov, state_cov_diag):
        self['obs_cov'] = obs_cov
        self['state_cov'] = np.diag(state_cov_diag)  # W
        init = initialization.Initialization(self.k_states)
        init.set((0, 2), 'diffuse')
        init.set((2, 4), 'stationary')
        self.ssm.initialize(init)

    # OTHER CODE

    def update(self, params, **kwargs):
        params = super().update(params, **kwargs)
        self['transition', 2, 2] = params[0]
        self['transition', 3, 2] = params[1]
        self['state_cov'] = np.diag([params[2]**2, params[3]**2, params[4]**2])  # W
How can I define the dimensions of the state covariance matrix and the vector shape correctly? Thanks for your inputs.
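Since k_states = p * (p + 1) and the error demands 702 rows, the model evidently has p = 26 series (26 * 27 = 702), and k_posdef was set equal to k_states, so 'state_cov' must be 702 x 702 while update assigns a 3 x 3 diagonal. A minimal sketch of two ways out, assuming only three state innovations are actually intended (the shock-to-state indices below are hypothetical; adapt them to your model):
# Option 1: declare only 3 shock terms, so 'state_cov' is expected to be 3x3;
# 'selection' (k_states x k_posdef) then maps the 3 shocks into the states.
super(TVPVAR, self).__init__(y_t, exog=None, k_states=k_states, k_posdef=3)
self['selection'] = np.zeros((k_states, 3))
self['selection'][2, 0] = 1.0   # hypothetical: which states receive shocks
self['selection'][3, 1] = 1.0
self['selection'][4, 2] = 1.0

# Option 2: keep k_posdef == k_states and assign a full-size diagonal in update.
state_cov = np.zeros((self.k_states, self.k_states))
state_cov[np.diag_indices(3)] = [params[2]**2, params[3]**2, params[4]**2]
self['state_cov'] = state_cov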

Python vestigial parameter and uncallable function

import numpy as np
from scipy.optimize import fsolve
from scipy.integrate import quad
import matplotlib.pyplot as plt

Rgas = 8.31446261815324  # Pa*m**3/(mol*K)

def Peng_Robinson_EOS(P, V, T, Tc, Pc, ω):
    a = (1+(0.37464+1.54226*ω-0.26992*ω**2)*(1-(T/Tc)**(1/2)))**2*Rgas**2*Tc**2/Pc  # Pa*m**3
    b = 0.07780 * Rgas*Tc/Pc
    # note: both denominator factors multiply b
    return P + a/((V+(1-np.sqrt(2))*b)*(V+(1+np.sqrt(2))*b)) - Rgas*T/(V-b)

def PR_Psat(T, Tc, Pc, ω, V, Pguess=100000):
    def integral_diff(Pguess, T, Tc, Pc, ω, V):
        def Psat_integrand(V, Pguess, T, Tc, Pc, ω):
            integrand1 = fsolve(Peng_Robinson_EOS(Pguess, V, T, Tc, Pc, ω), Pguess)
            integrand2 = Pguess
            integrand = integrand1 - integrand2
            return integrand
        Vl = fsolve(Psat_integrand(V, Pguess, T, Tc, Pc, ω), 0)
        Vv_guess = Rgas*T/Pguess
        Vv = fsolve(Psat_integrand(V, Pguess, T, Tc, Pc, ω), Vv_guess)
        Vinf_guess = (Vl + Vv)/2
        Vinf = fsolve(Psat_integrand(V, Pguess, T, Tc, Pc, ω), Vinf_guess)
        left = quad(Psat_integrand(V, Pguess, T, Tc, Pc, ω), Vl, Vinf)[0]
        right = quad(Psat_integrand(V, Pguess, T, Tc, Pc, ω), Vinf, Vv)[0]
        diff = left + right
        return diff
    Psat = fsolve(integral_diff(Pguess, T, Tc, Pc, ω, V), Pguess)
    return Psat
There are two issues with this code.
1: In theory, PR_Psat should not depend on V, since all values of V used in the calculation are found via fsolve. However, because Peng_Robinson_EOS depends on V, Python won't let V be ignored in the enclosing functions. Is there a way to eliminate the need to "specify" V?
Here is the traceback from an earlier version (before V was a parameter of all the functions), to demonstrate:
runfile('...', wdir='...')
Traceback (most recent call last):
  File "<ipython-input-4-0875bc6411e8>", line 1, in <module>
    runfile('...', wdir='...')
  File "...\lib\site-packages\spyder\utils\site\sitecustomize.py", line 705, in runfile
    execfile(filename, namespace)
  File "...\lib\site-packages\spyder\utils\site\sitecustomize.py", line 102, in execfile
    exec(compile(f.read(), filename, 'exec'), namespace)
  File "...", line 40, in <module>
    print(PR_Psat(300,647.1,22055000,0.345))
  File "...", line 37, in PR_Psat
    Psat = fsolve(integral_diff(Pguess,T,Tc,Pc,ω),Pguess)
  File "...", line 25, in integral_diff
    Vl = fsolve(Psat_integrand(V,Pguess,T,Tc,Pc,ω),0)
NameError: name 'V' is not defined
2: It seems that Peng_Robinson_EOS is not being treated as a callable function, but rather as a float. I'm not sure what is causing this.
runfile('...', wdir='...')
Traceback (most recent call last):
  File "<ipython-input-13-0875bc6411e8>", line 1, in <module>
    runfile('...', wdir='...')
  File "C:\Users\Spencer\Anaconda3\lib\site-packages\spyder\utils\site\sitecustomize.py", line 705, in runfile
    execfile(filename, namespace)
  File "...\lib\site-packages\spyder\utils\site\sitecustomize.py", line 102, in execfile
    exec(compile(f.read(), filename, 'exec'), namespace)
  File "...", line 42, in <module>
    print(PR_Psat(300,647.1,22055000,0.345,1))
  File "...", line 39, in PR_Psat
    Psat = fsolve(integral_diff(Pguess,T,Tc,Pc,ω,V),Pguess)
  File "...", line 27, in integral_diff
    Vl = fsolve(Psat_integrand(V,Pguess,T,Tc,Pc,ω),0)
  File "...", line 23, in Psat_integrand
    integrand1 = fsolve(Peng_Robinson_EOS(Pguess,V,T,Tc,Pc,ω),Pguess)
  File "...\lib\site-packages\scipy\optimize\minpack.py", line 148, in fsolve
    res = _root_hybr(func, x0, args, jac=fprime, **options)
  File "...\lib\site-packages\scipy\optimize\minpack.py", line 214, in _root_hybr
    shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
  File "...\lib\site-packages\scipy\optimize\minpack.py", line 27, in _check_func
    res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
TypeError: 'numpy.float64' object is not callable
In theory, Peng_Robinson_EOS should, if plotted as P(V) at constant T, produce a cubic curve. The goal of PR_Psat is to find the value of P for which the integrals between P and the cubic cancel. (Hence integral_diff being plugged into fsolve.)
To summarize the questions,
1) Is there a way to eliminate the need for V in PR_Psat?
2) Why is Peng_Robinson_EOS being flagged as an un-callable numpy.float64 object?
The problem was the call syntax: the arguments for fsolve and quad were passed in the wrong place. To fix both problems, I moved the extra arguments into the args keyword. For example,
incorrect:
Vv = fsolve(Psat_integrand(V,Pguess,T,Tc,Pc,ω),Vv_guess)
correct:
Vv = fsolve(Psat_integrand,Vv_guess,args=(Pguess,T,Tc,Pc,ω))
The reason Peng_Robinson_EOS is reported as a numpy.float64 is that, with the syntax in the question, the function is evaluated first and its return value, not the function itself, is passed to the solver.
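A tiny illustration of the difference (with a hypothetical function f, not from the code above):
def f(x, a):
    return x**2 - a

root = fsolve(f, 1.0, args=(4.0,))   # correct: pass the callable; fsolve supplies x
print(root)                          # [2.]
# fsolve(f(1.0, 4.0), 1.0) would evaluate f first and hand fsolve the float
# -3.0, producing "TypeError: 'numpy.float64' object is not callable".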
Also, with proper syntax, V is no longer an issue.
def PR_Psat(T, Tc, Pc, ω, Pguess=1000):
    def integral_diff(Pguess, T, Tc, Pc, ω):
        def Psat_integrand(V, Pguess, T, Tc, Pc, ω):
            integrand1 = fsolve(Peng_Robinson_EOS, Pguess, args=(V, T, Tc, Pc, ω))
            integrand2 = Pguess
            integrand = integrand1 - integrand2
            return integrand
        Vl = fsolve(Psat_integrand, 0, args=(Pguess, T, Tc, Pc, ω))
        Vv_guess = Rgas*T/Pguess
        Vv = fsolve(Psat_integrand, Vv_guess, args=(Pguess, T, Tc, Pc, ω))
        Vinf_guess = (Vl + Vv)/2
        Vinf = fsolve(Psat_integrand, Vinf_guess, args=(Pguess, T, Tc, Pc, ω))
        left = quad(Psat_integrand, Vl, Vinf, args=(Pguess, T, Tc, Pc, ω))[0]
        right = quad(Psat_integrand, Vinf, Vv, args=(Pguess, T, Tc, Pc, ω))[0]
        diff = left + right
        return diff
    Psat = fsolve(integral_diff, Pguess, args=(T, Tc, Pc, ω))
    return Psat
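With the corrected signature, the call from the earlier traceback (water constants: Tc = 647.1 K, Pc = 22055000 Pa, ω = 0.345) is at least well-formed; whether the solvers converge still depends on the initial guesses:
print(PR_Psat(300, 647.1, 22055000, 0.345))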

Bayesian analysis of chemical network

I am trying to model a simple reaction network and examine the sensitivity of the reaction rates k1 and k2. My reaction goes A --> B --> C with rates k1 and k2, respectively.
import numpy as np
import pymc3 as pm
from matplotlib.pyplot import figure, scatter, legend, plot
from scipy.integrate import solve_ivp
from sys import exit

Nt = 11
time = 10
tt = np.linspace(0, time, Nt+1)
y0 = [1, 0, 0]
k1_0, k2_0 = 1, 0.5

def equat(t, c):
    da_dt = -k1_0*c[0]
    db_dt = k1_0*c[0] - k2_0*c[1]
    dc_dt = k2_0*c[1]
    return da_dt, db_dt, dc_dt

c_est = solve_ivp(equat, t_span=[0, time], t_eval=tt, y0=y0)
niter = 10

with pm.Model() as reak_model:
    k1 = pm.Normal('k1', mu=0, sd=100)
    k2 = pm.Normal('k2', mu=0, sd=100, shape=1)
    sigma = pm.HalfNormal('sigma', sd=1)

    def equat_2(t, c):
        da_dt = -k1*c[0]
        db_dt = k1*c[0] - k2*c[1]
        dc_dt = k2*c[1]
        return da_dt, db_dt, dc_dt

    c = solve_ivp(equat_2, t_span=[0, time], t_eval=tt, y0=y0)
    likelihood = pm.Normal('y', mu=c.y, sd=sigma, observed=c_est.y)
    trace = pm.sample(niter, chains=1)
    pm.traceplot(trace, varnames=['k1', 'k2'])
ValueError: setting an array element with a sequence.
I'm getting this error. Since I am new to Bayesian methods, I am wondering if it's something to do with the distributions?
Traceback (most recent call last):
  File "<ipython-input-95-26ff8f25faea>", line 1, in <module>
    runfile('/Daft regresion.py', wdir='/Final code')
  File "/python3.7/site-packages/spyder_kernels/customize/spydercustomize.py", line 786, in runfile
    execfile(filename, namespace)
  File "python3.7/site-packages/spyder_kernels/customize/spydercustomize.py", line 110, in execfile
    exec(compile(f.read(), filename, 'exec'), namespace)
  File "Daft regression.py", line 53, in <module>
    c = solve_ivp(equat_2, t_span = [0,time], t_eval = tt, y0 = y0)
  File "/python3.7/site-packages/scipy/integrate/_ivp/ivp.py", line 454, in solve_ivp
    solver = method(fun, t0, y0, tf, vectorized=vectorized, **options)
  File "/python3.7/site-packages/scipy/integrate/_ivp/rk.py", line 99, in __init__
    self.f = self.fun(self.t, self.y)
  File "/python3.7/site-packages/scipy/integrate/_ivp/base.py", line 139, in fun
    return self.fun_single(t, y)
  File "/python3.7/site-packages/scipy/integrate/_ivp/base.py", line 21, in fun_wrapped
    return np.asarray(fun(t, y), dtype=dtype)
  File "/python3.7/site-packages/numpy/core/numeric.py", line 501, in asarray
    return array(a, dtype, copy=False, order=order)
ValueError: setting an array element with a sequence.
Edit: Apparently I need a Theano-based ODE solver instead of solve_ivp. Help still appreciated.
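The immediate cause of the ValueError is that equat_2 returns Theano/pymc3 random variables, and solve_ivp tries to coerce them into a float array. A hedged sketch of the Theano-friendly route, assuming pymc3 >= 3.7, which ships an ODE wrapper that keeps the integration inside the model graph (slow, but it typechecks):
from pymc3.ode import DifferentialEquation

# Right-hand side in the signature DifferentialEquation expects: f(y, t, p).
def rhs(y, t, p):
    return [-p[0]*y[0], p[0]*y[0] - p[1]*y[1], p[1]*y[1]]

ode_model = DifferentialEquation(func=rhs, times=tt, n_states=3, n_theta=2)

with pm.Model() as reak_model:
    k1 = pm.HalfNormal('k1', sd=1)   # rates are non-negative
    k2 = pm.HalfNormal('k2', sd=1)
    sigma = pm.HalfNormal('sigma', sd=1)
    mu = ode_model(y0=y0, theta=[k1, k2])        # shape (len(tt), 3)
    pm.Normal('y', mu=mu, sd=sigma, observed=c_est.y.T)
    trace = pm.sample(500, chains=1)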

Cuda out of resources error when running python numbapro

I am trying to run a CUDA kernel with numbapro python, but I keep getting an out of resources error.
I then tried to execute the kernel in a loop with smaller arrays, but that still gave me the same error.
Here is my error message:
Traceback (most recent call last):
  File "./predict.py", line 418, in <module>
    predict[griddim, blockdim, stream](callResult_d, catCount, numWords, counts_d, indptr_d, indices_d, probtcArray_d, priorC_d)
  File "/home/mhagen/Developer/anaconda/lib/python2.7/site-packages/numba/cuda/compiler.py", line 228, in __call__
    sharedmem=self.sharedmem)
  File "/home/mhagen/Developer/anaconda/lib/python2.7/site-packages/numba/cuda/compiler.py", line 268, in _kernel_call
    cu_func(*args)
  File "/home/mhagen/Developer/anaconda/lib/python2.7/site-packages/numba/cuda/cudadrv/driver.py", line 1044, in __call__
    self.sharedmem, streamhandle, args)
  File "/home/mhagen/Developer/anaconda/lib/python2.7/site-packages/numba/cuda/cudadrv/driver.py", line 1088, in launch_kernel
    None)
  File "/home/mhagen/Developer/anaconda/lib/python2.7/site-packages/numba/cuda/cudadrv/driver.py", line 215, in safe_cuda_api_call
    self._check_error(fname, retcode)
  File "/home/mhagen/Developer/anaconda/lib/python2.7/site-packages/numba/cuda/cudadrv/driver.py", line 245, in _check_error
    raise CudaAPIError(retcode, msg)
numba.cuda.cudadrv.driver.CudaAPIError: Call to cuLaunchKernel results in CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES
Here is my source code:
import math
import time

import numpy as np

from numbapro.cudalib import cusparse
from numba import *
from numbapro import cuda

# numDocs, docTestList, numWords, probtcArray, priorC and music_categories
# are defined earlier (not shown).

@cuda.jit(argtypes=(double[:], int64, int64, double[:], int64[:], int64[:], double[:,:], double[:]))
def predict(callResult, catCount, wordCount, counts, indptr, indices, probtcArray, priorC):
    i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
    correct = 0
    wrong = 0
    lastDocIndex = -1
    maxProb = -1e6
    picked = -1
    for cat in range(catCount):
        probSum = 0.0
        for j in range(indptr[i], indptr[i+1]):
            wordIndex = indices[j]
            probSum += (counts[j]*math.log(probtcArray[cat, wordIndex]))
        probSum += math.log(priorC[cat])
        if probSum > maxProb:
            maxProb = probSum
            picked = cat
    callResult[i] = picked

predictions = []
counter = 1000

for i in range(int(math.ceil(numDocs/(counter*1.0)))):
    docTestSliceList = docTestList[i*counter:(i+1)*counter]
    numDocsSlice = len(docTestSliceList)
    docTestArray = np.zeros((numDocsSlice, numWords))
    for j, doc in enumerate(docTestSliceList):
        for ind in doc:
            docTestArray[j, ind['term']] = ind['count']
    docTestArraySparse = cusparse.ss.csr_matrix(docTestArray)

    start = time.time()
    OPT_N = numDocsSlice
    blockdim = 1024, 1
    griddim = int(math.ceil(float(OPT_N)/blockdim[0])), 1

    catCount = len(music_categories)
    callResult = np.zeros(numDocsSlice)
    stream = cuda.stream()
    with stream.auto_synchronize():
        probtcArray_d = cuda.to_device(np.asarray(probtcArray), stream)
        priorC_d = cuda.to_device(np.asarray(priorC), stream)
        callResult_d = cuda.to_device(callResult, stream)
        counts_d = cuda.to_device(docTestArraySparse.data, stream)
        indptr_d = cuda.to_device(docTestArraySparse.indptr, stream)
        indices_d = cuda.to_device(docTestArraySparse.indices, stream)
        predict[griddim, blockdim, stream](callResult_d, catCount, numWords, counts_d, indptr_d, indices_d, probtcArray_d, priorC_d)
        callResult_d.to_host(stream)
    # stream.synchronize()
    predictions += list(callResult)
    print "prediction %d: %f" % (i, time.time()-start)
I found out the problem was in the cuda procedure.
When you call predict, blockdim is set to 1024.
predict[griddim, blockdim, stream](callResult_d, catCount, numWords, counts_d, indptr_d, indices_d, probtcArray_d, priorC_d)
But the procedure is called iteratively with slice sizes of 1000 elements, not 1024.
So, in the procedure, the last 24 threads will attempt to write out of bounds in the return array.
Sending a number-of-elements parameter (n_el) and placing a bounds check in the cuda procedure solves it.
@cuda.jit(argtypes=(double[:], int64, int64, int64, double[:], int64[:], int64[:], double[:,:], double[:]))
def predict(callResult, n_el, catCount, wordCount, counts, indptr, indices, probtcArray, priorC):
    i = cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x
    if i < n_el:
        ....
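The launch site then passes the slice length along with the other arguments, e.g. (a sketch based on the loop above, where numDocsSlice is the 1000-element slice size):
predict[griddim, blockdim, stream](callResult_d, numDocsSlice, catCount, numWords,
                                   counts_d, indptr_d, indices_d,
                                   probtcArray_d, priorC_d)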

How to implement div and grad in sympy?

Playing around a lot with sympy lately, I ran into the problem of calculating the divergence and gradient of scalar and vector fields. What I want to do for now is to solve the heat equation, i.e.
d/dt u(x,t) - a * lap(u(x,t)) = 0, with lap(u) = div(grad(u))
on a scalar field. Since I could not find lap, div and grad in sympy.physics.mechanics, I tried to implement them myself:
from sympy import *
from sympy.physics.mechanics import *

o = ReferenceFrame('o')
x, y, z = symbols('x y z')

class div(Function):
    @classmethod
    def eval(cls, F):
        return F.diff(x, o).dot(o.x) + F.diff(y, o).dot(o.y) + F.diff(z, o).dot(o.z)

class grad(Function):
    @classmethod
    def eval(cls, F):
        return o.x * F.diff(x) + o.y * F.diff(y) + o.z * F.diff(z)

f = x**2 + y**3 + z*0.5

print grad(f)
print type(grad(f))
print div(grad(f))
Unluckily, this gives:
2*x*o.x + 3*y**2*o.y + 0.500000000000000*o.z
Traceback (most recent call last):
  File "/home/fortmeier/Desktop/autokernel/autokernel/tools/GenerateCode.py", line 24, in <module>
    print div(grad(f))
  File "/usr/local/lib/python2.7/dist-packages/sympy/core/cache.py", line 93, in wrapper
    r = func(*args, **kw_args)
  File "/usr/local/lib/python2.7/dist-packages/sympy/core/function.py", line 368, in __new__
    result = super(Function, cls).__new__(cls, *args, **options)
  File "/usr/local/lib/python2.7/dist-packages/sympy/core/cache.py", line 93, in wrapper
    r = func(*args, **kw_args)
  File "/usr/local/lib/python2.7/dist-packages/sympy/core/function.py", line 188, in __new__
    args = list(map(sympify, args))
  File "/usr/local/lib/python2.7/dist-packages/sympy/core/sympify.py", line 313, in sympify
    expr = parse_expr(a, local_dict=locals, transformations=transformations, evaluate=evaluate)
  File "/usr/local/lib/python2.7/dist-packages/sympy/parsing/sympy_parser.py", line 757, in parse_expr
    return eval_expr(code, local_dict, global_dict)
  File "/usr/local/lib/python2.7/dist-packages/sympy/parsing/sympy_parser.py", line 691, in eval_expr
    code, global_dict, local_dict)  # take local objects in preference
  File "<string>", line 1, in <module>
AttributeError: 'Symbol' object has no attribute 'x'
[Finished in 0.3s with exit code 1]
I know that I could do something with the galgebra module, but first I'd like to understand what's going on here. The question thus is: what am I missing?
This looks like a bug in SymPy: sympify(ReferenceFrame('o').x) does not work. Because div and grad subclass Function, Function.__new__ sympifies their arguments, and sympify falls back to parsing the Vector's string form ('o.x' and friends), where o resolves to a plain Symbol with no attribute x, exactly as the traceback shows.
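A workaround sketch, assuming the Function class wrappers are not essential to you: plain Python functions skip Function.__new__ and never sympify the Vector, so the composition works. You lose the unevaluated symbolic forms that Function subclasses provide, but for computing lap = div(grad(...)) that does not matter:
from sympy import symbols
from sympy.physics.mechanics import ReferenceFrame

o = ReferenceFrame('o')
x, y, z = symbols('x y z')

def grad(F):
    return o.x * F.diff(x) + o.y * F.diff(y) + o.z * F.diff(z)

def div(F):
    return F.diff(x, o).dot(o.x) + F.diff(y, o).dot(o.y) + F.diff(z, o).dot(o.z)

f = x**2 + y**3 + z*0.5
print div(grad(f))   # 2 + 6*y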
