'numpy.float64' object is not callable - python

For some reason that I can't wrap my head around, it says TypeError: 'numpy.float64' object is not callable for the following code:
import pylab as pl
import scipy as sp
import numpy as num
import scipy.integrate as spi
import matplotlib.pyplot as mat
import scipy.optimize as spo
from itertools import cycle
from matplotlib.font_manager import FontProperties
rs=.14
ra=0.0027
Mz=91.
ja=0.81
js=-.033
Gz=2.5
k=10**6
def sig_a(s,Gz):
    return (sp.pi)*((s*ra+(s-Mz**2)*ja)/((s-Mz**2)**2+Mz**2*Gz**2))
def sig_s(s,Gz):
    return (4*sp.pi/3)*(1/s+(s*rs+(s-Mz**2)*js)/((s-Mz**2)**2+Mz**2*Gz**2))
cos_theta=num.arange(-0.95,0.95,0.05)
E=num.arange(20,140,.1)
s=E**2
def f_theta(x,s):
    ans=k*(sig_s(s,Gz)*(1+(x)**2)+sig_a(s,Gz)*x)
    return and
d=num.arange(0.05,1.80,0.25)
x1=[]
for t in cos_theta:
    m=((t+t+0.05)/2)
    x1.append(m)
x01=num.array(x1)
def N_mu1(x0,sig_a,sig_s): #<-------d=0.05
    n=(k*(sig_s*((x0**2.)*0.05+(0.05/4.)+(0.05**3.)/(12.))+sig_a(2.*x0*0.05)))
    return n
idealN=[]
randomN=[]
est_sig_a=[]
est_sig_s=[]
ratio_error=[]
for i in s:
    for j in cos_theta:
        n=(spi.quad(f_theta,j,j+0.05,args=i))
        idealN.append(n[0])
    for k in idealN:
        r=num.random.poisson(k,1)
        randomN.append(r[0])
    siga=sig_a(i,Gz)
    sigs=sig_s(i,Gz)
    R=num.array(randomN)
    Error=(R**0.5)
    po,po_cov=spo.curve_fit(N_mu1,x01,R,[siga,sigs],Error)
    est_sig_a.append(po[0])
    est_sig_s.append(po[1])
    e=((((po_cov[0])/(po[0]))+((po_cov[1])/(po[1])))*((po[0])/(po[1])))
    ratio_error.append(e)
    idealN=[]
    randomN=[]
And the error shown was:
TypeError Traceback (most recent call last)
118 R=num.array(randomN)
119 error=(R**0.5)
--> 120 po,po_cov=spo.curve_fit(N_mu1,x01,R,[siga,sigs],error)
121 est_sig_a.append(po[0])
122 est_sig_s.append(po[1])
TypeError: 'numpy.float64' object is not callable
I am struggling to find the mistake in the code.

The problem is here:
def N_mu1(x0,sig_a,sig_s): #<-------d=0.05
    n=(k*(sig_s*((x0**2.)*0.05+(0.05/4.)+(0.05**3.)/(12.))+sig_a(2.*x0*0.05)))
    return n #                                             ^ argument is a numpy.float64, not a function
sig_a is a parameter of N_mu1, so inside the function it refers to the value that curve_fit passes in (a numpy.float64), not to the sig_a function you defined at the top, yet it is being called as if it were a function.

Also, in f_theta, return and should be return ans.
And in N_mu1, sig_a(2.*x0*0.05) should be changed to sig_a*(2.*x0*0.05), i.e. a multiplication rather than a call.
These changes will make your program executable.
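Putting both fixes together, the two functions would look roughly like this (a sketch based on the question's own code and constants, not a tested rewrite):
def f_theta(x,s):
    ans=k*(sig_s(s,Gz)*(1+(x)**2)+sig_a(s,Gz)*x)
    return ans  # was: return and
def N_mu1(x0,sig_a,sig_s): #<-------d=0.05
    # sig_a and sig_s here are the fit parameters (floats), so multiply, don't call
    n=(k*(sig_s*((x0**2.)*0.05+(0.05/4.)+(0.05**3.)/(12.))+sig_a*(2.*x0*0.05)))
    return n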

Related

optimization "objective function must return a scalar value"

I'm having a problem with the optimization function. I tried to summarize the problem and write a minimal example, but I couldn't do any better; I hope you can help me all the same.
import datetime as dt
import pandas_datareader as pdr
import numpy as np
import scipy.optimize as sp
def gettin_data(stock,start,end):
    stock_data=pdr.get_data_yahoo(stock,start=start,end=end)
    stock_data=stock_data['Close'].dropna(axis=0)
    returns= np.diff(np.log(stock_data),axis=0)
    meanret=np.mean(returns,axis=0)
    cov_mat=np.cov(returns, rowvar=False)
    return meanret , cov_mat
def ptf_performance(weight, mean, cov_mat):
    returns=np.sum(weight*mean)*252
    var=np.sqrt(np.dot(weight.T, np.dot(cov_mat,weight)))*np.sqrt(252)
    return returns,var
def negative_sr(mean_ret, cov_mat, weight, risk_free=0 ):
    p_ret,p_std=ptf_performance(weight,mean_ret,cov_mat)
    return -(p_ret-risk_free)/p_std
def opt(mean_ret,cov_mat,riskfree=0,constraintset=(0,1)):
    numasset=len(mean_ret)
    args=(mean_ret, cov_mat,riskfree)
    bound=constraintset
    bounds=tuple(bound for asset in range(numasset))
    constraint=({'type':'eq','fun': lambda x :np.sum(x)-1})
    return sp.minimize(negative_sr,numasset*[1./numasset],args=args,method='SLSQP',bounds=bounds,constraints=constraint)
stock_list=['AAPL','TSLA','MSFT']
endate=dt.datetime.now()
weight=np.array((0.3,0.3,0.4))
startdate=endate-dt.timedelta(365)
meanret,cov_mat=gettin_data(stock_list, startdate, endate)
ret,std=ptf_performance(weight, meanret, cov_mat)
b=opt(meanret,cov_mat)
print(b)
the error is:
The user-provided objective function must return a scalar value.
The above exception was the direct cause of the following exception:
File "/Users/federicoruggieri/Desktop/phyton/import datetime as dt.py", line 35, in opt
return sp.minimize(negative_sr,numasset*[1./numasset],args=args,method='SLSQP',bounds=bounds,constraints=constraint)
File "/Users/federicoruggieri/Desktop/phyton/import datetime as dt.py", line 43, in <module>
b=opt(meanret,cov_mat)
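No answer is included above, but a likely cause (an assumption, not confirmed in the thread) is the argument order of negative_sr: scipy.optimize.minimize calls the objective as fun(x, *args), so with args=(mean_ret, cov_mat, riskfree) the weight vector is delivered to the mean_ret parameter and the function ends up returning an array rather than a scalar. A minimal sketch of the reordered signature:
def negative_sr(weight, mean_ret, cov_mat, risk_free=0):
    # the optimisation variable (the weights) must come first, because
    # minimize calls the objective as fun(x, *args)
    p_ret, p_std = ptf_performance(weight, mean_ret, cov_mat)
    return -(p_ret - risk_free)/p_std  # scalar, as SLSQP expects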

TypeError: 'module' object is not callable in python and pandas

I imported the module into my pandas project and called it correctly, using import main and then main.main(data, 1, 10, 2.5), but I am getting an error:
TypeError Traceback (most recent call last)
<ipython-input-52-e9913b227737> in <module>()
----> 1 main.main(data, 1, 10, 2.5)
38 dat_sh = data.shape[0]
39 #Z = random.sample(range(0,U),k_max)
---> 40 Z = cf.centroid_finder(data,sp_atr,k_max)
41
42 prototypes = {i: data[j:j+1].values.tolist()[0] for i,j in enumerate(Z)}
11 for i in range(dat_sh):
12 for j in range(dat_sh):
---> 13 D[i][j] = ed(sub_atr[i],sub_atr[j])
14
15
TypeError: 'module' object is not callable
ed is the Euclidean distance:
def ed(X2, X1):
    return sqrt(sum(np.subtract(X1,X2)**2))
There may be some confusion regarding the importing of modules vs functions.
It's possible to recreate your error with a simple example, assuming that module/function importing has gotten mixed up somewhere along the way. Consider a case where we pass initial values a and b through a centroid_finder() function, which lives in cf.py:
# import cf.py as cf
import cf
a, b = ([1,2,3], [2,3,4])
cf.centroid_finder(a, b)
But centroid_finder() calls ed(), which lives in ed.py:
## cf.py
# import ed.py as ed
import ed
def centroid_finder(a, b):
    print(ed(a, b))
## ed.py
from numpy import sqrt, sum
import numpy as np
def ed(X2, X1):
    return sqrt(sum(np.subtract(X1,X2)**2))
Here, calling centroid_finder() will give the error you observed:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-1-e76ec6a0496c> in <module>()
3 a, b = ([1,2,3], [2,3,4])
4
----> 5 cf.centroid_finder(a, b)
cf.py in centroid_finder(a, b)
2
3 def centroid_finder(a, b):
----> 4 print(ed(a, b))
TypeError: 'module' object is not callable
That's because you imported a module, ed.py as ed...but what you wanted was to call the ed() function that lives inside of ed.py. That's ed.ed()!
Changing centroid_finder() to call ed.ed() produces the desired result:
# cf.py
import ed
def centroid_finder(a, b):
    print(ed.ed(a, b))
Now, from the main script:
cf.centroid_finder(a, b)
# 1.73205080757
There's at least one undisclosed import shorthand in your example code, where you call sqrt in ed(). There isn't a built-in sqrt() in Python; it is most likely imported from either math or numpy, e.g. from numpy import sqrt. That's not a problem, per se, but given the error you're getting about modules being non-callable, you might benefit from explicitly calling functions in your code from the modules they live in.
For example, using import numpy as np, call np.sqrt() instead of just importing sqrt() directly. This is a defensive programming posture that will prevent similar confusion in the future. (You do this already with np.subtract() in ed(); it's unclear why sqrt() doesn't get the same treatment.)
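For example, ed() rewritten in that defensive, module-qualified style might look like this (a sketch, keeping the original signature):
import numpy as np
def ed(X2, X1):
    # every call goes through the numpy namespace, so an imported module
    # named sqrt or sum can never shadow the function being called
    return np.sqrt(np.sum(np.subtract(X1, X2)**2))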

sympy lambdify with numexpr and sqrt

I'm trying to speed up some numeric code generated by lambdify using numexpr. Unfortunately, the numexpr-based function breaks when using the sqrt function, even though it's one of the supported functions.
This reproduces the issue for me:
import sympy
import numpy as np
import numexpr
from sympy.utilities.lambdify import lambdify
expr = sympy.S('b*sqrt(a) - a**2')
a, b = sorted(expr.free_symbols, key=lambda s: s.name)
func_numpy = lambdify((a,b), expr, modules=[np], dummify=False)
func_numexpr = lambdify((a,b), expr, modules=[numexpr], dummify=False)
foo, bar = np.random.random((2, 4))
print sympy.__version__
print func_numpy(foo, bar)
print func_numexpr(foo, bar)
When I run this, the output is:
0.7.6
[-0.02062061 0.08648306 -0.57868128 0.27598245]
Traceback (most recent call last):
File "sympy_test.py", line 17, in <module>
print func_numexpr(foo, bar)
File "<string>", line 1, in <lambda>
NameError: global name 'sqrt' is not defined
As a sanity check, I also tried calling numexpr directly:
numexpr.evaluate('b*sqrt(a) - a**2', local_dict=dict(a=foo, b=bar))
which works as expected, producing the same result as func_numpy.
EDIT: It works when I use the line:
func_numexpr = lambdify((a,b), expr, modules=['numexpr'], dummify=False)
Is this a sympy bug?
You can change np.sqrt(9) to numexpr.evaluate('9**0.5').
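For reference, the workaround mentioned in the question's EDIT, passing the module name as a string instead of the module object, looks like this (a sketch based on the snippet above):
func_numexpr = lambdify((a, b), expr, modules=['numexpr'], dummify=False)
print func_numexpr(foo, bar)  # evaluates without the NameError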

Factorial in numpy and scipy

How can I import the factorial function from numpy and scipy separately, in order to see which one is faster?
I already imported factorial from Python itself via import math, but that does not work for numpy and scipy.
You can import them like this:
In [7]: import scipy, numpy, math
In [8]: scipy.math.factorial, numpy.math.factorial, math.factorial
Out[8]:
(<function math.factorial>,
<function math.factorial>,
<function math.factorial>)
scipy.math.factorial and numpy.math.factorial appear to simply be aliases for math.factorial; that is, both scipy.math.factorial is math.factorial and numpy.math.factorial is math.factorial give True.
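As a quick check of that claim (valid on the SciPy/NumPy versions current when this was written; newer releases have removed the scipy.math and numpy.math shortcuts, so this is version-dependent):
In [9]: scipy.math.factorial is math.factorial, numpy.math.factorial is math.factorial
Out[9]: (True, True)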
Ashwini's answer is great in pointing out that scipy.math.factorial, numpy.math.factorial and math.factorial are the same function. However, I'd recommend the one Janne mentioned: scipy.special.factorial is different. The SciPy one can take an np.ndarray as input, while the others can't.
In [12]: import scipy.special
In [13]: temp = np.arange(10) # temp is an np.ndarray
In [14]: math.factorial(temp) # This won't work
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-14-039ec0734458> in <module>()
----> 1 math.factorial(temp)
TypeError: only length-1 arrays can be converted to Python scalars
In [15]: scipy.special.factorial(temp) # This works!
Out[15]:
array([ 1.00000000e+00, 1.00000000e+00, 2.00000000e+00,
6.00000000e+00, 2.40000000e+01, 1.20000000e+02,
7.20000000e+02, 5.04000000e+03, 4.03200000e+04,
3.62880000e+05])
So if you are applying factorial to an np.ndarray, the SciPy version is easier to code and faster than writing the for-loops yourself.
SciPy has the function scipy.special.factorial (formerly scipy.misc.factorial)
>>> import math
>>> import scipy.special
>>> math.factorial(6)
720
>>> scipy.special.factorial(6)
array(720.0)
from numpy import prod
def factorial(n):
    print prod(range(1,n+1))
or with mul from operator:
from operator import mul
def factorial(n):
    print reduce(mul,range(1,n+1))
or completely without help:
def factorial(n):
    print reduce((lambda x,y: x*y),range(1,n+1))
After running the different factorial functions mentioned above, written by different people, it turns out that math.factorial is the fastest.
The running times for the different functions were shown in an attached image.
You can save some homemade factorial functions in a separate module, utils.py, then import them and compare their performance with the predefined ones in scipy, numpy and math using timeit.
In this case I used as the external method the last one proposed by Stefan Gruenwald:
import numpy as np
def factorial(n):
    return reduce((lambda x,y: x*y),range(1,n+1))
Main code (I used a framework proposed by JoshAdel in another post, look for how-can-i-get-an-array-of-alternating-values-in-python):
from timeit import Timer
from utils import factorial
import scipy
n = 100
# test the time for the factorial function obtained in different ways:
if __name__ == '__main__':
    setupstr="""
import scipy, numpy, math
from utils import factorial
n = 100
"""
    method1="""
factorial(n)
"""
    method2="""
scipy.math.factorial(n) # same algo as numpy.math.factorial, math.factorial
"""
    nl = 1000
    t1 = Timer(method1, setupstr).timeit(nl)
    t2 = Timer(method2, setupstr).timeit(nl)
    print 'method1', t1
    print 'method2', t2
    print factorial(n)
    print scipy.math.factorial(n)
Which provides:
method1 0.0195569992065
method2 0.00638914108276
93326215443944152681699238856266700490715968264381621468592963895217599993229915608941463976156518286253697920827223758251185210916864000000000000000000000000
93326215443944152681699238856266700490715968264381621468592963895217599993229915608941463976156518286253697920827223758251185210916864000000000000000000000000
Process finished with exit code 0

basic python query. NameError in __init__ method

NameError: name 'the_shape' is not defined
I get the following error when I try to import my KMeans module containing the class named KMeansClass. The KMeans.py module has the following structure:
import numpy as np
import scipy
class KMeansClass:
    #takes in an npArray like object
    def __init__(self,dataset,the_shape=5):
        self.dataset=dataset
        self.mu = np.empty(shape=the_shape)
and in IPython, when I try
import KMeans
I get NameError: name 'the_shape' is not defined.
I am really new to Python OOP and don't know why this is happening, as all I'm doing is passing arguments to __init__ and assigning them to instance variables.
Any help would be appreciated.
Thanks in advance!
Full Traceback:
NameError Traceback (most recent call last)
<ipython-input-2-44169aae5584> in <module>()
----> 1 import kmeans
/Users/path to file/kmeans.py in <module>()
1 import numpy as np
2 import scipy
----> 3 class KMeansClass:
4 #takes in an npArray like object
5 def __init__(self,dataset,the_shape=5):
/Users/path_to_file/kmeans.py in KMeansClass()
5 def __init__(self,dataset,the_shape=5):
6 self.dataset=dataset
----> 7 self.mu = np.empty(shape=the_shape)
8
9
NameError: name 'the_shape' is not defined
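No answer is shown here, but the traceback itself is telling: the failing frame is KMeansClass(), i.e. line 7 is being executed in the class body rather than inside __init__, where the default parameter the_shape would be in scope. That usually points to an indentation problem (often mixed tabs and spaces) in the file actually on disk, even if the pasted code looks right. A sketch of the intended layout, offered as an assumption rather than a confirmed diagnosis:
class KMeansClass:
    def __init__(self, dataset, the_shape=5):
        # both assignments must sit inside __init__, indented consistently with
        # spaces; if self.mu drifts out to class-body level, the_shape is not
        # defined there and the import fails with exactly this NameError
        self.dataset = dataset
        self.mu = np.empty(shape=the_shape)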
