Python 3: My decorated function runs twice

I have some simple Fibonacci sequence functions that I am using to practice unit testing and builds with Travis CI/Docker:
fib_recursive.py:
from fib.fib import benchmark, fib_rec_memo

@benchmark
def print_fib(n):
    for x in range(0, n):
        print(fib_rec_memo(x))

print_fib(100)
Here is the source code of the imported fib.fib module:
from time import time
from functools import wraps

def benchmark(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        t = time()
        func(*args, **kwargs)
        print(func.__name__, 'took:', time() - t)
        return func(*args, **kwargs)
    return wrapper

def fib_rec_memo(n, hash = {0:1, 1:1}):
    if n not in hash:
        hash[n] = fib_rec_memo(n-1) + fib_rec_memo(n-2)
    return hash[n]

@benchmark
def fib_standard(num):
    a, b = 0, 1
    c = []
    while a < num:       # First iteration:
        c.append(a)      # yield 0 to start with and then
        a, b = b, a + b  # a will now be 1, and b will also be 1, (0 + 1)
    return c
For some reason, executing python3 ./fib_recursive.py runs the function twice:
# python3 ./fib_recursive.py
1
1
2
3
5
8
13
21
34
55
print_fib took: 0.00011181831359863281
1
1
2
3
5
8
13
21
34
55
#
Does anyone know why?
Thanks.

You are calling the decorated function twice in the wrapper function:
func(*args, **kwargs) # here ...
print(func.__name__, 'took:', time() - t)
return func(*args, **kwargs) # ... and here again
You can avoid that by storing the result to a variable and returning that stored result after your timing output:
rval = func(*args, **kwargs) # call it once and store result ...
print(func.__name__, 'took:', time() - t)
return rval # ... then return result
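Putting it together, a minimal sketch of the corrected benchmark decorator (identical to the original apart from storing the single call in rval):
from time import time
from functools import wraps

def benchmark(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        t = time()
        rval = func(*args, **kwargs)                 # call the wrapped function exactly once
        print(func.__name__, 'took:', time() - t)
        return rval                                  # return the stored result
    return wrapper
With this version, python3 ./fib_recursive.py prints the sequence only once, followed by the timing line.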

Related

Function missing a positional argument

I've got the code below that is giving me the following error: bubble_sort() missing 1 required positional argument: 'a_list'. I am passing a list to the function, so I do not know where the error is coming from. I am learning Python right now, so understanding what I am doing wrong, and why, is important to me.
import functools
import time

def sort_timer(func):
    """
    Timer function that counts how many seconds it takes the decoration function to run, in this case the sort
    functions described above. Returns the number of seconds using the time module.
    :param func:
    :return total_time:
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """
        Calculates the total time a function runs.
        :return:
        """
        total_time = None
        start_time = time.perf_counter()
        func(*args, **kwargs)
        end_time = time.perf_counter()
        local_total_time = end_time - start_time
        return total_time
    return wrapper()

@sort_timer
def bubble_sort(a_list):
    """
    Sorts a_list in ascending order
    """
    for pass_num in range(len(a_list) - 1):
        for index in range(len(a_list) - 1 - pass_num):
            if a_list[index] > a_list[index + 1]:
                temp = a_list[index]
                a_list[index] = a_list[index + 1]
                a_list[index + 1] = temp

def main():
    random_list = [5, 1, 10, 15, 3, 6, 45, 21, 90, 76, 44, 33, 41, 27, 81]
    print(bubble_sort(random_list))

if __name__ == "__main__":
    main()
There are two problems with your code:
1. You are calling the wrapper function when you return it from sort_timer; instead, return the function itself without calling it:
return wrapper
2. You are printing the return value of bubble_sort, but that function doesn't return anything, so you just end up printing None. Instead, print random_list after running bubble_sort on it:
bubble_sort(random_list)
print(random_list)
Here is your code with those fixes applied:
import functools
import time

def sort_timer(func):
    """
    Timer function that counts how many seconds it takes the decoration function to run, in this case the sort
    functions described above. Returns the number of seconds using the time module.
    :param func:
    :return total_time:
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """
        Calculates the total time a function runs.
        :return:
        """
        total_time = None
        start_time = time.perf_counter()
        func(*args, **kwargs)
        end_time = time.perf_counter()
        local_total_time = end_time - start_time
        return total_time
    return wrapper

@sort_timer
def bubble_sort(a_list):
    """
    Sorts a_list in ascending order
    """
    for pass_num in range(len(a_list) - 1):
        for index in range(len(a_list) - 1 - pass_num):
            if a_list[index] > a_list[index + 1]:
                temp = a_list[index]
                a_list[index] = a_list[index + 1]
                a_list[index + 1] = temp

def main():
    random_list = [5, 1, 10, 15, 3, 6, 45, 21, 90, 76, 44, 33, 41, 27, 81]
    bubble_sort(random_list)
    print(random_list)

if __name__ == "__main__":
    main()
Running it produces the following output:
[1, 3, 5, 6, 10, 15, 21, 27, 33, 41, 44, 45, 76, 81, 90]
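Note that wrapper still returns total_time, which is never assigned, so a decorated sort returns None. If the intent, judging by the docstring (an assumption on my part), was for sort_timer to report the elapsed seconds, the wrapper could return the measured time instead; a minimal sketch:
import functools
import time

def sort_timer(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.perf_counter()
        func(*args, **kwargs)             # run the decorated sort
        end_time = time.perf_counter()
        return end_time - start_time      # elapsed seconds, as the docstring describes
    return wrapper
With that change, bubble_sort(random_list) returns the elapsed time, which you could print alongside the sorted list.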

List of functions -> single function (in Python)

Say that I have a list of functions in Python: [f1, f2, f3]
How do I return a single function F := f3(f2(f1())) (notice that F is itself a function)? I know it's possible with reduce(), but I was wondering if there's another way to do it without using libraries.
edit:
Also, notice that the length of the list can be greater than 3
I tried:
def func(list):
    i = 1
    new_function = filters[0]
    while i <= len(filters) - 1:
        new_function = filters[i](new_function)
        i += 1
    return new_function
but it doesn't work
The problem in your code is that filters[i](new_function) passes the previous function object itself as the argument, rather than building a new function that calls it.
I would suggest this recursive solution:
def chain(first, *rest):
    return lambda x: first(chain(*rest)(x) if rest else x)
Example use:
def inc(x):
    return x + 1

def double(x):
    return x * 2

def square(x):
    return x * x

f = chain(square, double, inc)
print(f(5))  # ((5+1)*2) ** 2 == 144
I see that in the code you tried, you never actually call the first of your functions. (I also assume that your code actually starts with def func(filters):, given that the body uses filters.)
Taking into account that f1() takes no parameters, while each of the rest takes the return value of the previous function, this should work:
def fit(funcs):
    v = funcs[0]()
    for f in funcs[1:]:
        v = f(v)
    return v

def f1():
    return 42

def f2(x):
    return x

def f3(x):
    return x

fs = [f1, f2, f3]
a = lambda: fit(fs)
print(a())
Output: 42
def get_single_func(func_list):
    def single_func(*args, **kwargs):
        ret = func_list[0](*args, **kwargs)
        for func in func_list[1:]:
            ret = func(ret)
        return ret
    return single_func
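For completeness, a short usage sketch of get_single_func, reusing the inc/double/square helpers from the chain answer above, together with the functools.reduce one-liner the question alludes to:
from functools import reduce

def inc(x):
    return x + 1

def double(x):
    return x * 2

def square(x):
    return x * x

F = get_single_func([inc, double, square])
print(F(5))   # square(double(inc(5))) == 144

# the same composition with reduce
G = reduce(lambda acc, f: (lambda x: f(acc(x))), [inc, double, square])
print(G(5))   # 144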

how to override inner method function in outer

How to modify the code
def mackfunc(a):
    def func(b):
        return a + b
    return func

f = mackfunc(1)
print(f(1))
print(f(2))
The output is 2 and 3
def mackfunc(a):
    def func(b):
        return a + b
    return func

f = mackfunc(1)
# add something here to change the output
print(f(1))
print(f(2))
I want the output here to be 3 and 4.
You can't. func is a local variable, which cannot be interfered with from outside the function. You can either redefine mackfunc entirely, or wrap it.
# redefine
def mackfunc(a):
    def func(b):
        return a + b + 1
    return func

# wrap
orig_mackfunc = mackfunc
def mackfunc(a):
    orig_func = orig_mackfunc(a)
    def func(b):
        return orig_func(b) + 1
    return func
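Note that either variant only affects closures created after the redefinition; the f you built earlier still holds the original func, so you have to rebuild it to see the new output:
f = mackfunc(1)   # recreate f from the redefined/wrapped mackfunc
print(f(1))       # 3
print(f(2))       # 4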
You can use a custom function inside your inner function and then redefine it later:
def custom(arg):
    return arg

def mackfunc(arg):
    def func(b):
        return custom(arg) + b
    return func

f = mackfunc(1)
print(f(1))
print(f(2))
print()

def newcustom(arg):
    return arg + 1

custom = newcustom

print(f(1))
print(f(2))
OUTPUT
2
3

3
4
Hope this helps. :)

Simple way to count the number of times def f(x) is evaluated?

I am trying to count the number of times that f(x) is evaluated, without having to change my code too much. It doesn't seem like it should be very difficult, but I can't figure it out.
def f(x):
    f = 12*x**5 - 45*x**4 + 40*x**3 + 5
    return f

def bounding():
    d = .1
    x = 6
    n = 0
    while n < 50:
        Lb = x - d
        n += 1
        Ub = x + d
        if f(Lb) >= f(x) and f(Ub) <= f(x):
            x = x + d
        elif f(Lb) <= f(x) and f(Ub) >= f(x):
            x = x - d
        elif f(Lb) >= f(x) and f(Ub) >= f(x):
            print("Lower bound:", Lb, "Upperbound:", Ub)
            break
    print(n)

bounding()
A decorator-based solution that you can apply to any function you want:
def count(fn):
    def wrapper(*args, **kwargs):
        wrapper.called += 1
        return fn(*args, **kwargs)
    wrapper.called = 0
    wrapper.__name__ = fn.__name__
    return wrapper

@count
def test():
    print("something")

test()
print(test.called)  # will print 1
class F:
    count = 0
    def __call__(self, x):
        self.count += 1
        return 12*x**5 - 45*x**4 + 40*x**3 + 5

f = F()
From here on, use f as before; the count is given by f.count. Tested :)
>>> f = F()
>>> f(1)
12
>>> f(2)
-11
>>> f.count
2
>>> f(2)
-11
>>> f.count
3
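Applied to the original code, the decorator-based answer is then a two-line change; a sketch, assuming the count decorator and bounding() from above are already defined:
@count
def f(x):
    return 12*x**5 - 45*x**4 + 40*x**3 + 5

bounding()
print("f was evaluated", f.called, "times")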

Avoiding unnecessary key evaluations when sorting a list

I have a list which I want to sort by multiple keys, like:
L = [ ... ]
L.sort(key = lambda x: ( f(x), g(x) ))
This works fine. However, it results in unnecessary calls to g, which I would like to avoid because g is potentially slow. In other words, I want to evaluate the key partially and lazily.
For example, if f is unique over L (i.e. len(L) == len(set(map(f,L)))) no calls to g should be made.
What would be the most elegant/pythonic way to do this?
One way I can think of is to define a custom cmp function (L.sort(cmp=partial_cmp)), but IMO this is less elegant and more complicated than using the key parameter.
Another way would be to define a key-wrapper class which takes a generator expression to generate the different parts of the key, and override the comparison operators to compare one-by-one. However, I'm feeling there must be a simpler way...
EDIT: I'm interested in a solution for the general problem of sorting by multiple functions, not only two as in my example above.
You can try using itertools.groupby:
from itertools import groupby

result = []
for groupKey, group in groupby(sorted(L, key=f), key=f):
    sublist = [y for y in group]
    if len(sublist) > 1:
        result += sorted(sublist, key=g)
    else:
        result += sublist
Another possibility, even less elegant, but in place:
L.sort(key = f)
start = None
end = None
for i, x in enumerate(L):
    if start == None:
        start = i
    elif f(x) == f(L[start]):
        end = i
    elif end == None:
        start = i
    else:
        L[start:end+1] = sorted(L[start:end+1], key=g)
        start = None
if start != None and end != None:
    L[start:end+1] = sorted(L[start:end+1], key=g)
First version generalized to any number of functions:
def sortBy(l, keyChain):
    if not keyChain:
        return l
    result = []
    f = keyChain[0]
    for groupKey, group in groupby(sorted(l, key=f), key=f):
        sublist = [y for y in group]
        if len(sublist) > 1:
            result += sortBy(sublist, keyChain[1:])
        else:
            result += sublist
    return result
The second version generalized to any number of functions (not fully in place though):
def subSort(l, start, end, keyChain):
    part = l[start:end+1]
    sortBy(part, keyChain[1:])
    l[start:end+1] = part

def sortBy(l, keyChain):
    if not keyChain:
        return
    f = keyChain[0]
    l.sort(key = f)
    start = None
    end = None
    for i, x in enumerate(l):
        if start == None:
            start = i
        elif f(x) == f(l[start]):
            end = i
        elif end == None:
            start = i
        else:
            subSort(l, start, end, keyChain)
            start = i
            end = None
    if start != None and end != None:
        subSort(l, start, end, keyChain)
Given a function, you could create a LazyComparer class like this:
def lazy_func(func):
    class LazyComparer(object):
        def __init__(self, x):
            self.x = x
        def __lt__(self, other):
            return func(self.x) < func(other.x)
        def __eq__(self, other):
            return func(self.x) == func(other.x)
    return lambda x: LazyComparer(x)
To make a lazy key function out of multiple functions, you could create a utility function:
def make_lazy(*funcs):
    def wrapper(x):
        return [lazy_func(f)(x) for f in funcs]
    return wrapper
And together they could be used like this:
def countcalls(f):
    "Decorator that makes the function count calls to it."
    def _f(*args, **kwargs):
        _f._count += 1
        return f(*args, **kwargs)
    _f._count = 0
    _f.__name__ = f.__name__
    return _f

@countcalls
def g(x): return x

@countcalls
def f1(x): return 0

@countcalls
def f2(x): return x

def report_calls(*funcs):
    print(' | '.join(['{} calls to {}'.format(f._count, f.__name__)
                      for f in funcs]))

L = list(range(10))[::-1]
L.sort(key=make_lazy(f1, g))
report_calls(f1, g)

g._count = 0
L.sort(key=make_lazy(f2, g))
report_calls(f2, g)
which yields
18 calls to f1 | 36 calls to g
36 calls to f2 | 0 calls to g
The @countcalls decorator above is used to confirm that when f1 returns a lot
of ties, g is called to break the ties, but when f2 returns distinct values,
g does not get called.
NPE's solution adds memoization within the Key class. With the solution above,
you could add memoization outside (independent of) the LazyComparer class:
def memo(f):
    # Author: Peter Norvig
    """Decorator that caches the return value for each call to f(args).
    Then when called again with same args, we can just look it up."""
    cache = {}
    def _f(*args):
        try:
            return cache[args]
        except KeyError:
            cache[args] = result = f(*args)
            return result
        except TypeError:
            # some element of args can't be a dict key
            return f(*args)
    _f.cache = cache
    return _f
L.sort(key=make_lazy(memo(f1), memo(g)))
report_calls(f1, g)
which results in fewer calls to g:
10 calls to f1 | 10 calls to g
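On Python 3, functools.lru_cache can play the role of the hand-rolled memo above, with the caveat that it raises TypeError on unhashable arguments rather than falling back to an uncached call. A rough sketch, reusing f1, g, make_lazy and report_calls from above (reset the _count attributes first if you want clean numbers):
from functools import lru_cache

L.sort(key=make_lazy(lru_cache(maxsize=None)(f1), lru_cache(maxsize=None)(g)))
report_calls(f1, g)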
You could use a key object that would lazily evaluate and cache g(x):
class Key(object):
    def __init__(self, obj):
        self.obj = obj
        self.f = f(obj)

    @property
    def g(self):
        if not hasattr(self, "_g"):
            self._g = g(self.obj)
        return self._g

    def __cmp__(self, rhs):
        return cmp(self.f, rhs.f) or cmp(self.g, rhs.g)
Here is an example of use:
def f(x):
    f.count += 1
    return x // 2
f.count = 0

def g(x):
    g.count += 1
    return x
g.count = 0

L = [1, 10, 2, 33, 45, 90, 3, 6, 1000, 1]
print sorted(L, key=Key)
print f.count, g.count
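__cmp__ and the bare print statements above are Python 2 only. A rough Python 3 equivalent of the same lazy key uses __lt__ instead of __cmp__ (sorted() only needs __lt__); this sketch reuses the counting f and g defined just above:
class Key:
    def __init__(self, obj):
        self.obj = obj
        self.f = f(obj)

    @property
    def g(self):
        if not hasattr(self, "_g"):
            self._g = g(self.obj)   # evaluated lazily, at most once per object
        return self._g

    def __lt__(self, rhs):
        if self.f != rhs.f:
            return self.f < rhs.f   # g is never touched when f decides the order
        return self.g < rhs.g

L = [1, 10, 2, 33, 45, 90, 3, 6, 1000, 1]
print(sorted(L, key=Key))
print(f.count, g.count)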
