Compute once, use multiple times within Python class

I am trying to define a class within which a function of many variables is optimized. Normally I work with ~500-1000 variables. In this class, I need to pass the function and its derivative to scipy's minimize to find the x0 that minimizes this function.
The following is a simple working example of the concept, and it works fine. But as you can see, both the function (f) and its derivative (df) depend on another function g (in this example g looks trivial and could be written another way, but the actual functions are much more complicated).
I was wondering if I can calculate g only once at each iteration and then use that value within the class. Since f and df are called multiple times inside minimize, g has to be re-evaluated at each step as well.
Thanks!
import numpy as np
from scipy.optimize import minimize

class Minimization(object):
    '''A class to optimize a function'''
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.p = np.array([x, y])

    def g(self, x, y):
        return x - y

    def f(self, p):
        return (self.g(*p) - 1)**2

    def df(self, p):
        fprime = 2*(self.g(*p) - 1)
        return np.array([fprime, -fprime])

    def optimize(self):
        P1 = minimize(fun=self.f, x0=self.p, args=(), method='Newton-CG', jac=self.df)
        return P1

m = Minimization(2, 4)
m.optimize()
#     fun: 0.0
#     jac: array([ 0., -0.])
# message: 'Optimization terminated successfully.'
#    nfev: 3
#    nhev: 0
#     nit: 2
#    njev: 6
#  status: 0
# success: True
#       x: array([ 3.5,  2.5])

What you want is called "memoizing". When the function g calculates a value it stores the result in a dictionary, indexed by the arguments x, y. Every time g is called it checks the dictionary to see if the value it needs is already stored there. If you need to reset the values, you clear the dictionary. Something like this:
import numpy as np
from scipy.optimize import minimize

class Minimization(object):
    '''A class to optimize a function'''
    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.p = np.array([x, y])
        self.cache = {}  # previously computed values of g

    def g(self, x, y):
        cache_index = (x, y)
        if cache_index in self.cache:  # check cache first
            return self.cache[cache_index]
        value = x - y
        self.cache[cache_index] = value  # save for later
        return value

    def f(self, p):
        return (self.g(*p) - 1)**2

    def df(self, p):
        fprime = 2*(self.g(*p) - 1)
        return np.array([fprime, -fprime])

    def optimize(self):
        self.cache.clear()  # blow away the cache before a fresh run
        P1 = minimize(fun=self.f, x0=self.p, args=(), method='Newton-CG', jac=self.df)
        return P1
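For illustration (not part of the original answer), a quick usage sketch; after a run, the cache size shows how many distinct points g was actually evaluated at:

m = Minimization(2, 4)
result = m.optimize()
print(result.x)      # optimizer's solution
print(len(m.cache))  # number of distinct (x, y) pairs g was evaluated at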

To complement Paul's answer, you could define a class that aggregates caching methods, which you then (re)use as decorators.
import functools as ft  # <------ used to keep meth-related docstrings

class Cache(object):
    def __init__(self):
        self._cache = {}

    @classmethod
    def _property(cls, meth):
        @property
        @ft.wraps(meth)
        def __property(cls):
            meth_name = meth.__name__
            if meth_name not in cls._cache:
                cls._cache[meth_name] = meth(cls)
            return cls._cache[meth_name]
        return __property

    @classmethod
    def _method(cls, meth):
        @ft.wraps(meth)
        def __method(cls, *args, **kwargs):
            meth_key = '{}_{}'.format(meth.__name__, args)  # <---- key built as a string so as to avoid unhashable-type errors
            if meth_key not in cls._cache:
                cls._cache[meth_key] = meth(cls, *args, **kwargs)
            return cls._cache[meth_key]
        return __method
And then use the class Cache as an ancestor of Minimization, as follows:
import numpy as np
from scipy.optimize import minimize

class Minimization(Cache):  # <---------- inherits from Cache instead of object
    '''A class to optimize a function'''
    def __init__(self, x, y):
        super(Minimization, self).__init__()
        self.x0 = x                 # I changed the names because, as it stands,
        self.y0 = y                 # these attributes are actually used as first guesses
        self.p0 = np.array([x, y])  # for the resolution process

    @Cache._method
    def g(self, x, y):
        return x - y

    #@Cache._method
    def f(self, p):
        return (self.g(*p) - 1)**2

    #@Cache._method
    def df(self, p):
        fprime = 2*(self.g(*p) - 1)
        return np.array([fprime, -fprime])

    @Cache._property
    def optimized(self):  # <----- renamed to "optimized" to reflect what it is: a property
        return minimize(fun=self.f, x0=self.p0, args=(), method='Newton-CG', jac=self.df)
Use Case (tested under Python 2.7.11 and 3.6.1)
>>> m = Minimization(2, 4)
>>> # Take care to clear the cache (m._cache.clear()) if optimized has already
>>> # been computed and you have since changed one of its "dependencies".
>>> # Alternatively, simply remove the @Cache._property decorator.
>>> m.optimized
  status: 0
 success: True
    njev: 6
    nfev: 3
     fun: 0.0
       x: array([ 3.5,  2.5])
 message: 'Optimization terminated successfully.'
    nhev: 0
     jac: array([ 0., -0.])

Without having looked too deeply at the code itself, here is a sample class to demonstrate how to calculate a value once and avoid recomputing it on each invocation. You could also make this a property.
class StackOverflow:
    def __init__(self, value=None):
        self._value = value

    def compute_value(self):
        if self._value is None:
            self._value = 100  # compute the value here, once
        return self._value
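As mentioned, the same idea can be exposed as a property; a minimal sketch of that variant (the attribute name value is illustrative):

class StackOverflow:
    def __init__(self, value=None):
        self._value = value

    @property
    def value(self):
        if self._value is None:
            self._value = 100  # computed once, on first access
        return self._value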

Related

Getting a type error while using fori_loop with JAX

I'm developing a code using JAX, and I wanted to JIT some parts that had big loops. I didn't want the code to be unrolled, so I used fori_loop, but I'm getting an error and can't figure out what I am doing wrong.
The error is:
self.arr = self.arr.reshape(new_shape+new_shape)
TypeError: 'aval_method' object is not callable
I was able to reduce the code to the following:
import jax.numpy as jnp
import jax

class UB():
    def __init__(self, arr, new_shape):
        self.arr = arr
        self.shape = new_shape
        if type(arr) is not object:
            self.arr = self.arr.reshape(new_shape + new_shape)

    def _tree_flatten(self):
        children = (self.arr,)  # arrays / dynamic values
        aux_data = {
            'new_shape': self.shape
        }  # static values
        return (children, aux_data)

    @classmethod
    def _tree_unflatten(cls, aux_data, children):
        return cls(*children, **aux_data)

class UM():
    def __init__(self, arr, r=None):
        self.arr = arr
        self.r = tuple(r)

    def _tree_flatten(self):
        children = (self.arr,)  # arrays / dynamic values
        aux_data = {
            'r': self.r
        }  # static values
        return (children, aux_data)

    @classmethod
    def _tree_unflatten(cls, aux_data, children):
        return cls(*children, **aux_data)

for C in [UB, UM]:
    jax.tree_util.register_pytree_node(
        C,
        C._tree_flatten,
        C._tree_unflatten,
    )

def s_w(ub, ums):
    e = jnp.identity(2)
    u = UM(e, [2])
    ums[0] = u
    return ub, ums

def s_c(t, uns):
    n = 20
    ums = []
    for un in uns:
        ums.append(UM(un, [2]))
    tub = UB(t.arr, t.r)
    s_loop_body = lambda i, x: s_w(ub=x[0], ums=x[1])
    tub, ums = jax.lax.fori_loop(0, n, s_loop_body, (tub, ums))
    # for i in range(n):
    #     tub, ums = s_loop_body(i, (tub, ums))
    return jnp.array([u.arr.flatten() for u in ums])

uns = jnp.array([jnp.array([1, 2, 3, 4]) for _ in range(6)])
t = UM(jnp.array([1, 0, 0, 1]), r=[2])
uns = s_c(t, uns)
Has anyone encountered this issue or can explain how to fix it?
The issue is discussed here: https://jax.readthedocs.io/en/latest/pytrees.html#custom-pytrees-and-initialization
Namely, in JAX pytrees are used as general containers, and are sometimes initialized with abstract values or other placeholders, so you cannot assume that arguments to a custom pytree will be of array type. You might account for this by doing something like the following:
class UB():
    def __init__(self, arr, new_shape):
        self.arr = arr
        self.shape = new_shape
        if isinstance(arr, jnp.ndarray):
            self.arr = self.arr.reshape(new_shape + new_shape)
When I run your code with this modification, it gets past the error you asked about, but unfortunately does trigger another error due to the body function of the fori_loop not having a valid signature (namely, the arr attributes of the ums have different shapes on input and output, which is not supported by fori_loop).
Hopefully this gets you on the path toward working code!
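For reference, the linked documentation also describes a second option: define _tree_unflatten so that it bypasses __init__ and sets the attributes directly, so the reshape never runs on placeholder values. A minimal sketch of that pattern for UB (whether it fits the rest of the program depends on how the arr shapes are used):

class UB():
    def __init__(self, arr, new_shape):
        self.arr = arr.reshape(new_shape + new_shape)
        self.shape = new_shape

    def _tree_flatten(self):
        return (self.arr,), {'new_shape': self.shape}

    @classmethod
    def _tree_unflatten(cls, aux_data, children):
        obj = object.__new__(cls)          # skip __init__ and its reshape
        obj.arr = children[0]              # may be an array or a placeholder
        obj.shape = aux_data['new_shape']
        return obj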

Not sure why I'm stuck in a Python stuck recursion loop

The add and mul definitions here are nonsensical because of their dependence on returning self, causing infinite loops. If they create a new distribution using the lambdas then it works fine, as in my own answer below.
I'm just playing around with classes and operator overriding, trying to build a small statistics tool. However, when I run this code I get stuck in a recursion loop inside the __mul__ call, which is triggered by the n1.pdf call, and I cannot figure out why. I think it has something to do with Python lazily evaluating the lambda in __mul__, instead of doing what I 'wanted' (in CS terms): creating a new reference to the old pdf function, owned by the new pdf, and then pointing the main .pdf name at the new function.
I think this is quite poorly worded so edits extremely welcome if you understand what I'm asking.
import math
import random

class Distribution:
    def __init__(self, pdf, cdf):
        self.pdf = pdf
        self.cdf = cdf

    def pdf(self, x):
        return self.pdf(x)

    def cdf(self, x):
        return self.cdf(x)

    def __mul__(self, other):
        if isinstance(other, float) or isinstance(other, int):
            newpdf = lambda x: self.pdf(x) * other
            self.pdf = newpdf
            newcdf = lambda x: self.cdf(x) * other
            self.cdf = newcdf
            return self
        else:
            return NotImplemented

    def __add__(self, other):
        self.pdf = lambda x: self.pdf(x) + other.pdf(x)
        self.cdf = lambda x: self.cdf(x) + other.cdf(x)
        return Distribution(self.pdf, self.cdf)

class Normal(Distribution):
    def __init__(self, mean, stdev):
        self.mean = mean
        self.stdev = stdev

    def pdf(self, x):
        return (1.0 / math.sqrt(2 * math.pi * self.stdev ** 2)) * math.exp(-0.5 * (x - self.mean) ** 2 / self.stdev ** 2)

    def cdf(self, x):
        return (1 + math.erf((x - self.mean) / math.sqrt(2) / self.stdev)) / 2

    def sample(self):
        return self.mean + self.stdev * math.sqrt(2) * math.cos(2 * math.pi * random.random())

if __name__ == "__main__":
    n1 = Normal(1, 2)
    n1half = n1 * 0.5
    x = n1.pdf(1)
    print(x)
p.s. I know that it is no longer a pdf after being multiplied by 0.5, this is not an issue.
class Distribution:
    ...
    def pdf(self, x):
        return self.pdf(x)
pdf() calls itself, which calls itself, which calls itself... infinitely.
Same with cdf().
def pdf(self, x):
return self.pdf(x)
def cdf(self, x):
return self.cdf(x)
I assume your intent is to delegate to the attributes. Since they are always assigned, they will be found (assuming you do the lookup on an instance) instead of the class methods (which would straightforwardly be infinite recursion without those attributes); but this in turn means that these class methods are just useless. x.cdf(y), where cdf is a callable instance attribute, just works; there is no need to provide a method as well.
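A tiny demonstration of that lookup order: an instance attribute shadows the class method of the same name, so the method is never reached once the attribute is assigned (the class D here is illustrative):

class D:
    def pdf(self, x):        # never reached once the instance attribute exists
        return self.pdf(x)

d = D()
d.pdf = lambda x: x * 2      # instance attribute shadows the method
print(d.pdf(3))              # 6 -- the lambda is found first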
newpdf = lambda x: self.pdf(x) * other
self.pdf = newpdf
I assume your intent is to create a new function that relies upon the existing value of self.pdf. Unfortunately, it doesn't work that way. The problem is that the lambda is late binding. When it executes, that is the time at which it will look up self.pdf... and find itself.
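A standalone sketch of the same late-binding trap, outside the class, to make the failure mode concrete:

def base(x):
    return x + 1

f = base
g = lambda x: f(x) * 2  # 'f' is looked up when g is *called*, not when it is defined
f = g                   # rebinding f makes g call itself

# f(1) would now recurse forever: g looks up f, finds itself, and calls itself again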
There is a separate problem here, in that you are writing __mul__ and __add__ implementations - that is, the * and + operators, which are supposed to return a new value, and not mutate either operand. (If you wrote a = 3 and b = 4 and then c = a * b, you would be extremely surprised if the values of a or b changed, yes?)
We can solve both problems at once by simply using the computed pdf and cdf to create a new instance (which we need anyway). Since self.pdf is never reassigned now, the late binding inside the lambda becomes harmless:
def __mul__(self, other):
    if isinstance(other, float) or isinstance(other, int):
        newpdf = lambda x: self.pdf(x) * other
        newcdf = lambda x: self.cdf(x) * other
        return Distribution(newpdf, newcdf)
    else:
        return NotImplemented
Similarly, __add__ should use local variables, rather than modifying self:
def __add__(self, other):
    newpdf = lambda x: self.pdf(x) + other.pdf(x)
    newcdf = lambda x: self.cdf(x) + other.cdf(x)
    return Distribution(newpdf, newcdf)
Note that implementing these methods also gives you the augmented assignment operators *= and += (albeit by creating a new object and rebinding the name).
Let's test it:
if __name__ == "__main__":
    n1 = Normal(1, 2)
    n1half = n1 * 0.5
    print(n1.pdf(1))
    print(n1half.pdf(1))
    n1 += n1
    print(n1.pdf(1))
I get:
>py test.py
0.19947114020071635
0.09973557010035818
0.3989422804014327
Thanks for the help @John and @Tom and @bbbbbb... The problem was returning self instead of creating a new distribution. If I change the definition of __mul__ to
def __mul__(self, other):
    if isinstance(other, float) or isinstance(other, int):
        def newpdf(x):
            return self.pdf(x) * other
        def newcdf(x):
            return self.cdf(x) * other
        return Distribution(newpdf, newcdf)
    else:
        return NotImplemented
then this solves the problem.

Class Method Not Returning Value When Accessed Via Inheritance

I have a class method that stops returning a value when I try accessing through an inherited subclass.
Have no idea what's causing it to not return the appropriate value.
Here's what I have:
class KNN():
    def __init__(self, neighbors=5, centered=True):
        self.neighbors = neighbors
        self.centered = centered

    def _get_distance(self, xi):
        return np.sqrt(((xi - self.X_fit)**2).sum(1))

    def fit(self, X, y):
        if self.centered:
            self.X_fit = standardize(X)
        else:
            self.X_fit = X
        self.y_fit = y

    def predict(self, X, centered=False):
        m, n = X.shape[0], self.X_fit.shape[0]
        self.dist_matrix = np.zeros((m, n))
        X_pred = np.zeros(X.shape)
        if standardize:
            X_pred = standardize(X)
        else:
            X_pred = X
        for row in range(m):
            self.dist_matrix[row] = self._get_distance(X_pred[row])
        self.idx_vals = np.argsort(self.dist_matrix)[:, :self.neighbors]
        self.y_idx = self.y_fit[self.idx_vals]
        self.preds = [self.neighbor_calculation(self.y_idx[i]) for i in range(len(self.y_idx))]
        return self.preds
If I access the KNN class directly the predict method works as intended, and it returns an array of the predicted values.
However, it stops when I try and create a subclass that inherits from KNN:
class KNNClassifier(KNN):
    def predict(self, X, centered=False):
        self.neighbor_calculation = majority_vote
        super().predict(X, standardize)
When I access the predict method through the KNNClassifier class it doesn't return a value.
However, self.preds contains the actual predictions.
But trying something like KNNClassifier.predict(X)[:10] gives the error message:
'NoneType' object is not subscriptable
I don't know why the returned value is suddenly being interpreted as None.
You are using super to call the parent method from the child, which is right, but you need to use return to return the value:
return super().predict(X, standardize)
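Putting it together, the fixed subclass would look like the sketch below. Note that the original call also passes the standardize function where the parent's signature expects the centered flag, which is presumably unintended; this sketch forwards centered instead (majority_vote is assumed to be defined elsewhere in the asker's code):

class KNNClassifier(KNN):
    def predict(self, X, centered=False):
        self.neighbor_calculation = majority_vote  # assumed defined elsewhere
        return super().predict(X, centered)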

Memoizing Recursive Class Instances that use Scipy Optmize

I am using Python 2.7 and have a program that solves a recursive optimization problem, that is, a dynamic programming problem. A simplified version of the code is:
from math import log
from scipy.optimize import minimize_scalar

class vT(object):
    def __init__(self, c):
        self.c = c

    def x(self, w):
        return w

    def __call__(self, w):
        return self.c*log(self.x(w))

class vt(object):
    def __init__(self, c, vN):
        self.c = c
        self.vN = vN

    def objFunc(self, x, w):
        return -self.c*log(x) - self.vN(w - x)

    def x(self, w):
        x_star = minimize_scalar(self.objFunc, args=(w,), method='bounded',
                                 bounds=(1e-10, w - 1e-10)).x
        return x_star

    def __call__(self, w):
        return self.c*log(self.x(w)) + self.vN(w - self.x(w))

p3 = vT(2.0)
p2 = vt(2.0, p3)
p1 = vt(2.0, p2)

w1 = 3.0
x1 = p1.x(w1)
w2 = w1 - x1
x2 = p2.x(w2)
w3 = w2 - x2
x3 = w3
x = [x1, x2, x3]
print('Optimal x when w1 = 3 is ' + str(x))
If enough periods are added, the program can begin to take a long time to run. When x1 = p1.x(w1) is run, p2 and p3 are evaluated multiple times by minimize_scalar. Also, when x2 = p2.x(w2) is run, we know the ultimate solution will involve evaluating p2 and p3 in ways that were already done in the first step.
I have two questions:
What's the best way to use a memoize wrapper on the vT and vt classes to speed up this program?
When minimize_scalar is run, will it benefit from this memoization?
In my actually application, the solutions can take hours to solve currently. So, speeding this up would be of great value.
UPDATE: A response below points out that the example above could be written without classes, using ordinary function decoration. In my actual application, however, I do have to use classes, not functions. Moreover, my first question is whether the call of the function or method (when it's a class) inside minimize_scalar will benefit from the memoization.
I found out the answer. Below is an example of how to memoize the program. There may be an even more efficient approach, but this one memoizes the methods of the class. Furthermore, when minimize_scalar is run, the memoize wrapper records the result each time it evaluates the functions:
from math import log
from scipy.optimize import minimize_scalar
from functools import wraps

def memoize(obj):
    cache = obj.cache = {}

    @wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer

class vT(object):
    def __init__(self, c):
        self.c = c

    @memoize
    def x(self, w):
        return w

    @memoize
    def __call__(self, w):
        return self.c*log(self.x(w))

class vt(object):
    def __init__(self, c, vN):
        self.c = c
        self.vN = vN

    @memoize
    def objFunc(self, x, w):
        return -self.c*log(x) - self.vN(w - x)

    @memoize
    def x(self, w):
        x_star = minimize_scalar(self.objFunc, args=(w,), method='bounded',
                                 bounds=(1e-10, w - 1e-10)).x
        return x_star

    @memoize
    def __call__(self, w):
        return self.c*log(self.x(w)) + self.vN(w - self.x(w))

p3 = vT(2.0)
p2 = vt(2.0, p3)
p1 = vt(2.0, p2)

x1 = p1.x(3.0)
len(p3.x.cache)  # how many times was p3.x evaluated?
Out[3]: 60
x2 = p2.x(3.0 - x1)
len(p3.x.cache)  # how many additional times was p3.x evaluated?
Out[5]: 60
Note that the cache size is unchanged after the second call: every evaluation p2.x needed had already been recorded while solving p1.x, so minimize_scalar does indeed benefit from the memoization.

Class python in loop

I created a matrix class in Python:
class MatrixOperation:
    ...
    def create_matrix(self):
        # some function for creation of the matrix
        return matrix

    def matrix_vector_multiplication(self, x):
        mat = self.create_matrix()
        return numpy.dot(mat, x)
And:
M = MatrixOperation(...)
x = ...  # some set of vectors
for i in range(n):
    M.matrix_vector_multiplication(x[i])
The problem is that, on each iteration, M.matrix_vector_multiplication(x[i]) recomputes mat = self.create_matrix() before calculating numpy.dot(mat, x), which is unnecessary (it could be computed once at the beginning). How can I avoid this?
Thanks,
To avoid recreating the matrix each time, create an instance attribute in the class's __init__ method, similar to this:
class Matrix(object):
    def __init__(self, data):
        self.matrix = self.create_matrix(data)
        # or simply
        # self.matrix = np.matrix(data)

    def create_matrix(self, data):
        # create the_matrix
        return the_matrix

    def do_something(self, x):
        z = some_function(self.matrix, x)
        return z

my_matrix = Matrix([[1, 2, 3, 4], [4, 3, 2, 1]])
Just making a copy of the matrix should fix your problem.
import copy

class MatrixOperation:
    matrix = None
    ...
    def create_matrix(self):
        if self.matrix is not None:
            return copy.copy(self.matrix)
        # some function for creation of the matrix
        self.matrix = matrix
        return matrix

    def matrix_vector_multiplication(self, x):
        mat = self.create_matrix()
        return numpy.dot(mat, x)
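As a side note, on Python 3.8+ the compute-once pattern can also be handled by functools.cached_property, with no manual caching code; a minimal sketch (the data constructor argument is illustrative):

from functools import cached_property
import numpy as np

class MatrixOperation:
    def __init__(self, data):
        self.data = data

    @cached_property
    def matrix(self):
        # computed on first access, then stored on the instance
        return np.asarray(self.data)

    def matrix_vector_multiplication(self, x):
        return np.dot(self.matrix, x)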
