class Demo1:
def __init__(self, *args):
self.__list = list(args)
def __len__(self):
return len(self.__list)
def __getitem__(self, index):
return self.__list[index]
class Demo2:
def __init__(self, *args):
self.__list = list(args)
def __len__(self):
return len(self.__list)
def __iter__(self):
self.i = -1
return self
def __next__(self):
if self.i < len(self.__list) - 1:
self.i += 1
return self.__list[self.i]
raise StopIteration
If I only use __getitem__, Python can still iterate over a Demo1 object on its own; I can also implement the iterator protocol with __iter__ and __next__ and then iterate over a Demo2 object.
So what is the difference between these two approaches?
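For reference, here is a quick check (not from the original post) showing that both classes are iterable, but through different machinery: Demo1 is driven by the legacy sequence protocol (__getitem__ with indexes counted up from 0), while Demo2 hands back its own iterator:
d1 = Demo1(1, 2, 3)
d2 = Demo2(4, 5, 6)
print(list(d1))        # [1, 2, 3] -- Python builds a sequence iterator from __getitem__/__len__
print(list(d2))        # [4, 5, 6] -- uses __iter__/__next__
print(iter(d1))        # <iterator object ...> created by Python
print(iter(d2) is d2)  # True: a Demo2 instance is its own iterator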
I would like to be able to use ** to unpack an object of a dict-like class.
Current:
f(**m.to_dict())
Preferred:
f(**m)
This would work if __starstarprepare__ existed:
class M:
#... __getitem__, __setitem__
def __starstarprepare__(self):
md = self.to_dict()
return md
You can use collections.abc.Mapping.
from collections.abc import Mapping
class M(Mapping):
def __iter__(self):
return iter(self.to_dict())
def __getitem__(self, item):
return self.to_dict()[item]
def __len__(self):
return len(self.to_dict())
** works with any mapping type. One way to make M a mapping type is to subclass collections.abc.Mapping and implement __getitem__, __iter__, and __len__:
from collections.abc import Mapping
class M(Mapping):
def __init__(self):
self.a = 3
self.b = 5
def __getitem__(self, key):
return getattr(self, key)
def __iter__(self):
yield 'a'
yield 'b'
def __len__(self):
return 2
def foo(**kwargs):
for k, v in kwargs.items():
print(k, v)
m = M()
foo(**m)
If you already have a to_dict method, all three of the magic methods can be wrappers around the corresponding dict methods.
class M(Mapping):
def __init__(self):
self.a = 3
self.b = 5
def to_dict(self):
return {'a': self.a, 'b': self.b}
def __getitem__(self, key):
return self.to_dict()[key]
def __iter__(self):
return iter(self.to_dict())
def __len__(self):
return len(self.to_dict())
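Assuming the same keyword-printing foo as above, unpacking then simply routes through to_dict (a quick check, not part of the original answer):
m = M()
foo(**m)   # prints: a 3, then b 5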
Solution due to @peter:
class M:
# ... __getitem__ and other functions
def keys(self):
k = self.to_dict().keys()
return k
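This works because, in CPython, ** unpacking in a call only needs the object to supply keys() and __getitem__. A minimal self-contained sketch (attribute names made up for illustration):
class M:
    def __init__(self):
        self.a = 3
        self.b = 5
    def to_dict(self):
        return {'a': self.a, 'b': self.b}
    def __getitem__(self, key):
        return self.to_dict()[key]
    def keys(self):
        return self.to_dict().keys()

def f(**kwargs):
    print(kwargs)

f(**M())   # {'a': 3, 'b': 5}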
This is my implementation of a MinHeap and MaxHeap in Python. It uses a comparator to reverse the ordering of items stored in the MaxHeap.
import heapq
class MinHeap:
def __init__(self):
self.heap = []
def push(self, item):
heapq.heappush(self.heap, item)
def pop(self):
return heapq.heappop(self.heap)
def peek(self):
return self.heap[0]
def __getitem__(self, item):
return self.heap[item]
def __len__(self):
return len(self.heap)
class MaxHeap(MinHeap):
def push(self, item):
heapq.heappush(self.heap, Comparator(item))
def pop(self):
return heapq.heappop(self.heap)
def peek(self):
return self.heap[0]
def __getitem__(self, i):
return self.heap[i].val
class Comparator:
def __init__(self, val):
self.val = val
def __lt__(self, other):
return self.val > other
def __eq__(self, other):
return self.val == other
if __name__ == '__main__':
max_heap = MaxHeap()
max_heap.push(12)
max_heap.push(3)
max_heap.push(17)
print(max_heap.pop())
The MinHeap seems to work fine; however, the MaxHeap prints the following instead of the popped value:
<__main__.Comparator object at 0x10a5c1080>
I don't quite understand what I am doing wrong here. Can someone help me with this?
I've added a __repr__ method to your Comparator class and made its comparison methods compare against other.val, so the code now runs, and the Comparator instances display their val when printed.
The important thing is to get those comparison methods to do the comparisons correctly between two Comparator instances.
You'll notice that I've eliminated most of the methods from MaxHeap. They aren't needed because the methods inherited from MinHeap work ok. You may wish to restore this one to MaxHeap
def __getitem__(self, i):
return self.heap[i].val
depending on how you intend to use MaxHeap.
import heapq
class MinHeap:
def __init__(self):
self.heap = []
def push(self, item):
heapq.heappush(self.heap, item)
def pop(self):
return heapq.heappop(self.heap)
def peek(self):
return self.heap[0]
def __getitem__(self, item):
return self.heap[item]
def __len__(self):
return len(self.heap)
class MaxHeap(MinHeap):
def push(self, item):
heapq.heappush(self.heap, Comparator(item))
class Comparator:
def __init__(self, val):
self.val = val
def __lt__(self, other):
return self.val > other.val
def __eq__(self, other):
return self.val == other.val
def __repr__(self):
return repr(self.val)
if __name__ == '__main__':
max_heap = MaxHeap()
max_heap.push(12)
max_heap.push(3)
max_heap.push(17)
while True:
try:
print(max_heap.pop())
except IndexError:
# The heap's empty, bail out
break
output
17
12
3
It's probably a Good Idea to give Comparator the full set of rich comparison methods. They aren't needed to make the above code work, but they will make the Comparator instances more flexible. So in case you want them, here they are:
def __lt__(self, other):
return self.val > other.val
def __le__(self, other):
return self.val >= other.val
def __gt__(self, other):
return self.val < other.val
def __ge__(self, other):
return self.val <= other.val
def __eq__(self, other):
return self.val == other.val
def __ne__(self, other):
return self.val != other.val
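If you would rather not spell out all six, functools.total_ordering can derive the missing ones from __eq__ plus a single ordering method. A sketch of that alternative (not part of the original answer), keeping the deliberately reversed ordering:
import functools

@functools.total_ordering
class Comparator:
    def __init__(self, val):
        self.val = val
    def __eq__(self, other):
        return self.val == other.val
    def __lt__(self, other):
        # reversed on purpose so heapq's min-heap behaves like a max-heap
        return self.val > other.val
    def __repr__(self):
        return repr(self.val)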
I'm working on a homework assignment where I have to implement selection sort using forward iterators, for both Python lists and (singly) linked lists.
Here is some code I have for the iterators:
from abc import *
class ForwardIterator(metaclass=ABCMeta):
@abstractmethod
def getNext(self):
return
@abstractmethod
def getItem(self):
return
@abstractmethod
def getLoc(self):
return
@abstractmethod
def clone(self):
return
def __eq__(self, other):
return self.getLoc() == other.getLoc()
def __ne__(self, other):
return not (self == other)
def __next__(self):
if self.getLoc() == None:
raise StopIteration
else:
item = self.getItem()
self.getNext()
return item
class ForwardAssignableIterator(ForwardIterator):
@abstractmethod
def setItem(self, item):
"""Sets the item at the current position."""
return
class PythonListFAIterator(ForwardAssignableIterator):
def __init__(self, lst, startIndex):
self.lst = lst
self.curIndex = startIndex
def getNext(self):
self.curIndex += 1
def getItem(self):
if self.curIndex < len(self.lst):
return self.lst[self.curIndex]
else:
return None
def setItem(self, item):
if self.curIndex < len(self.lst):
self.lst[self.curIndex] = item
def getLoc(self):
if self.curIndex < len(self.lst):
return self.curIndex
else:
return None
def clone(self):
return PythonListFAIterator(self.lst, self.curIndex)
The LinkedListFAIterator is similar to PythonListFAIterator, plus a getStartIterator and an __iter__ method.
I don't know how to write the code for selection sort taking one parameter, a FAIterator (the forward iterator). Please help me. I know I should find the minimum element and put it at the beginning of the list. I also know that I should use the clone method to create multiple iterators that keep track of multiple locations at once. But I don't know how to write the code.
Please give me some hints.
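Since only hints are requested, here is a rough sketch of my own (not assignment material) of selection sort written purely against the iterator interface above; it assumes the iterator passed in is positioned at the first element:
def selection_sort(start_iter):
    # outer walks the sequence; everything before it is already sorted
    outer = start_iter.clone()
    while outer.getLoc() is not None:
        # scan the remainder of the sequence for the smallest item
        smallest = outer.clone()
        scan = outer.clone()
        scan.getNext()
        while scan.getLoc() is not None:
            if scan.getItem() < smallest.getItem():
                smallest = scan.clone()
            scan.getNext()
        # swap the smallest item into the current position
        tmp = outer.getItem()
        outer.setItem(smallest.getItem())
        smallest.setItem(tmp)
        outer.getNext()

data = [4, 1, 3, 2]
selection_sort(PythonListFAIterator(data, 0))
print(data)   # [1, 2, 3, 4]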
I have a size-limited dictionary class, and I want to make the __iter__ method work like this:
if the value is not None, yield it; otherwise, skip it.
Do you guys know how to implement this?
class myclass(object):
def __init__(self):
self.data = {1:'a', 2:None, 3:'c'}
def __iter__(self):
return iter(self.data.values())
def __next__(self): ## <== I THINK THIS IS A WRONG EXAMPLE
if iter(self): ## BUT I DON'T KNOW HOW TO FIX IT
return iter(self)
mc = myclass()
for i in mc:
print(i)
If your __iter__ method directly returns an iterator, you do not need to implement __next__; it will not be consulted in that case (it is the __next__ method of the returned iterator that is used instead).
Return a generator expression:
class myclass(object):
def __init__(self):
self.data = {1:'a', 2:None, 3:'c'}
def __iter__(self):
return (v for v in self.data.values() if v is not None)
Demo:
>>> class myclass(object):
... def __init__(self):
... self.data = {1:'a', 2:None, 3:'c'}
... def __iter__(self):
... return (v for v in self.data.values() if v is not None)
...
>>> list(myclass())
['a', 'c']
You'd have to return self from __iter__ if you wanted the class to be its own iterator; this would mean you'd need to track state between __next__ calls to know what value to return from each call. For your usecase, you most probably do not want that.
To create a single-use iterator:
class myclass(object):
def __init__(self):
self.data = {1:'a', 2:None, 3:'c'}
self.keys = list(self.data.keys())
self.index = 0
def __iter__(self):
return self
def __next__(self):
index = self.index
while 'searching for value':
if index >= len(self.keys):
raise StopIteration
key = self.keys[index]
value = self.data[key]
index += 1
if value is not None:
self.index = index
return value
As you can see, using Martijn's answer is much nicer.
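An __iter__ written as a generator function is just as clean and avoids the manual index bookkeeping entirely; a sketch equivalent to the generator-expression answer:
class myclass(object):
    def __init__(self):
        self.data = {1: 'a', 2: None, 3: 'c'}
    def __iter__(self):
        # calling iter() on an instance returns a fresh generator each time
        for v in self.data.values():
            if v is not None:
                yield v

print(list(myclass()))   # ['a', 'c']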
I'm trying to memoize using a decorator, with the decorator being a class rather than a function, but I'm getting the error
TypeError: seqLength() takes exactly 2 arguments (1 given)
I'm guessing this has something to do with the classes, but I'm not sure what's wrong beyond that.
The code:
import sys
class memoize(object):
'''memoize decorator'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(self, *args)
self.cache[args] = value
return value
class collatz(object):
def __init__(self, n):
self.max = 1
self.n = n
@memoize
def seqLength(self, n):
if n>1:
if n%2 == 0:
return 1+self.seqLength(n/2)
else:
return 1+self.seqLength(3*n+1)
else:
return 1
def maxLength(self):
for n in xrange(1, self.n):
l = self.seqLength(n)
if l > self.max:
self.max = n
return self.max
n = int(sys.argv[1])
c = collatz(n)
print c.maxLength()
This is confusing, syntactically. It's not clear if self.func is part of your memoize or a separate function that's part of some other object of some other class. (You mean the latter, BTW)
value = self.func(self, *args)
Do this to make it clear that the_func is just a function, not a member of the memoize class.
the_func = self.func
value = the_func(*args)
That kind of thing prevents confusion over the class to which self. is bound.
Also, please spell it Memoize. With a leading capital letter. It is a class definition, after all.
Using a class as a decorator is tricky, because you have to implement the descriptor protocol correctly (the currently accepted answer doesn't). A much, much easier solution is to use a wrapper function, because they automatically implement the descriptor protocol correctly. The wrapper equivalent of your class would be:
import functools
def memoize(func):
cache = {}
@functools.wraps(func)
def wrapper(*args):
try:
return cache[args]
except KeyError:
value = func(*args)
cache[args] = value
return value
return wrapper
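For example, the wrapper can be applied directly to a method, because a plain function already binds self when looked up on an instance; the instance then simply becomes part of the cache key. A small check using a simplified stand-in for seqLength (the names here are mine, not from the question):
# uses the memoize() wrapper defined just above
class Collatz(object):
    @memoize
    def seq_length(self, n):
        if n <= 1:
            return 1
        return 1 + self.seq_length(n // 2 if n % 2 == 0 else 3 * n + 1)

print(Collatz().seq_length(6))   # 9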
When you have so much state that you want to encapsulate it in a class anyway, you can still use a wrapper function, for example like so:
import functools
class _Memoize(object):
'''memoize decorator helper class'''
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(*args)
self.cache[args] = value
return value
def memoize(func):
o = _Memoize(func)
@functools.wraps(func)
def wrapper(*args):
return o(*args)
return wrapper
A decorator is just syntactic sugar for foo = decorator(foo), so in this case you're ending up making the self of seqLength be memoize instead of collatz. You need to use descriptors. This code works for me:
class memoize(object):
'''memoize descriptor'''
def __init__(self, func):
self.func = func
def __get__(self, obj, type=None):
return self.memoize_inst(obj, self.func)
class memoize_inst(object):
def __init__(self, inst, fget):
self.inst = inst
self.fget = fget
self.cache = {}
def __call__(self, *args):
# if cache hit, done
if args in self.cache:
return self.cache[args]
# otherwise populate cache and return
self.cache[args] = self.fget(self.inst, *args)
return self.cache[args]
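A hypothetical usage sketch of the descriptor version, written for Python 3 with the class and method names from the question (it assumes memoize_inst is defined as an inner class of memoize, which is what self.memoize_inst in __get__ implies):
class collatz(object):
    def __init__(self, n):
        self.n = n
    @memoize
    def seqLength(self, n):
        if n > 1:
            if n % 2 == 0:
                return 1 + self.seqLength(n // 2)
            return 1 + self.seqLength(3 * n + 1)
        return 1

print(collatz(10).seqLength(6))   # 9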
More on descriptors:
http://docs.python.org/howto/descriptor.html#descriptor-example