Implementing a state machine with decorators - python

While learning about decorators in Python, I came to the question of whether it is possible to use decorators to simulate a state machine.
Example:
from enum import Enum

class CoffeeMachine(object):
    def __init__(self):
        self.state = CoffeeState.Initial

    # Statemachine(shouldbe, willbe)
    @Statemachine(CoffeeState.Initial, CoffeeState.Grounding)
    def ground_beans(self):
        print("ground_beans")

    @Statemachine(CoffeeState.Grounding, CoffeeState.Heating)
    def heat_water(self):
        print("heat_water")

    @Statemachine(CoffeeState.Heating, CoffeeState.Pumping)
    def pump_water(self):
        print("pump_water")

class CoffeeState(Enum):
    Initial = 0
    Grounding = 1
    Heating = 2
    Pumping = 3
So all the state machine decorator does is check whether the current state is the required one; if it is, it should call the underlying function and then advance the state.
How would you implement this?

Sure you can, provided your decorator makes an assumption about where the state is stored:
from functools import wraps

class StateMachineWrongState(Exception):
    def __init__(self, shouldbe, current):
        self.shouldbe = shouldbe
        self.current = current
        super().__init__((shouldbe, current))

def statemachine(shouldbe, willbe):
    def decorator(f):
        @wraps(f)
        def wrapper(self, *args, **kw):
            if self.state != shouldbe:
                raise StateMachineWrongState(shouldbe, self.state)
            try:
                return f(self, *args, **kw)
            finally:
                self.state = willbe
        return wrapper
    return decorator
The decorator expects to get self passed in; i.e. it should be applied to methods in a class. It then expects self to have a state attribute to track the state machine state.
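For completeness, here is the decorator applied to the class from the question (a minimal sketch; note that CoffeeState has to be defined before CoffeeMachine so the decorator arguments can be evaluated at class-definition time):

from enum import Enum

class CoffeeState(Enum):
    Initial = 0
    Grounding = 1
    Heating = 2
    Pumping = 3

class CoffeeMachine(object):
    def __init__(self):
        self.state = CoffeeState.Initial

    @statemachine(CoffeeState.Initial, CoffeeState.Grounding)
    def ground_beans(self):
        print("ground_beans")

    @statemachine(CoffeeState.Grounding, CoffeeState.Heating)
    def heat_water(self):
        print("heat_water")

    @statemachine(CoffeeState.Heating, CoffeeState.Pumping)
    def pump_water(self):
        print("pump_water")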
Demo:
>>> cm = CoffeeMachine()
>>> cm.state
<CoffeeState.Initial: 0>
>>> cm.ground_beans()
ground_beans
>>> cm.state
<CoffeeState.Grounding: 1>
>>> cm.ground_beans()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 6, in wrapper
__main__.StateMachineWrongState: (<CoffeeState.Initial: 0>, <CoffeeState.Grounding: 1>)
>>> cm.heat_water()
heat_water
>>> cm.pump_water()
pump_water
>>> cm.state
<CoffeeState.Pumping: 3>

Related

How to create a python class with a single use context

If we look at python docs it states:
Most context managers are written in a way that means they can only be used effectively in a with statement once. These single use context managers must be created afresh each time they’re used - attempting to use them a second time will trigger an exception or otherwise not work correctly.
This common limitation means that it is generally advisable to create context managers directly in the header of the with statement where they are used (as shown in all of the usage examples above).
Yet, the example most commonly shared for creating context managers inside classes is:
from contextlib import ContextDecorator
import logging

logging.basicConfig(level=logging.INFO)

class track_entry_and_exit(ContextDecorator):
    def __init__(self, name):
        self.name = name

    def __enter__(self):
        logging.info('Entering: %s', self.name)

    def __exit__(self, exc_type, exc, exc_tb):
        logging.info('Exiting: %s', self.name)
But, when I instantiate this class, I can pass it several times to a with statement:
In [8]: test_context = track_entry_and_exit('test')
In [9]: with test_context:
...: pass
...:
INFO:root:Entering: test
INFO:root:Exiting: test
In [10]: with test_context:
...: pass
...:
INFO:root:Entering: test
INFO:root:Exiting: test
How can I create a class that fails on the second call to the with statement?
Here is a possible solution:
from functools import wraps

class MultipleCallToCM(Exception):
    pass

def single_use(cls):
    if not ("__enter__" in vars(cls) and "__exit__" in vars(cls)):
        raise TypeError(f"{cls} is not a Context Manager.")

    org_new = cls.__new__

    @wraps(org_new)
    def new(clss, *args, **kwargs):
        instance = org_new(clss)
        instance._called = False
        return instance

    cls.__new__ = new

    org_enter = cls.__enter__

    @wraps(org_enter)
    def enter(self):
        if self._called:
            raise MultipleCallToCM("You can't call this CM twice!")
        self._called = True
        return org_enter(self)

    cls.__enter__ = enter
    return cls
@single_use
class CM:
    def __enter__(self):
        print("Enter to the CM")

    def __exit__(self, exc_type, exc_value, exc_tb):
        print("Exit from the CM")

with CM():
    print("Inside.")
print("-----------------------------------")
with CM():
    print("Inside.")
print("-----------------------------------")
cm = CM()
with cm:
    print("Inside.")
print("-----------------------------------")
with cm:
    print("Inside.")
output:
Enter to the CM
Inside.
Exit from the CM
-----------------------------------
Enter to the CM
Inside.
Exit from the CM
-----------------------------------
Enter to the CM
Inside.
Exit from the CM
-----------------------------------
Traceback (most recent call last):
File "...", line 51, in <module>
with cm:
File "...", line 24, in enter
raise MultipleCallToCM("You can't call this CM twice!")
__main__.MultipleCallToCM: You can't call this CM twice!
I used a class decorator so that you can apply it to other context-manager classes. I wrapped the __new__ method to give every instance a flag called _called, then replaced the original __enter__ with my enter, which checks whether the object has already been used in a with statement.
How robust is this? I don't know. It seems to work; I hope it at least gives you an idea.
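The same guard can also live directly in the class, without the decorator (a minimal sketch, not part of the answer above, reusing the MultipleCallToCM exception):

class SingleUseCM:
    def __init__(self):
        self._entered = False

    def __enter__(self):
        if self._entered:
            raise MultipleCallToCM("You can't call this CM twice!")
        self._entered = True
        print("Enter to the CM")
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        print("Exit from the CM")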
Arguably the simplest method is mentioned two paragraphs further down in the documentation you have cited:
Context managers created using contextmanager() are also single use context managers, and will complain about the underlying generator failing to yield if an attempt is made to use them a second time
Here is the corresponding invocation for your example:
>>> from contextlib import contextmanager
>>> @contextmanager
... def track_entry_and_exit(name):
...     print('Entering', name)
...     yield
...     print('Exiting', name)
...
>>> c = track_entry_and_exit('test')
>>> with c:
... pass
...
Entering test
Exiting test
>>> with c:
... pass
...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python3.9/contextlib.py", line 115, in __enter__
del self.args, self.kwds, self.func
AttributeError: args
It's even a class although it is written as a function:
>>> type(c)
<class 'contextlib._GeneratorContextManager'>
I suggest considering an iterable class instead of a context manager, like this:
class Iterable:
    """Iterable that can be iterated only once."""

    def __init__(self, name):
        self.name = name
        self.it = iter([self])

    def __iter__(self):
        # code to acquire resource
        print('enter')
        yield next(self.it)
        print('exit')
        # code to release resource

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name})'
It can be iterated only once:
>>> it = Iterable('iterable')
>>> for item in it:
...     print('entered', item)
...
enter
entered Iterable(iterable)
exit
>>> for item in it:
...     print('entered', item)
...
RuntimeError: generator raised StopIteration
A context manager can be written in the same manner:
class Context:
    """Context manager that can be used only once."""

    def __init__(self, name):
        self.name = name
        self.it = iter([self])

    def __enter__(self):
        print('enter')
        return next(self.it)

    def __exit__(self, exc_type, exc, exc_tb):
        print('exit')

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name})'
It works only once:
>>> ctx = Context('context')
>>> with ctx as c:
...     print('entered', c)
...
enter
entered Context(context)
exit
>>> with ctx as c:
...     print('entered', c)
...
enter
StopIteration:

Decorated and wrapped function is not passing self to instance method

I am trying to run some instance methods as background threads using a decorator. Several nested functions are chained (as found there) to make it work:
import traceback
from functools import partial
from threading import Thread

def backgroundThread(name=''):
    def fnWrapper(decorated_func):
        def argsWrapper(name, *inner_args, **inner_kwargs):
            def exceptionWrapper(fn, *args, **kwargs):
                try:
                    fn(*args, **kwargs)
                except:
                    traceback.print_exc()
            if not name:
                name = decorated_func.__name__
            th = Thread(
                name=name,
                target=exceptionWrapper,
                args=(decorated_func, ) + inner_args,
                kwargs=inner_kwargs
            )
            th.start()
        return partial(argsWrapper, name)
    return fnWrapper

class X:
    @backgroundThread()
    def myfun(self, *args, **kwargs):
        print(args, kwargs)
        print("myfun was called")
        # 1 / 0

x = X()
x.myfun(1, 2, foo="bar")
x.myfun()
Output/Error (on Windows, Python 3.6.6):
(2,) {'foo': 'bar'}
myfun was called
Traceback (most recent call last):
File "t3.py", line 11, in exceptionWrapper
fn(*args, **kwargs)
TypeError: myfun() missing 1 required positional argument: 'self'
The code partly works; how can I 'bind' self to the call x.myfun(), which takes no arguments?
Fundamentally, the problem is that @backgroundThread() doesn't wrap an instance method x.myfun; it wraps the function X.myfun that is namespaced to the class.
We can inspect the wrapped result:
>>> X.myfun
functools.partial(<function backgroundThread.<locals>.fnWrapper.<locals>.argsWrapper at 0x7f0a1e2e7a60>, '')
This is not usable as a method, because functools.partial is not a descriptor:
>>> X.myfun.__get__
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'functools.partial' object has no attribute '__get__'
>>> class Y:
...     # no wrapper
...     def myfun(self, *args, **kwargs):
...         print(args, kwargs)
...         print("myfun was called")
...         # 1 / 0
...
>>> Y.myfun.__get__
<method-wrapper '__get__' of function object at 0x7f0a1e2e7940>
Because X.myfun is not usable as a descriptor, when it is looked up via x.myfun it is called like an ordinary function. self does not receive the value of x, but instead the first argument that was passed, resulting in the wrong output for the (1, 2, foo='bar') case and the exception for the () case.
Instead of having argsWrapper accept a name and then binding it with partial, we can just use the name from the closure - since we are already doing that with decorated_func anyway. Thus:
def backgroundThread(name=''):
    def fnWrapper(decorated_func):
        def argsWrapper(*inner_args, **inner_kwargs):
            nonlocal name
            def exceptionWrapper(fn, *args, **kwargs):
                try:
                    fn(*args, **kwargs)
                except:
                    traceback.print_exc()
            if not name:
                name = decorated_func.__name__
            th = Thread(
                name=name,
                target=exceptionWrapper,
                args=(decorated_func, ) + inner_args,
                kwargs=inner_kwargs
            )
            th.start()
        return argsWrapper
    return fnWrapper
Here, nonlocal name is needed so that argsWrapper has access to a name from a scope that is not the immediate closure, but also isn't global.
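With that change, X.myfun is an ordinary function again, so attribute lookup on an instance produces a bound method and self arrives where it should (a quick sketch of the expected usage, reusing the X class from the question; the prints happen in the background thread):

x = X()
x.myfun(1, 2, foo="bar")   # prints (1, 2) {'foo': 'bar'} then "myfun was called"
x.myfun()                  # prints () {} then "myfun was called" -- no TypeError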

How to restrict access of property and methods to Enum members only?

I have an unorthodox Enum that I plan to use in my code, but I've run into a problem: I need my property to throw an error when the Enum is used incorrectly. However, instead of throwing the exception, it prints the property object's repr.
How I want my code to work is:
When the user writes Enum.MEMBER.text, return Enum.MEMBER's text.
When the user writes Enum.text, throw an error.
Here's the code snippet:
from enum import Enum

class MyEnum(Enum):
    @property
    def text(self):
        if isinstance(self._value_, MyCapsule): return self._value_.text
        raise Exception('You are not using an Enum!')
        return None

    @property
    def value(self):
        if isinstance(self._value_, MyCapsule): return self._value_.value
        raise Exception('You are not using an Enum!')
        return None

class MyCapsule:
    def __init__(self, value, text, more_data):
        self._value_, self._text_ = (value, text)

    @property
    def text(self): return self._text_

    @property
    def value(self): return self._value_

class CustomData(MyEnum):
    ONE = MyCapsule(1, 'One', 'Lorem')
    TWO = MyCapsule(2, 'Two', 'Ipsum')
    TRI = MyCapsule(3, 'Tri', 'Loipsum')

A = CustomData.ONE
B = CustomData
print(A.text, A.value, sep=' | ')
print(B.text, B.value, sep=' | ')
The output is:
One | 1
<property object at 0x0000016CA56DF0E8> | <property object at 0x0000016CA56DF278>
What I expect was
One | 1
Unexpected exception at ....
Is there a solution to this problem, or should I not write my Enum this way to begin with?
A custom descriptor will do the trick:
class property_only(object):
    #
    def __init__(self, func):
        self.func = func
    #
    def __get__(self, instance, cls):
        if instance is None:
            raise Exception('You are not using an Enum!')
        else:
            return self.func(instance)
    #
    def __set__(self, instance, value):
        # raise error or set value here
        pass
Then change your base Enum to use it:
class MyEnum(Enum):
    @property_only
    def text(self):
        return self._value_.text

    @property_only
    def value(self):
        return self._value_.value

class MyCapsule:
    def __init__(self, value, text, more_data):
        self._value_, self._text_ = (value, text)

class CustomData(MyEnum):
    ONE = MyCapsule(1, 'One', 'Lorem')
    TWO = MyCapsule(2, 'Two', 'Ipsum')
    TRI = MyCapsule(3, 'Tri', 'Loipsum')
and in use:
>>> CustomData.text
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 8, in __get__
Exception: You are not using an Enum!
While that solves the "access-only-from-enum" problem, you still have a lot of indirection when you want to access text and value:
>>> CustomData.ONE.value._value_
1
>>> CustomData.ONE.value._text_
'One'
The solution is to incorporate MyCapsule directly into CustomData:
from enum import Enum

class property_only(object):
    #
    def __init__(self, func):
        self.func = func
    #
    def __get__(self, instance, cls):
        if instance is None:
            raise Exception('You are not using an Enum!')
        else:
            return self.func(instance)
    #
    def __set__(self, instance, value):
        # raise error or set value here
        pass

class CustomData(Enum):
    #
    ONE = 1, 'One', 'Lorem'
    TWO = 2, 'Two', 'Ipsum'
    TRI = 3, 'Tri', 'Loipsum'
    #
    def __new__(cls, value, text, more_data):
        member = object.__new__(cls)
        member._value_ = value
        member._text_ = text
        # ignoring more_data for now...
        return member
    #
    @property_only
    def text(self):
        return self._text_
    #
    @property_only
    def value(self):
        return self._value_
and in use:
>>> CustomData.ONE
<CustomData.ONE: 1>
>>> CustomData.ONE.value
1
>>> CustomData.ONE.text
'One'
>>> CustomData.ONE.name
'ONE'
>>> CustomData.text
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 8, in __get__
Exception: You are not using an Enum!
>>> CustomData.value
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 8, in __get__
Exception: You are not using an Enum!
Disclosure: I am the author of the Python stdlib Enum, the enum34 backport, and the Advanced Enumeration (aenum) library.

How to block the generation of superclass with .__new__, while allowing the generation of its subclass?

I have a class 'A', which has subclasses 'B', 'C', and 'D'.
'A' serves only the purpose of categorization and inheritance, so I don't want users to create instances of 'A'.
However, I want to create instances of 'B', 'C', and 'D' as usual.
First, I blocked the creation of instances of 'A' by overriding A.__new__.
Then I overrode B.__new__ as well, as shown below:
class A(object):
    def __new__(cls, *args, **kwargs):
        raise AssertionError('A cannot be generated directly.')

class B(A):
    def __new__(cls, *args, **kwargs):
        return super(cls, B).__new__(cls, *args, **kwargs)
However, with this code, creating an instance of B raises the same AssertionError.
I understand that since A.__new__ is disabled, super(cls, B).__new__ (which is exactly the same as A.__new__) is disabled as well.
Is there any way to create an instance of a subclass without invoking the __new__ of its superclass?
Edited - Resolved
Instead of invoking A.__new__, I invoked the __new__ method of the higher superclass (which is object). This circumvented the problem.
The new code is something like this:
class A(object):
    def __new__(cls, *args, **kwargs):
        raise AssertionError('A cannot be generated directly.')

class B(A):
    def __new__(cls, *args, **kwargs):
        return object.__new__(cls)
Note that it is return object.__new__(cls), not return object.__new__(cls, *args, **kwargs). This is because object.__new__ doesn't accept any arguments other than cls, so no extra arguments may be passed.
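Any arguments you pass when constructing B still reach __init__ as usual; the overridden __new__ simply ignores them before delegating to object.__new__ (a minimal sketch, not from the question; the value parameter is made up for illustration):

class A(object):
    def __new__(cls, *args, **kwargs):
        raise AssertionError('A cannot be generated directly.')

class B(A):
    def __new__(cls, *args, **kwargs):
        # extra arguments are dropped here; __init__ still receives them
        return object.__new__(cls)

    def __init__(self, value):
        self.value = value

b = B(42)
print(b.value)  # 42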
You're explicitly invoking A.__new__ in the B.__new__ method. This version works fine:
class A:
    def __new__(cls, *args, **kwargs):
        raise AssertionError()

class B(A):
    def __new__(cls, *args, **kwargs):
        pass
Result:
>>> x = A()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 3, in __new__
AssertionError
>>> y = B()
>>> y
>>>
Note that B.__new__ is now pretty much useless: I don't actually get a B instance (y is None). I'm assuming you have an actual reason to be modifying __new__, in which case you'd put your replacement code where I have pass.
If you don't have a good reason to modify __new__, don't. You almost always want __init__ instead.
>>> class A1:
...     def __init__(self):
...         raise ValueError()
...
>>> class B1(A1):
...     def __init__(self):
...         pass
...
>>> A1()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<stdin>", line 3, in __init__
ValueError
>>> B1()
<__main__.B1 object at 0x7fa97b3f26d8>
Note how I get an actual default-constructed B1 object now.
As a general rule, you'll only want to modify __new__ if you're working with immutable types (like str and tuple) or building new metaclasses. You can see more details on what each method is responsible for in the docs.
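For instance (a minimal sketch, not from the original answer), subclassing an immutable type such as str is one of the few cases where __new__ is the right hook, because the value has to be fixed before the object exists:

class UpperStr(str):
    # A str subclass whose value is uppercased at creation time.
    def __new__(cls, value):
        # str is immutable, so the transformation must happen in __new__;
        # by the time __init__ runs, the string's value is already set.
        return super().__new__(cls, value.upper())

print(UpperStr("hello"))   # HELLO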

Decorating methods: AttributeError: 'function' object has no attribute '__self__'?

I'm working with asyncio to schedule methods to be called at certain relative time intervals. I decided to centralize the scheduling into one method of the class I wrote, to reduce the chance of errors in the logic of my project.
That method should be called every time a scheduled method finishes. I thought about adding loop.call_soon at the end of each method, but I decided to give decorators a shot.
I wrote a class decorator and then applied it to some methods of my main class, wrote the rest of the logic, and all that. But when trying to test my changes on my project I get an exception:
AttributeError: 'function' object has no attribute '__self__'
Somehow, decorating my method made it a function. This is something I cannot understand; why did this happen? How can I work around it without giving up decorators?
Here is a minimal, complete, and verifiable example of what I'm trying to do:
import asyncio
from datetime import datetime

class thinkagain:
    loop = asyncio.get_event_loop()

    def __init__(self, f):
        self.fun = f
        self.class_ = f.__self__

    def __call__(self):
        self.fun(*args, **kwords)
        # everything in Python is an object
        setattr(self.fun, "called", datetime.utcnow())
        self.loop.call_later(self.class_.think, 5 * 60)

class DoSomething:
    loop = asyncio.get_event_loop()

    @thinkagain
    def think(self):
        attr = getattr(self.dosomething, "called")
        if attr:
            elapsed = attr - datetime.utcnow()
            seconds = elapsed.seconds
        else:
            seconds = 99999

        if seconds >= 20 * 60:
            self.loop.call_soon(self.dosomething)

    @thinkagain
    def dosomething(self):
        print("I did something awesome!")

loop = asyncio.get_event_loop()
something = DoSomething()
loop.call_soon(something.think)
loop.run_forever()
and here is the exception I get:
Python 3.5.1 (default, Dec 7 2015, 13:41:59)
[GCC 5.2.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/tmp/mcve.py", line 19, in <module>
class DoSomething:
File "/tmp/mcve.py", line 22, in DoSomething
#thinkagain
File "/tmp/mcve.py", line 10, in __init__
self.class_ = f.__self__
AttributeError: 'function' object has no attribute '__self__'
>>>
Regarding decorators, Graham Dumpleton gave an excellent talk, Advanced methods for creating decorators, discussing the internal implementation of various decoration flavours and techniques. Highly recommended.
A relevant module he introduced at the end: https://github.com/GrahamDumpleton/wrapt
Nevertheless, I modified your example into two versions.
The version below stores attributes directly in the methods, as you intended.
from datetime import datetime

class thinkagain:
    def __init__(self, f):
        # Plain function as argument to be decorated
        self.func = f

    def __get__(self, instance, owner):
        self.instance_ = instance
        return self.__call__

    def __call__(self, *args, **kwargs):
        """Invoked on every call of any decorated method"""
        # set attribute directly within bound method
        bound_method = getattr(self.instance_, self.func.__name__)
        bound_method.__dict__['called'] = datetime.utcnow()
        # returning original function with class' instance as self
        return self.func(self.instance_, *args, **kwargs)

class DoSomething_A:
    @thinkagain
    def think(self, *args, **kwargs):
        print('\n%s' % locals())
        print(self.think.called, args, kwargs)
        self.dosomething()

    @thinkagain
    def dosomething(self):
        print('%s\n' % ('-'*30), locals())
        print("%s I did something awful" % self.dosomething.called)
The second version looks cleaner: it skips storing attributes inside the methods and assigns them directly on the instance.
from datetime import datetime

class thinkagain:
    def __init__(self, f):
        # Plain function as argument to be decorated
        self.func = f

    def __get__(self, instance, owner):
        self.instance_ = instance
        return self.__call__

    def __call__(self, *args, **kwargs):
        """Invoked on every call of decorated method"""
        # set attribute on instance
        name = '%s_called' % self.func.__name__
        setattr(self.instance_, name, datetime.utcnow())
        # returning original function with class' instance as self
        return self.func(self.instance_, *args, **kwargs)

class DoSomething_B:
    @thinkagain
    def think(self, *args, **kwargs):
        print('\n%s' % locals())
        print(self.think_called)
        self.dosomething()

    @thinkagain
    def dosomething(self):
        print('%s\n' % ('-'*30), locals())
        print(self.dosomething_called)
Both produce the same desired behaviour:
>>> something = DoSomething_A().think(1, 2)
{'args': (1, 2), 'kwargs': {}, 'self': <__main__.DoSomething_A object at 0x10209f128>}
2015-12-26 04:13:25.629887 (1, 2) {}
------------------------------
{'self': <__main__.DoSomething_A object at 0x10209f128>}
2015-12-26 04:13:25.647476 I did something awful
and
>>> something = DoSomething_B().think('arg_a', 'arg_b')
{'args': ('arg_a', 'arg_b'), 'kwargs': {}, 'self': <__main__.DoSomething_B object at 0x10209f208>}
2015-12-26 04:13:25.648039
------------------------------
{'self': <__main__.DoSomething_B object at 0x10209f208>}
2015-12-26 04:13:25.648390
Somehow, decorating my method made it a function.
Incorrect. The function is created, then it is decorated, and then it becomes a method. You will need to write a wrapper that captures the self argument at runtime and then invokes the actual function.
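A minimal sketch of that idea (not the asker's original decorator; a plain function-based wrapper that defers the binding of self to call time):

from functools import wraps

def thinkagain(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        # self is only available here, at call time, when the wrapper
        # is looked up on an instance and invoked as a bound method
        result = func(self, *args, **kwargs)
        # ...any scheduling or bookkeeping that needs self goes here...
        return result
    return wrapper

class DoSomething:
    @thinkagain
    def dosomething(self):
        print("I did something awesome!")

DoSomething().dosomething()  # self is bound as usual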
You might need to wrap the inner function again to get a reference to your self object. Here's some sample code that worked for me.
You need to use @timer() (with the parentheses) when applying the decorator.
import time
from functools import wraps

def timer():
    def inner(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            ts = time.time()
            result = func(self, *args, **kwargs)
            elapsed_time = time.time() - ts
            write_ot = f"Elapsed time for {self.__class__.__name__} is :{elapsed_time: 0.4f}\n"
            with open("profiler_timing.txt", 'a+') as f:
                f.write(write_ot)
            return result
        return wrapper
    return inner
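Applied to a method it looks like this (a hypothetical Worker class, just to illustrate the call syntax; it assumes the timer decorator and imports above):

class Worker:
    @timer()
    def crunch(self):
        time.sleep(0.1)

Worker().crunch()  # appends a line like "Elapsed time for Worker is : 0.1002" to profiler_timing.txt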
