How to lazy load a data structure (Python)

I have some way of building a data structure (out of some file contents, say):
def loadfile(FILE):
    return  # some data structure created from the contents of FILE
So I can do things like
puppies = loadfile("puppies.csv") # wait for loadfile to work
kitties = loadfile("kitties.csv") # wait some more
print len(puppies)
print puppies[32]
In the above example, I wasted a bunch of time actually reading kitties.csv and creating a data structure that I never used. I'd like to avoid that waste without constantly checking if not kitties whenever I want to do something. I'd like to be able to do
puppies = lazyload("puppies.csv") # instant
kitties = lazyload("kitties.csv") # instant
print len(puppies) # wait for loadfile
print puppies[32]
So if I don't ever try to do anything with kitties, loadfile("kitties.csv") never gets called.
Is there some standard way to do this?
After playing around with it for a bit, I produced the following solution, which appears to work correctly and is quite brief. Are there some alternatives? Are there drawbacks to using this approach that I should keep in mind?
class lazyload:
    def __init__(self, FILE):
        self.FILE = FILE
        self.F = None
    def __getattr__(self, name):
        if not self.F:
            print "loading %s" % self.FILE
            self.F = loadfile(self.FILE)
        return object.__getattribute__(self.F, name)
What might be even better is if something like this worked:
class lazyload:
    def __init__(self, FILE):
        self.FILE = FILE
    def __getattr__(self, name):
        self = loadfile(self.FILE)  # this never gets called again
                                    # since self is no longer a
                                    # lazyload instance
        return object.__getattribute__(self, name)
But this doesn't work because self is local. It actually ends up calling loadfile every time you do anything.

The csv module in the Python standard library will not load the data until you start iterating over it, so it is in fact lazy.
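A small sketch of that laziness (the file name and the generator wrapper are assumptions for illustration):
import csv

def rows(filename):
    # csv.reader pulls lines from the file object on demand, so only
    # the rows you actually iterate over are read and parsed.
    with open(filename) as f:
        for row in csv.reader(f):
            yield row

for row in rows("puppies.csv"):
    print(row)
    break  # stop after the first row; the rest of the file is never read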
Edit: If you need to read through the whole file to build the data structure, having a complex lazy-load object that proxies things is overkill. Just do this:
class Lazywrapper(object):
    def __init__(self, filename):
        self.filename = filename
        self._data = None

    def get_data(self):
        if self._data is None:
            self._build_data()
        return self._data

    def _build_data(self):
        # Now open and iterate over the file to build a datastructure, and
        # put that datastructure as self._data
        self._data = {}  # placeholder: replace with the real structure
With the above class you can do this:
puppies = Lazywrapper("puppies.csv")  # Instant
kitties = Lazywrapper("kitties.csv")  # Instant

print len(puppies.get_data())  # Wait
print puppies.get_data()[32]   # Instant (the data is now cached)
Also
allkitties = kitties.get_data()  # Wait
print len(allkitties)
print allkitties[32]
If you have a lot of data and don't actually need all of it, you could also implement a class that reads the file only until it finds the doggie called "Froufrou" and then stops. But at that point it's likely better to stick the data in a database once and for all and access it from there.
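A sketch of that early-stopping idea, assuming one animal per CSV row with the name in the first column:
import csv

def find_row(filename, wanted):
    # Scan the file lazily; rows after the first match are never read.
    with open(filename) as f:
        for row in csv.reader(f):
            if row and row[0] == wanted:
                return row
    return None

froufrou = find_row("kitties.csv", "Froufrou")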

If you're really worried about the if statement, you can eliminate it with a stateful object.
from collections import MutableMapping

class LazyLoad(MutableMapping):
    def __init__(self, source):
        self.source = source
        self.process = LoadMe(self)
        self.data = None
    def __getitem__(self, key):
        self.process = self.process.load()
        return self.data[key]
    def __setitem__(self, key, value):
        self.process = self.process.load()
        self.data[key] = value
    def __contains__(self, key):
        self.process = self.process.load()
        return key in self.data
    # MutableMapping is abstract, so __delitem__, __iter__ and __len__
    # must also be provided before the class can be instantiated.
    def __delitem__(self, key):
        self.process = self.process.load()
        del self.data[key]
    def __iter__(self):
        self.process = self.process.load()
        return iter(self.data)
    def __len__(self):
        self.process = self.process.load()
        return len(self.data)
This class delegates the work to a process object which is either a LoadMe or a DoneLoading object. The LoadMe object will actually load; the DoneLoading object will not.
Note that there are no if-statements.
class LoadMe(object):
    def __init__(self, parent):
        self.parent = parent
    def load(self):
        ## Actually load, setting self.parent.data
        return DoneLoading(self.parent)

class DoneLoading(object):
    def __init__(self, parent):
        self.parent = parent
    def load(self):
        return self
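A usage sketch, assuming LoadMe.load() has been fleshed out to populate self.parent.data (the body above is only a placeholder):
kitties = LazyLoad("kitties.csv")  # instant; process is a LoadMe
print(kitties["Froufrou"])         # first access: LoadMe.load() runs, then
                                   # replaces itself with a DoneLoading
kitties["Tom"] = "tabby"           # DoneLoading.load() is now a no-op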

Wouldn't if not self.F lead to another call to __getattr__, putting you into an infinite loop? I think your approach makes sense, but to be on the safe side, I'd make that line into:
if name == "F" and not self.F:
Also, you could make loadfile a method on the class, depending on what you're doing.

Here's a solution that uses a class decorator to defer initialisation until the first time an object is used:
def lazyload(cls):
    original_init = cls.__init__
    original_getattribute = cls.__getattribute__

    def newinit(self, *args, **kwargs):
        # Just cache the arguments for the eventual initialization.
        self._init_args = args
        self._init_kwargs = kwargs
        self.initialized = False
    newinit.__doc__ = original_init.__doc__

    def performinit(self):
        # We call object's __getattribute__ rather than super(...).__getattribute__
        # or original_getattribute so that no custom __getattribute__ implementations
        # can interfere with what we are doing.
        original_init(self,
                      *object.__getattribute__(self, "_init_args"),
                      **object.__getattribute__(self, "_init_kwargs"))
        del self._init_args
        del self._init_kwargs
        self.initialized = True

    def newgetattribute(self, name):
        if not object.__getattribute__(self, "initialized"):
            performinit(self)
        return original_getattribute(self, name)

    if hasattr(cls, "__getitem__"):
        original_getitem = cls.__getitem__
        def newgetitem(self, key):
            if not object.__getattribute__(self, "initialized"):
                performinit(self)
            return original_getitem(self, key)
        newgetitem.__doc__ = original_getitem.__doc__
        cls.__getitem__ = newgetitem

    if hasattr(cls, "__len__"):
        original_len = cls.__len__
        def newlen(self):
            if not object.__getattribute__(self, "initialized"):
                performinit(self)
            return original_len(self)
        newlen.__doc__ = original_len.__doc__
        cls.__len__ = newlen

    cls.__init__ = newinit
    cls.__getattribute__ = newgetattribute
    return cls
@lazyload
class FileLoader(dict):
    def __init__(self, filename):
        self.filename = filename
        print "Performing expensive load operation"
        self[32] = "Felix"
        self[33] = "Eeek"
kittens = FileLoader("kitties.csv")
print "kittens is instance of FileLoader: %s" % isinstance(kittens, FileLoader) # Well obviously
print len(kittens) # Wait
print kittens[32] # No wait
print kittens[33] # No wait
print kittens.filename # Still no wait
print kittens.filename
The output:
kittens is instance of FileLoader: True
Performing expensive load operation
2
Felix
Eeek
kitties.csv
kitties.csv
I tried to actually restore the original magic methods after the initialization, but it wasn't working out. It may be necessary to proxy additional magic methods; I didn't investigate every scenario.
Note that kittens.initialized will always return True because it kicks off the initialization if it hasn't already been performed. Obviously it would be possible to add an exemption for this attribute so that it would return False if no other operation had been performed on the object, or the check could be changed to the equivalent of a hasattr call and the initialized attribute could be deleted after the initialization.
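For example, the hasattr-style check mentioned above could look roughly like this (a sketch only; performinit could then drop the initialized flag entirely):
def newgetattribute(self, name):
    # Treat the lingering _init_args attribute as the "not yet
    # initialized" marker instead of a dedicated boolean flag.
    try:
        object.__getattribute__(self, "_init_args")
    except AttributeError:
        pass  # args already consumed: initialization has happened
    else:
        performinit(self)
    return original_getattribute(self, name)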

Here's a hack that makes the "even better" solution work, but I think it's annoying enough that it's probably better to just use the first solution. The idea is to execute the step self = loadfile(self.FILE) by passing the variable name as an attribute:
class lazyload:
    def __init__(self, FILE, var):
        self.FILE = FILE
        self.var = var
    def __getattr__(self, name):
        x = loadfile(self.FILE)
        globals()[self.var] = x
        return object.__getattribute__(x, name)
Then you can do
kitties = lazyload("kitties.csv", "kitties")  # the second argument must match
                                              # the variable name exactly
After you call any method on kitties (aside from kitties.FILE or kitties.var), it will become completely indistinguishable from what you'd have gotten with kitties = loadfile("kitties.csv"). In particular, it will no longer be an instance of lazyload and kitties.FILE and kitties.var will no longer exist.
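One caveat worth adding: globals() only rebinds module-level names, so the hack silently fails inside a function (a sketch; the names here are hypothetical):
def inspect_kitties():
    local_kitties = lazyload("kitties.csv", "local_kitties")
    len(local_kitties)   # loads the file, but creates a new *global*
                         # local_kitties instead of rebinding the local,
                         # so every later access loads the file again
    return local_kitties[32]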

If you need to use puppies[32], you also need to define a __getitem__ method, because __getattr__ doesn't catch that behaviour.
I implemented lazy loading for my own needs; here is the (non-adapted) code:
class lazy_mask(object):
    '''Fake object, which is substituted in
    place of the masked object'''
    def __init__(self, master, id):
        self.master = master
        self.id = id
        self._result = None
        self.master.add(self)
    def _res(self):
        '''Run the lazy job'''
        if self._result is None:
            self._result = self.master.get(self.id)
        return self._result
    def __getattribute__(self, name):
        '''proxy all queries to the masked object'''
        name = name.replace('_lazy_mask', '')
        #print 'attr', name
        if name in ['_result', '_res', 'master', 'id']:  # don't proxy requests for own properties
            return super(lazy_mask, self).__getattribute__(name)
        else:  # but proxy requests for the masked object
            return self._res().__getattribute__(name)
    def __getitem__(self, key):
        '''provide object["key"] access. Else it can raise
        TypeError: 'lazy_mask' object is unsubscriptable'''
        return self._res().__getitem__(key)
(master is a registry object that loads the data when its get() method is run)
This implementation works OK with isinstance(), str(), and json.dumps().
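For context, a minimal hypothetical registry the snippet above could plug into (the poster's real master object is not shown):
class Registry(object):
    # Hypothetical stand-in for the poster's registry object.
    def __init__(self):
        self.pending = []
    def add(self, mask):
        self.pending.append(mask)  # remember the deferred job
    def get(self, id):
        # pretend this is the expensive load, keyed by id
        return {"id": id, "name": "Froufrou"}

reg = Registry()
cat = lazy_mask(reg, 42)  # instant; nothing loaded yet
print(cat["name"])        # first access runs Registry.get()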

Related

How would I 'listen' to/decorate a setter from an imported class

I'm not sure whether this is a great approach to be using, but I'm not hugely experienced with Python so please accept my apologies. I've tried to do some research on this but other related questions have been given alternative problem-specific solutions - none of which apply to my specific case.
I have a class that handles the training/querying of my specific machine learning model. This algorithm is running on a remote sensor, various values are fed into the object which returns None if the algorithm isn't trained. Once trained, it returns either True or False depending on the classification assigned to new inputs. Occasionally, the class updates a couple of threshold parameters and I need to know when this occurs.
I am using sockets to pass messages from the remote sensor to my main server. I didn't want to complicate the ML algorithm class by filling it up with message passing code and so instead I've been handling this in a Main class that imports the "algorithm" class. I want the Main class to be able to determine when the threshold parameters are updated and report this back to the server.
class MyAlgorithmClass:
    def feed_value(self):
        ...

class Main:
    def __init__(self):
        self._algorithm_data = MyAlgorithmClass()
        self._sensor_data_queue = Queue()

    def process_data(self):
        while True:
            sensor_value = self._sensor_data_queue.get()
            result, value = self._algorithm_data.feed_value(sensor_value)
            if result is None:
                # value represents % training complete
                self._socket.emit('training', value)
            elif result is True:
                # value represents % chance that input is categoryA
                self._socket.emit('categoryA', value)
            elif result is False:
                ...
My initial idea was to add a property to MyAlgorithmClass with a setter. I could then decorate this in my Main class so that every time the setter is called, I can use the value... for example:
class MyAlgorithmClass:
    @property
    def param1(self):
        return self._param1

    @param1.setter
    def param1(self, value):
        self._param1 = value

class Main:
    def __init__(self):
        self._algorithm_data = MyAlgorithmClass()
        self._sensor_data_queue = Queue()

    def watch_param1(func):
        def inner(*args):
            self._socket.emit('param1_updated', *args)
            func(*args)
My problem now is: how do I decorate the self._algorithm_data.param1 setter with watch_param1? If I simply set self._algorithm_data.param1 = watch_param1 then I will just end up setting self._algorithm_data._param1 equal to my function, which isn't what I want to do.
I could use getter/setter methods instead of a property, but this isn't very pythonic and as multiple people are modifying this code, I don't want the methods to be replaced/changed for properties by somebody else later on.
What is the best approach here? This is a small example but I will have slightly more complex examples of this later on and I don't want something that will cause overcomplication of the algorithm class. Obviously, another option is the Observer pattern but I'm not sure how appropriate it is here where I only have a single variable to monitor in some cases.
I'm really struggling to get a good solution put together so any advice would be much appreciated.
Thanks in advance,
Tom
Use descriptors. They let you customize attribute lookup, storage, and deletion in Python.
A simplified toy version of your code with descriptors looks something like:
class WatchedParam:
    def __init__(self, name):
        self.name = name

    def __get__(self, instance, insttype=None):
        print(f"{self.name} : value accessed")
        return getattr(instance, '_' + self.name)

    def __set__(self, instance, new_val):
        print(f"{self.name} : value set")
        setattr(instance, '_' + self.name, new_val)

class MyAlgorithmClass:
    param1 = WatchedParam("param1")
    param2 = WatchedParam("param2")

    def __init__(self, param1, param2, param3):
        self.param1 = param1
        self.param2 = param2
        self.param3 = param3

class Main:
    def __init__(self):
        self._data = MyAlgorithmClass(10, 20, 50)

m = Main()
m._data.param1        # calls WatchedParam.__get__
m._data.param2 = 100  # calls WatchedParam.__set__
The WatchedParam class is a descriptor and can be used in MyAlgorithmClass to specify the parameters that need to be monitored.
The solution I went for is as follows, using a 'Proxy' subclass which overrides the properties. Eventually, once I have a better understanding of the watched parameters, I won't need to watch them anymore. At this point I will be able to swap out the Proxy for the base class and continue using the code as normal.
class MyAlgorithmClassProxy(MyAlgorithmClass):
    @property
    def watch_param1(self):
        return MyAlgorithmClass.watch_param1.fget(self)

    @watch_param1.setter
    def watch_param1(self, value):
        self._socket.emit('param1_updated', value)
        MyAlgorithmClass.watch_param1.fset(self, value)

Does circular reference between two objects requires the use of weakref?

I am trying to implement something that works on the principle below:
from weakref import WeakValueDictionary

class Container(object):
    def __init__(self):
        self.dic = WeakValueDictionary({})

    def put_in(self, something):
        self.dic[something] = Thing(self, something)

class Thing(object):
    def __init__(self, container, name):
        self.container = container
        self.name = name

    def what_I_am(self):
        print("I am a thing called {}".format(self.name))

pot = Container()
pot.put_in('foo')
pot.dic['foo'].what_I_am()
But I get:
File "C:/Users/jacques/ownCloud/dev/weakref.py", line 26, in <module>
    pot.dic['foo'].what_I_am()
File "C:\Program Files\Anaconda3\lib\weakref.py", line 131, in __getitem__
    o = self.data[key]()
KeyError: 'foo'
I understand that my implementation is not correct because the Thing instance gets GCed and deleted from the WeakValueDictionary.
Is there any way I could achieve something like this to prevent the circular reference between Container and Thing ?
Edit: If I change the code above for the one below, would it solve the circular reference issue?
from weakref import proxy

class Container(dict):
    def put_in(self, something):
        self[something] = Thing(self)

class Thing(object):
    def __init__(self, container):
        self.container = proxy(container)

    def what_is_it(self):
        print("I am a thing called {}".format(self))

    def __getattr__(self, name):
        try:  # Look up the Thing instance first
            return object.__getattribute__(self, name)
        except AttributeError:  # Try to find the attribute in container
            return self.container.__getattribute__(name)

    def __format__(self, spec):
        (name,) = (key for key, val in self.container.items() if self == val)
        return name

pot = Container()
pot.location = 'Living room'
pot.put_in('foo')
pot['foo'].what_is_it()
print(pot['foo'].location)
The very point of WeakValueDictionary is that its keys are automatically deleted once the objects are no longer in use.
Immediately after
self.dic[thing] = Thing(self)
there is no reference to the Thing object outside of the WeakValueDictionary anymore, so the behavior you are seeing is correct and expected.
If you expect the key to be reachable, replace WeakValueDictionary with the regular dict. Alternatively, make sure there is a reference to the thing, for instance by returning it or referencing it somewhere else.
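For instance, a sketch of the "reference it somewhere else" variant, reusing Thing and the WeakValueDictionary import from the question:
class Container(object):
    def __init__(self):
        self.dic = WeakValueDictionary()

    def put_in(self, something):
        thing = Thing(self, something)
        self.dic[something] = thing
        return thing  # the caller now decides how long it lives

pot = Container()
foo = pot.put_in('foo')     # strong reference held by the caller
pot.dic['foo'].what_I_am()  # the entry is still alive
del foo                     # once that reference goes, so does the entry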
You do not need to worry about circular references. Python is fully capable of managing its own memory in this case, and will delete objects with circular references as and when necessary.
Your implementation need only look like this:
class Container(dict):
    def put_in(self, something):
        self[something] = Thing(self, something)

class Thing:
    def __init__(self, container, name):
        self.container = container
        self.name = name

    def what_is_it(self):
        assert self.container[self.name] is self, "Thing stored under wrong name"
        print("I am a thing called {}".format(self.name))

    def __getattr__(self, name):
        # By the time __getattr__ is called, normal attribute access on Thing has
        # already failed. So, no need to check again. Go straight to checking the
        # container
        try:
            return getattr(self.container, name)
        except AttributeError as e:
            # raise a fresh attribute error to make it clearer that the
            # attribute was initially accessed on a Thing object
            raise AttributeError("'Thing' object has no attribute {!r}".format(name)) from e
A quick test to show you how things work:
c = Container()
c.put_in("test")
c.value = 0

# Attribute demonstration
c["test"].what_is_it()
t = c["test"]
print("name:", t.name)    # get a Thing attribute
print("value:", t.value)  # get a Container attribute
c.name = "another name"
print("Thing name:", t.name)  # gets Thing attrs in preference to Container attrs

# Garbage collection demonstration
import weakref
import gc

r = weakref.ref(c["test"])
del c, t
# no non-weak references to t exist anymore
print(r())  # but Thing object not deleted yet
# collecting circular references is non-trivial so Python does this infrequently
gc.collect()  # force a collection
print(r())  # Thing object has now been deleted

Use an object method with the Initializer (Same line)

I'm cleaning up a Python object class, focusing mainly on how the object is created. The __init__ method creates an empty dictionary that needs to be filled almost instantly. But this should NOT happen within the __init__, as the method used will vary widely. Here's an example:
class Storage:
    def __init__(self):
        self.data = {}

    def fill_1(self):
        self.data['solo'] = 'all alone'

    def fill_2(self, buddy, bff):
        self.data['buddy'] = buddy
        self.data['bff'] = bff

    def fill_3(self, that_guy, house):
        self.data[that_guy] = house
Normally, I can just call one after the other like so:
box = Storage.Storage()
box.fill_1()
However, this can be overwhelming when I create many of these objects sequentially. My goal is to use the __init__ method with one of the fill methods on the same line. I've tried using the call below:
box = Storage.Storage().fill_1()
But this does not create the object and instead returns None. So I have two questions:
Is my code returning a None object because the line is calling an instance method?
And how can I create the Storage object and then call its fill method within the same line?
This is not an idiom you tend to see that often in Python (though it's quite prevalent in many other languages, especially JavaScript), but you could do this by returning self from the mutator functions. (It looks like you were missing the self references on data in the instance methods as well.) This means you could also chain mutator calls -- Storage().fill_1().fill_2()
class Storage(object):
    def __init__(self):
        super(Storage, self).__init__()
        self.data = {}

    def fill_1(self):
        self.data['solo'] = 'all alone'
        return self

    def fill_2(self, buddy, bff):
        self.data['buddy'] = buddy
        self.data['bff'] = bff
        return self

    def fill_3(self, that_guy, house):
        self.data[that_guy] = house
        return self
box = Storage().fill_1()
Make alternate constructors:
class Storage(object):
    def __init__(self):
        self.data = {}

    @staticmethod
    def filled_1():
        obj = Storage()
        obj.data['solo'] = 'all alone'
        return obj

    @staticmethod
    def filled_2(buddy, bff):
        obj = Storage()
        obj.data['buddy'] = buddy
        obj.data['bff'] = bff
        return obj

    @staticmethod
    def filled_3(that_guy, house):
        obj = Storage()
        obj.data[that_guy] = house
        return obj
Then you don't need to worry about separate creation and initialization calls, or muddle command-query separation with call chaining:
obj1 = Storage.filled_1()
obj2 = Storage.filled_2('Jenny', 'Joe')
...
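An aside not in the original answer: a @classmethod variant of the same idea is worth sketching, since it lets subclasses construct instances of themselves:
class Storage(object):
    def __init__(self):
        self.data = {}

    @classmethod
    def filled_1(cls):
        obj = cls()  # cls, not Storage, so subclasses work too
        obj.data['solo'] = 'all alone'
        return obj

box = Storage.filled_1()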

Unclear descriptor caller reference evaluation

I am using Python descriptors to create complex interfaces on host objects.
I don't get the behaviour I would intuitively expect when I run code such as this:
class Accessor(object):
    def __get__(self, inst, instype):
        self._owner = inst
        return self

    def set(self, value):
        self._owner._val = value

    def get(self):
        if hasattr(self._owner, '_val'):
            return self._owner._val
        else:
            return None

class TestClass(object):
    acc = Accessor()

source = TestClass()
destination = TestClass()

source.acc.set('banana')
destination.acc.set('mango')

destination.acc.set(source.acc.get())
print destination.acc.get()
# Result: mango
I would expect in this case for destination.acc.get() to return 'banana', not 'mango'.
However, the intention (to copy _val from 'source' to 'destination') works if the code is refactored like this:
val = source.acc.get()
destination.acc.set(val)
print destination.acc.get()
# Result: banana
What is it that breaks down the 'client' reference passed through get if descriptors are used in a single line versus broken into separate lines? Is there a way to get the behaviour I would intuitively expect?
Many thanks in advance.
K
Your implementation ALMOST works. The problem with it comes up with destination.acc.set(source.acc.get()). What happens is that it first looks up destination.acc, which will set _owner to destination, but before it can call set(), it has to resolve the parameter, source.acc.get(), which will end up setting acc's _owner to source.
Since destination.acc and source.acc are the same object (descriptors are stored on the class, not the instance), you're calling set() on it after its _owner is set to source. That means you're setting source._val, not destination._val.
The way to get the behavior you would intuitively expect is to get rid of your get() and set() and replace them with __get__() and __set__(), so that your descriptor can be used for the reason a descriptor is used.
class Accessor(object):
    def __get__(self, instance, owner):  # you should use the conventional parameter names
        if instance is None:
            return self
        else:
            return instance._val

    def __set__(self, instance, value):
        instance._val = value
Then you could rewrite your client code as
source = TestClass()
destination = TestClass()
source.acc = 'banana'
destination.acc = 'mango'
destination.acc = source.acc
print destination.acc
The point of descriptors is to remove explicit getter and setter calls with implicit ones that look like simple attribute use. If you still want to be using your getters and setters on Accessor, then don't make it a descriptor. Do this instead:
class Accessor(object):
    def get(self):
        if hasattr(self, '_val'):
            return self._val
        else:
            return None

    def set(self, val):
        self._val = val
Then rewrite TestClass to look more like this:
class TestClass(object):
    def __init__(self):
        self.acc = Accessor()
After that, your original client code would work.
I already said why it's not working in my other post. So, here's a way to use a descriptor while still retaining your get() and set() methods.
class Accessor(object):
    def __get__(self, instance, owner):
        if instance is None:
            return self
        elif not hasattr(instance, '_val'):
            setattr(instance, '_val', Acc())
        return getattr(instance, '_val')

class Acc(object):
    def get(self):
        if hasattr(self, '_val'):
            return self._val
        else:
            return None

    def set(self, val):
        self._val = val

class TestClass(object):
    acc = Accessor()
The trick is to move the get() and set() methods to a new class that is returned instead of returning self from the descriptor.
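With that split in place, the original client code from the question should behave as intuitively expected (a quick check, not part of the original answer):
source = TestClass()
destination = TestClass()

source.acc.set('banana')      # each instance now gets its own Acc
destination.acc.set('mango')

destination.acc.set(source.acc.get())
print destination.acc.get()
# Result: banana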

Mapping obj.method({argument:value}) to obj.argument(value)

I don't know if this will make sense, but...
I'm trying to dynamically assign methods to an object.
#translate this
object.key(value)
#into this
object.method({key:value})
To be more specific in my example, I have an object (which I didn't write), let's call it motor, which has some generic methods set, status and a few others. Some take a dictionary as an argument and some take a list. To change the motor's speed, and see the result, I use:
motor.set({'move_at':10})
print motor.status('velocity')
The motor object, then formats this request into a JSON-RPC string, and sends it to an IO daemon. The python motor object doesn't care what the arguments are, it just handles JSON formatting and sockets. The strings move_at and velocity are just two of what might be hundreds of valid arguments.
What I'd like to do is the following instead:
motor.move_at(10)
print motor.velocity()
I'd like to do it in a generic way since I have so many different arguments I can pass. What I don't want to do is this:
# create a new function for every possible argument
def move_at(self, x):
    return self.set({'move_at': x})
def velocity(self):
    return self.status('velocity')
# ...and a hundred more
I did some searching on this which suggested the solution lies with lambdas and meta programming, two subjects I haven't been able to get my head around.
UPDATE:
Based on the code from user470379 I've come up with the following...
# This is what I have now....
class Motor(object):
    def set(self, a_dict):
        print "Setting a value", a_dict
    def status(self, a_list):
        print "requesting the status of", a_list
        return 10

# Now to extend it....
class MyMotor(Motor):
    def __getattr__(self, name):
        def special_fn(*value):
            # What we return depends on how many arguments there are.
            if len(value) == 0: return self.status((name))
            if len(value) == 1: return self.set({name: value[0]})
        return special_fn

    def __setattr__(self, attr, value):  # This is based on some other answers
        self.set({attr: value})

x = MyMotor()
x.move_at = 20  # Uses __setattr__
x.move_at(10)   # May remove this style from __getattr__ to simplify code.
print x.velocity()
output:
Setting a value {'move_at': 20}
Setting a value {'move_at': 10}
10
Thank you to everyone who helped!
What about creating your own __getattr__ for the class that returns a function created on the fly? IIRC, there are some tricky cases to watch out for between __getattr__ and __getattribute__ that I don't recall off the top of my head; I'm sure someone will post a comment to remind me:
def __getattr__(self, name):
    def set_fn(value):
        return self.set({name: value})
    return set_fn
Then what should happen is that calling an attribute that doesn't exist (ie: move_at) will call the __getattr__ function and create a new function that will be returned (set_fn above). The name variable of that function will be bound to the name parameter passed into __getattr__ ("move_at" in this case). Then that new function will be called with the arguments you passed (10 in this case).
Edit
A more concise version using lambdas (untested):
def __getattr__(self, name):
    return lambda value: self.set({name: value})
There are a lot of different potential answers to this, but many of them will probably involve subclassing the object and/or writing or overriding the __getattr__ function.
Essentially, the __getattr__ function is called whenever Python can't find an attribute in the usual way.
Assuming you can subclass your object, here's a simple example of what you might do (it's a bit clumsy but it's a start):
class foo(object):
    def __init__(self):
        print "initting " + repr(self)
        self.a = 5
    def meth(self):
        print self.a

class newfoo(foo):
    def __init__(self):
        super(newfoo, self).__init__()
        def meth2():  # Or, use a lambda: ...
            print "meth2: " + str(self.a)  # but you don't have to
        self.methdict = {"meth2": meth2}
    def __getattr__(self, name):
        return self.methdict[name]
f = foo()
g = newfoo()
f.meth()
g.meth()
g.meth2()
Output:
initting <__main__.foo object at 0xb7701e4c>
initting <__main__.newfoo object at 0xb7701e8c>
5
5
meth2: 5
You seem to have certain "properties" of your object that can be set by
obj.set({"name": value})
and queried by
obj.status("name")
A common way to go in Python is to map this behaviour to what looks like simple attribute access. So we write
obj.name = value
to set the property, and we simply use
obj.name
to query it. This can easily be implemented using the __getattr__() and __setattr__() special methods:
class MyMotor(Motor):
    def __init__(self, *args, **kw):
        self._init_flag = True
        Motor.__init__(self, *args, **kw)
        self._init_flag = False

    def __getattr__(self, name):
        return self.status(name)

    def __setattr__(self, name, value):
        if self._init_flag or hasattr(self, name):
            return Motor.__setattr__(self, name, value)
        return self.set({name: value})
Note that this code disallows the dynamic creation of new "real" attributes of Motor instances after the initialisation. If this is needed, corresponding exceptions could be added to the __setattr__() implementation.
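One possible shape for such an exception, assuming (purely as an illustration) that underscore-prefixed names should remain real attributes:
def __setattr__(self, name, value):
    # Underscore-prefixed names bypass the proxying, so new internal
    # state can still be created after initialisation.
    if self._init_flag or name.startswith('_') or hasattr(self, name):
        return Motor.__setattr__(self, name, value)
    return self.set({name: value})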
Instead of setting with function-call syntax, consider using assignment (with =). Similarly, just use attribute syntax to get a value, instead of function-call syntax. Then you can use __getattr__ and __setattr__:
class OtherType(object):  # this is the one you didn't write
    # dummy implementations for the example:
    def set(self, D):
        print "setting", D
    def status(self, key):
        return "<value of %s>" % key

class Blah(object):
    def __init__(self, parent):
        object.__setattr__(self, "_parent", parent)
    def __getattr__(self, attr):
        return self._parent.status(attr)
    def __setattr__(self, attr, value):
        self._parent.set({attr: value})
obj = Blah(OtherType())
obj.velocity = 42 # prints setting {'velocity': 42}
print obj.velocity # prints <value of velocity>
