I would like to create a data structure that behaves like a dictionary with one added functionality which is to keep track of which keys have been "consumed". Please note that I can't just pop the values as they are being reused.
The structure should support these three cases, i.e. mark the key as consumed when accessed as:
if key in d:
...
d[key]
d.get(key)
This is what I have written:
class DictWithMemory(dict):
    """A dict that remembers which keys have been "consumed".

    A key counts as consumed when it is looked up via ``d[key]``,
    ``key in d`` or ``d.get(key)``.  Values are never removed, so they
    can be re-read any number of times.
    """

    def __init__(self, *args, **kwargs):
        # Every key ever requested (including keys that were missing).
        self.memory = set()
        # FIX: don't ``return`` the result of __init__ — it is always None
        # and returning it from __init__ is misleading.
        super(DictWithMemory, self).__init__(*args, **kwargs)

    def __getitem__(self, key):
        self.memory.add(key)
        return super(DictWithMemory, self).__getitem__(key)

    def __contains__(self, key):
        self.memory.add(key)
        return super(DictWithMemory, self).__contains__(key)

    def get(self, key, d=None):
        self.memory.add(key)
        return super(DictWithMemory, self).get(key, d)

    def unused_keys(self):
        """Return the *set* of keys that have never been accessed.

        (FIX: the original docstring claimed a list was returned; a set is
        what the code actually produces, and the interface is kept.)
        """
        return set(self.keys()).difference(self.memory)
As I am not very familiar with the internals of dict, is there a better way to achieve this result?
Here's a solution that abstracts everything away inside a metaclass. I'm not sure if this is really any more elegant, but it does provide some amount of encapsulation should you change your mind about how to store the used keys:
class KeyRememberer(type):
    """Metaclass that makes instances record every key they are asked about.

    Instances of a class built with this metaclass get a ``memory`` set,
    updated on each call to ``__getitem__``, ``__contains__`` or ``get``.
    """

    def __new__(meta, classname, bases, classDict):
        cls = type.__new__(meta, classname, bases, classDict)

        # Inject an __init__ that equips each instance with its own
        # set of remembered keys before delegating to the base class.
        def __init__(self, *args, **kwargs):
            self.memory = set()
            return super(cls, self).__init__(*args, **kwargs)

        cls.__init__ = __init__

        # Decorator factory: record the requested key, then delegate to
        # the original (inherited) implementation.
        def remember(f):
            def _(self, key, *args, **kwargs):
                self.memory.add(key)
                return f(self, key, *args, **kwargs)
            return _

        # Patch each key-taking accessor with its remembering wrapper.
        for method_name in ('__getitem__', '__contains__', 'get'):
            setattr(cls, method_name, remember(getattr(cls, method_name)))
        return cls
# FIX: on Python 3 the ``__metaclass__`` class attribute is silently
# ignored (so ``memory`` would never exist) — the metaclass must be given
# with the ``metaclass=`` keyword; the py2 ``print "Used"`` statement is
# also a py3 syntax error.
class DictWithMemory(dict, metaclass=KeyRememberer):
    """Dict whose metaclass (KeyRememberer) keeps a set called ``memory``
    updated on each call to __getitem__, __contains__, or get."""

    def unused_keys(self):
        """
        Returns the list of unused keys.
        """
        print("Used", self.memory)
        return list(set(super(DictWithMemory,
                              self).keys()).difference(self.memory))
Related
I am attempting to set attributes on a dict key and have a feeling my approach is wrong. How would I get the current key inside alfa? Alfa in production will be used to create and update a dataclass.
class Dictn(dict):
    # NOTE(review): this is the question's (broken) attempt, kept verbatim.
    def __init__(self, *args, **kwds):
        super(Dictn, self).__init__(*args, **kwds)
        # Alias the instance __dict__ to the dict itself so attribute
        # access and item access share the same storage.
        self.__dict__ = self

    def __getitem__(self, key):
        # Auto-create missing keys with a None value.
        if key not in self.keys():
            self.__setitem__(key, None)
        # The looked-up value is computed and discarded; ``self`` is
        # returned instead so the caller can chain .alfa()/.beta() —
        # but the accessed key itself is lost, which is the question.
        dict.__getitem__(self, key)
        return self

    def alfa(self, **kwds):
        ''' Need current key to set attributes on it '''
        # NOTE(review): __setattr__ takes (name, value) positionally, so
        # calling it with **kwds raises TypeError — part of why this code
        # does not work as hoped.
        self.__setattr__(**kwds)

    def beta(self, **kwds):
        ''' Need current key to set attributes on it '''
        self.__setattr__(**kwds)
dictn = Dictn()
dictn['ABC'].alfa(es=1, te=2)
dictn['ABC'].beta(es=3, te=4)
A dictionary stores objects that can be recovered as key-value pairs. (Remember, everything in python is an object.) If you want those objects to have additional attributes, you need to edit them directly, not the dict class which only acts as a container. The dict doesn't really care what it stores, and it certainly isn't (and shouldn't be) responsible for handling any properties on its contents.
(By the way, dictn["ABC"] represents a value in the dictionary, not a key. The key here is "ABC". I'm not sure if you're just confusing the definitions here or if you mean something else entirely.)
You could set an instance variable to the key used in __getitem__().
class Dictn(dict):
    """Dict that records the most recently requested key in ``current_key``."""

    def __init__(self, *args, **kwds):
        super(Dictn, self).__init__(*args, **kwds)
        # Share storage between items and attributes.
        self.__dict__ = self

    def __getitem__(self, key):
        # Auto-create missing keys with a None value.
        if key not in self.keys():
            self.__setitem__(key, None)
        # Remember which key was requested so alfa()/beta() can use it.
        self.current_key = key
        dict.__getitem__(self, key)
        return self

    def alfa(self, **kwds):
        # FIX: the original used C-style ``//`` comments, which are a
        # SyntaxError in Python — they must be ``#`` comments.
        # use self.current_key to get current_key to set attributes on it
        self.__setattr__(**kwds)

    def beta(self, **kwds):
        # use self.current_key to get current_key to set attributes on it
        self.__setattr__(**kwds)
Solution is to use a class as an attribute setter:
from dataclasses import dataclass  # needed for the @dataclass decorator below


# FIX: the post rendered the decorator as ``#dataclass()`` (the "@" was
# mangled to "#"), which turned it into a comment — Position would then be
# a plain class and Position(es=1, te=2) would raise TypeError.
@dataclass()
class Position:
    """Attribute holder for a single position."""
    es: int = None
    te: int = None


class ValueObj():
    """Per-key value object; each method attaches a Position under its own name."""

    def alfa(self, **kwds):
        setattr(self, 'alfa', Position(**kwds))

    def beta(self, **kwds):
        setattr(self, 'beta', Position(**kwds))


class Dictn(dict):
    def __init__(self, *args, **kwds):
        super(Dictn, self).__init__(*args, **kwds)
        self.__dict__ = self

    def __getitem__(self, key):
        # Auto-create a ValueObj for unknown keys so attributes can be set
        # immediately: dictn['ABC'].alfa(es=1, te=2)
        if key not in self.keys():
            self.__setitem__(key,
                             ValueObj(),
                             )
        return dict.__getitem__(self, key)
dictn = Dictn()
dictn['ABC'].alfa(es=1, te=2)
dictn['ABC'].alfa
>> Position(es=1, te=2)
This answer was posted as an edit to the question Attributes as dictionary values by the OP misantroop under CC BY-SA 4.0.
Here is a simple code of attribute dict:
class AttrDict(dict):
    """Dictionary whose items are also reachable as attributes."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Point the instance __dict__ at the dict itself, so attribute
        # lookups and item lookups hit the same storage.
        self.__dict__ = self
More context:
https://stackoverflow.com/a/14620633/1179925
I can use it like:
d = AttrDict({'a':1, 'b':2})
print(d)
I want this to be possible:
d.b = 10
print(d)
But I want this to be impossible:
d.c = 4
print(d)
Is it possible to throw an error on new key creation?
You could check if they are already in there
class AttrDict(dict):
    """Attribute-style dict that refuses to grow new keys after creation."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__ = self

    def __setattr__(self, key, value):
        # '__dict__' must stay assignable for the aliasing trick in __init__.
        if key == '__dict__' or key in self.keys():
            super().__setattr__(key, value)
        else:
            raise KeyError('No new keys allowed')

    def __setitem__(self, key, value):
        # Only existing keys may be reassigned via item syntax.
        if key in self:
            super().__setitem__(key, value)
        else:
            raise KeyError('No new keys allowed')
First I thought this would be a bad idea since no initial values could be added but from the builtins it states the following:
dict(iterable) -> new dictionary initialized as if via:
d = {}
for k, v in iterable:
d[k] = v
So this does allow you to change the methods without them having effect on the initialization of the Class as it creates a new one from {} instead of from its own instance.
Note that users will always be able to reassign `__dict__` itself, though.
You can override the __setattr__ special method.
class AttrDict(dict):
    """Attribute-style dict: existing keys may be reassigned, but creating
    new attributes after construction raises KeyError."""

    def __init__(self, *args, **kwargs):
        # Bypass our own __setattr__ while the guard flag is being created.
        super(AttrDict, self).__setattr__('_initializing', True)
        super(AttrDict, self).__init__(*args, **kwargs)
        # Alias the instance __dict__ to the dict itself; from here on,
        # attribute access and item access share storage.
        self.__dict__ = self
        # Note: this lands in the dict itself, so '_initializing' also
        # appears as a key of the mapping.
        super(AttrDict, self).__setattr__('_initializing', False)

    def __setattr__(self, x, value):
        if x == '_initializing':
            # FIX: the original message misspelled the attribute as
            # "_initalizing".
            raise KeyError("You should not edit _initializing")
        if self._initializing or x in self.__dict__.keys():
            super(AttrDict, self).__setattr__(x, value)
        else:
            raise KeyError("No new keys allowed!")
Note that I needed to add an attribute _initializing to let __setattr__ distinguish between attributes created by __init__ and attributes created by users.
Since python does not have private attributes, users might still set _initializing to True and then add their attributes to the AttrDict instance, so I added a further check to be sure that they are not trying to edit _initializing.
It is still not 100% safe, since an user could still use super() to set _initializing to True.
I am trying to expose the class's dictionary, making it both subscriptable and iterable through the dict values. Here is the class:
class ExampleClass():
    """Stores all constructor args/kwargs as instance attributes and exposes
    them through item access (``cls['id']``)."""

    def __init__(self, *args, **kwargs):
        # FIX: was ``self.kwargs.items()``, which raises AttributeError —
        # the local name ``kwargs`` holds the keyword arguments.
        for key, value in kwargs.items():
            setattr(self, key, value)
        for arg in args:
            # Non-string positionals are stored under their str() name.
            setattr(self, arg, arg) if isinstance(arg, str) else setattr(self, str(arg), arg)

    def __str__(self):
        return 'This is the example class'

    def __getitem__(self, obj):
        return self.__dict__[obj]

    def __len__(self):
        return len(self.__dict__.items())
If we create an instance and pass in these values :
cls = ExampleClass(123456,'cash', name='newexample', id=1)
This will store all of the args and kwargs as instance attributes, and using the syntax cls['id'] will return 1 as expected. But when I use the syntax for i in cls: print(i) I get a KeyError : KeyError : 0
How can I make this object's dict both subscriptable and iterable ?
You need to implement the __iter__ method.
class ExampleClass():
    """Attribute store that is both subscriptable and iterable: iteration
    walks the attribute names, like a dict."""

    def __init__(self, *args, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
        for arg in args:
            # String positionals keep their own name; anything else is
            # stored under its str() representation.
            if isinstance(arg, str):
                setattr(self, arg, arg)
            else:
                setattr(self, str(arg), arg)

    def __str__(self):
        return 'This is the example class'

    def __getitem__(self, obj):
        return self.__dict__[obj]

    def __len__(self):
        return len(self.__dict__.items())

    def __iter__(self):
        # Delegate to the attribute dict's iterator.
        return iter(self.__dict__)
cls = ExampleClass(123456,'cash', name='newexample', id=1)
print(cls['cash'])
print(cls['name'])
for i in cls: print(i)
This is the method which is called to create an iterator for your type so that it can be iterated. Your underlying dict already implements it, so you're sort of just proxying it here.
To make a class subscriptable, it must implement `__getitem__()`; it may or may not implement `__iter__()`.
At the same time, an iterable must implement `__iter__()`.
To check if your class has the required method, perform print(dir(your_class)), and look for the respective dunder function.
If you don't have one, create it.
I have classes like this:
class Tool(object):
    # NOTE(review): illustrative stub from the question. ``*args`` has no
    # explicit ``self``; when called on an instance, the instance arrives
    # as args[0].
    def do_async(*args):
        pass
for which I want to automatically generate non-async methods that make use of the async methods:
class Tool(object):
    # Desired end state sketched in the question: every *_async method
    # gets a plain twin that post-processes the async result.
    def do_async(*args):
        pass

    def do(*args):
        # NOTE(review): as written this would raise NameError — neither
        # ``self`` nor ``magical_parser`` is defined in this scope; it is
        # pseudocode for the wrapper to be generated automatically.
        result = self.do_async(*args)
        return magical_parser(result)
This gets to be particularly tricky because each method needs to be accessible as both an object and class method, which is normally achieved with this magical decorator:
class class_or_instance(object):
    """Descriptor that lets one function serve as both a class method and
    an instance method: whichever of (instance, class) is available is
    passed as the first argument."""

    def __init__(self, fn):
        self.fn = fn

    def __get__(self, obj, cls):
        # Bind to the instance when accessed on one, else to the class.
        target = obj if obj is not None else cls
        f = lambda *args, **kwds: self.fn(target, *args, **kwds)
        functools.update_wrapper(f, self.fn)
        return f
How can I make these methods, and make sure they're accessible as both class and object methods? This seems like something that could be done with decorators, but I am not sure how.
(Note that I don't know any of the method names in advance, but I know that all of the methods that need new buddies have _async at the end of their names.)
I think I've gotten fairly close, but this approach does not appropriately set the functions as class/object methods:
def process_asyncs(cls):
    # NOTE(review): the question's attempt, kept verbatim with its bugs
    # annotated.
    methods = cls.__dict__.keys()
    for k in methods:
        methodname = k.replace("_async","")
        if 'async' in k and methodname not in methods:
            # @class_or_instance   (the "@" was rendered as "#" in the post,
            # so the decorator is not actually applied here)
            def method(self, verbose=False, *args, **kwargs):
                # NOTE(review): ``k`` is a late-binding closure — every
                # generated method ends up calling the loop's *final* k.
                response = self.__dict__[k](*args,**kwargs)
                result = self._parse_result(response, verbose=verbose)
                return result
            # NOTE(review): "__docstr__" is not a recognized attribute;
            # the docstring attribute is ``__doc__``.
            method.__docstr__ = ("Returns a table object.\n" +
                                 cls.__dict__[k].__docstr__)
            # NOTE(review): MethodType(method, None, cls) is Python-2-only;
            # on Python 3 MethodType takes (func, instance).
            setattr(cls,methodname,MethodType(method, None, cls))
Do not get the other method from the __dict__; use getattr() instead so the descriptor protocol can kick in.
And don't wrap the method function in a MethodType() object as that'd neutralize the descriptor you put on method.
You need to bind k to the function you generate; a closured k would change with the loop:
# NOTE(review): fragment — ``k``, ``cls`` and ``methodname`` come from the
# surrounding loop in process_asyncs; it is not standalone code.
# @class_or_instance   (the "@" was rendered as "#" when posted)
def method(self, verbose=False, _async_method_name=k, *args, **kwargs):
    # ``k`` is frozen per-method through the default argument, so each
    # generated method keeps its own async twin's name.
    response = getattr(self, _async_method_name)(*args,**kwargs)
    result = self._parse_result(response, verbose=verbose)
    return result
# NOTE(review): on Python 3 a class __dict__ is a read-only mappingproxy,
# so this assignment raises TypeError — setattr(cls, methodname, method)
# is required (as the final version below does).
cls.__dict__[methodname] = method
Don't forget to return cls at the end; I've changed this to use a separate function to create a new scope to provide a new local name _async_method_name instead of a keyword parameter; this avoids difficulties with *args and explicit keyword arguments:
def process_asyncs(cls):
    """Class decorator: for every ``*_async`` method, add a plain twin that
    calls the async version and feeds the response through _parse_result.

    Returns ``cls`` so it can be used as ``@process_asyncs``.
    """
    def create_method(async_method):
        # Separate scope freezes ``async_method`` per generated method,
        # avoiding the late-binding closure bug.
        # @class_or_instance   (decorator as in the post, where the "@"
        # was rendered as "#")
        def newmethod(self, *args, **kwargs):
            # ``verbose`` is extracted by hand so it coexists with *args.
            if 'verbose' in kwargs:
                verbose = kwargs.pop('verbose')
            else:
                verbose = False
            response = async_method(*args, **kwargs)
            result = self._parse_result(response, verbose=verbose)
            return result
        return newmethod

    # FIX: snapshot the keys — on Python 3, ``cls.__dict__.keys()`` is a
    # live view, and the setattr below would raise
    # "RuntimeError: dictionary changed size during iteration".
    methods = list(cls.__dict__.keys())
    for k in methods:
        methodname = k.replace("_async", "")
        if 'async' in k and methodname not in methods:
            async_method = getattr(cls, k)
            setattr(cls, methodname, create_method(async_method))
    return cls
Anyone know of any easy way to track changes to a dictionary object in python? I am at a high level doing crud, so I have a couple methods that handle changing a dictionary, if the dictionary changes I want to call a function to basically do an Observer/Notify.
class MyClass(object):
    """Example CRUD holder that notifies observers when the dict changes."""

    def update(self, item):
        """Replace the stored item with the same id and fire notify().

        Only items whose id is already present are replaced; unknown ids
        are ignored and no notification is sent.
        """
        changed = False
        # FIX: dict.has_key() was removed in Python 3 — use the ``in``
        # operator instead.
        if item.id in self.my_dict:
            self.my_dict[item.id] = item
            changed = True
        if changed:
            self.notify()
What I am trying to avoid is all of the tracking(setting the boolean) code. Was hoping there was an easier way to track changes. This is a simple case, but there could have been more complicated logic that would result in me having to set the changed flag.
You can derive from the dict class and add a callback on any changes. This requires to overwrite any methods that change the dictionary:
class NotifyDict(dict):
    """dict subclass that invokes ``callback`` after every mutating call."""
    __slots__ = ["callback"]

    def __init__(self, callback, *args, **kwargs):
        # The initial contents do not trigger the callback.
        self.callback = callback
        dict.__init__(self, *args, **kwargs)

    def _wrap(method):
        # Build a wrapper that runs the real mutator first, then notifies.
        def notify_after(self, *args, **kwargs):
            outcome = method(self, *args, **kwargs)
            self.callback()
            return outcome
        return notify_after

    # Every dict method that can change the contents gets wrapped.
    __delitem__ = _wrap(dict.__delitem__)
    __setitem__ = _wrap(dict.__setitem__)
    clear = _wrap(dict.clear)
    pop = _wrap(dict.pop)
    popitem = _wrap(dict.popitem)
    setdefault = _wrap(dict.setdefault)
    update = _wrap(dict.update)
Subclass dict and override __setitem__, making sure to call dict.__setitem__ after your stuff to ensure the item actually gets saved.
class Notifier(dict):
    """dict that announces every __setitem__ before storing the value."""

    def __setitem__(self, key, value):
        # FIX: Python-2 print statement is a syntax error on Python 3 —
        # use the print() function.
        print('Something is being set!')
        # Delegate to dict so the item is actually stored.
        dict.__setitem__(self, key, value)