I'm currently in need of a Python container class with functionality similar to the builtin dict type. Basically what I need is a dictionary where an arbitrary number of secondary keys, besides a primary key, map to the very same value. However, when iterating over it, it should yield only the (primary_key, value) pairs, and only the primary keys if the list of keys is requested.
If this has already been implemented, I'd rather not reinvent the wheel. So is there already a module providing such a container? If not, I'll implement it myself.
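For concreteness, here is the behaviour I'm after as a usage sketch (MultipleKeyDict is just a placeholder name here; the answer below implements such a class):
d = MultipleKeyDict()
d['primary'] = 42            # the first key inserted for a value becomes its primary key
d['alias'] = 42              # further keys map to the very same value
assert d['alias'] == d['primary'] == 42
assert list(d.keys()) == ['primary']           # only primary keys are listed
assert list(d.items()) == [('primary', 42)]    # iteration yields (primary_key, value) pairs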
Here is a quick implementation:
import collections

class MultipleKeyDict(dict):
    __slots__ = ["_primary_keys"]

    def __init__(self, arg=None, **kwargs):
        # Maps each value to the primary key it was first inserted under.
        self._primary_keys = {}
        self.update(arg, **kwargs)

    def __setitem__(self, key, value):
        super(MultipleKeyDict, self).__setitem__(key, value)
        # The first key inserted for a value becomes its primary key.
        self._primary_keys.setdefault(value, key)

    def __delitem__(self, key):
        value = self[key]
        super(MultipleKeyDict, self).__delitem__(key)
        if self._primary_keys[value] == key:
            # The primary key was removed; promote another key for this value, if any.
            del self._primary_keys[value]
            for k, v in super(MultipleKeyDict, self).iteritems():
                if v == value:
                    self._primary_keys[value] = k
                    break

    def __iter__(self):
        return self.iterkeys()

    def update(self, arg=None, **kwargs):
        if arg is not None:
            if isinstance(arg, collections.Mapping):
                for k in arg:
                    self[k] = arg[k]
            else:
                for k, v in arg:
                    self[k] = v
        for k in kwargs:
            self[k] = kwargs[k]

    def clear(self):
        super(MultipleKeyDict, self).clear()
        self._primary_keys.clear()

    def iteritems(self):
        for v, k in self._primary_keys.iteritems():
            yield k, v

    def items(self):
        return list(self.iteritems())

    def itervalues(self):
        return self._primary_keys.iterkeys()

    def values(self):
        return self._primary_keys.keys()

    def iterkeys(self):
        return self._primary_keys.itervalues()

    def keys(self):
        return self._primary_keys.values()
The only messy bit is that it has to search the whole dict in case a primary key gets deleted.
I omitted copy(), pop(), popitem() and setdefault(). If you need them, you'll have to implement them yourself.
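A quick usage sketch of the class above (Python 2, since it relies on dict.iteritems(); the sample keys and values are made up):
d = MultipleKeyDict()
d['a'] = 1                   # 'a' becomes the primary key for the value 1
d['b'] = 1                   # secondary key for the same value
d['c'] = 2

print(d['b'])                # 1 -- lookup works through any key
print(sorted(d.keys()))      # ['a', 'c'] -- only the primary keys are reported
print(sorted(d.items()))     # [('a', 1), ('c', 2)]

del d['a']                   # deleting a primary key promotes another key for that value
print(sorted(d.keys()))      # ['b', 'c']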
The simplest and easiest solution would be to use two dictionaries, one of which maps secondary keys to a primary key. If for some reason you need a reverse mapping, that could be included in the primary dictionary.
sec = {'one': 'blue', 'two': 'red', 'three': 'blue',  # secondary keys
       'blue': 'blue', 'red': 'red'}                  # include identity mapping for primaries

primary = {'blue': ('doll', '$9.43', ('one', 'three')),   # named "primary" to avoid shadowing the builtin dict
           'red': ('truck', '$14.99', ('two',))}

record = primary[sec['two']]
print('Toy=', record[0], 'Price=', record[1])
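Adding another secondary key is then just one more entry in sec, and iterating over primary alone gives the primary-key view (variable names as in the sketch above):
sec['uno'] = 'blue'                      # a new secondary key; the primary dict is untouched
record = primary[sec['uno']]             # same record as primary['blue']

for primary_key, record in primary.items():
    print(primary_key, record[0], record[1])   # each record appears exactly once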
There is now a multiple-key dictionary Python package:
https://pypi.python.org/pypi/multi_key_dict/1.0.2
From the link:
from multi_key_dict import multi_key_dict
k = multi_key_dict()
k[1000, 'kilo', 'k'] = 'kilo (x1000)'
print k[1000] # will print 'kilo (x1000)'
print k['k'] # will also print 'kilo (x1000)'
# the same way objects can be updated, deleted:
# and if an object is updated using one key, the new value will
# be accessible using any other key, e.g. for example above:
k['kilo'] = 'kilo'
print k[1000] # will now print 'kilo' as value was updated
Related
How do I set a limit on the keys and values for the program below? I wrote a Python method to add keys and values. How can I limit the dict to at most 3 keys and at most 4 values per key, and ignore anything added beyond those limits?
Is there anything like the following that I could put in the program?
self.size_key = 3
self.size_value = 4
or something like [[] for _ in range(5)]
Sample code:
class my_dict(dict):
    def add(self, key, value):
        #if len(value) < 4 and len(key) < 3:
        self.setdefault(key, []).append(value)

    def remove_key(self, key):
        del self[key]

    def remove_value(self, key, value):
        if value in self[key]:
            self[key].remove(value)
dict_obj = my_dict()
dict_obj.add('key1', 'value1')
dict_obj.add('key2', 'value9')
dict_obj.add('key1', 'value3')
print(dict_obj)
To do what you want, you need an OrderedDict: a plain dict does not guarantee insertion order (at least before Python 3.7), so it would be impossible to predict which key gets deleted. The following solves your problem:
from collections import OrderedDict

class my_dict(OrderedDict):
    def add(self, key, value):
        self.check(key)
        self.setdefault(key, []).append(value)

    def check(self, key):
        # Already 3 keys: evict the oldest one.
        if len(self.keys()) == 3:
            del self[next(iter(self))]
        # Already 4 values under this key: drop the oldest value.
        if len(self.get(key, [])) == 4:
            del self[key][0]
dict_obj = my_dict()
dict_obj.add('key1', 'value1')
dict_obj.add('key2', 'value9')
dict_obj.add('key1', 'value3')
dict_obj.add('key1', 'value4')
dict_obj.add('key1', 'value5')
dict_obj.add('key1', 'value6')
dict_obj.add('key3', 'value6')
dict_obj.add('key4', 'value6')
print(dict_obj)
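Note that check() above evicts the oldest key or value once a limit is hit. If you instead want to ignore additions beyond 3 keys and 4 values per key, as the question asks, a minimal sketch could look like this (capped_dict and its class attributes are my own names, not part of the answer above):
from collections import OrderedDict

class capped_dict(OrderedDict):
    size_key = 3      # maximum number of keys
    size_value = 4    # maximum number of values per key

    def add(self, key, value):
        # Ignore a brand-new key once the key limit is reached.
        if key not in self and len(self) >= self.size_key:
            return
        values = self.setdefault(key, [])
        # Ignore further values once the per-key value limit is reached.
        if len(values) >= self.size_value:
            return
        values.append(value)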
I wrote a find_key_for_value method to retrieve the key for a given value, but it returns None:
class my_dict(dict):
    def add(self, key, value):
        self.setdefault(key, []).append(value)

    def remove_key(self, key):
        del self[key]

    def remove_value(self, key, value):
        if value in self[key]:
            self[key].remove(value)

    def find_key_for_value(self, value_to_find):
        for key, value in dict_obj.items():
            if value == value_to_find:
                return key
        #return [key for key,value in dict_obj.items() if value == value_to_find]
dict_obj = my_dict()
dict_obj.add('key1', 'value1')
dict_obj.add('key2', 'value9')
dict_obj.add('key1', 'value3')
Finding the key:
dict_obj.find_key_for_value('value1')
Desired output: key1
Your algorithm looks correct; you have a typo:
def find_key_for_value(self, value_to_find):
    # note the change from dict_obj to self here
    for key, value in self.items():
        if value == value_to_find:
            return key
Your dict value is a list, so you have to check whether value_to_find is within this list:
def find_key_for_value(self, value_to_find):
    for key, value in self.items():
        if value_to_find in value:
            return key
Or, if your dict might become huge or a value can have multiple keys, use:
def find_key_for_value(self, value_to_find):
    return list(filter(lambda key: value_to_find in self[key], self.keys()))
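For example, with the dict_obj built in the question and one of the corrected methods in place, the membership-test version returns the key directly, while the filter-based variant returns a list of matching keys:
dict_obj = my_dict()
dict_obj.add('key1', 'value1')
dict_obj.add('key2', 'value9')
dict_obj.add('key1', 'value3')

print(dict_obj.find_key_for_value('value1'))   # key1 with the membership-test version
print(dict_obj.find_key_for_value('value9'))   # key2
# the filter-based variant would return ['key1'] and ['key2'] instead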
I'm trying to filter a nested dictionary. My solution is clunky; I was hoping to see if there is a better method, perhaps using comprehensions. I'm only interested in dictionaries and lists for this example.
_dict_key_filter() will filter the keys of a nested dictionary or a list of nested dictionaries. Anything not in the obj_filter will be ignored on all nested levels.
obj : can be a dictionary or a list of dictionaries.
obj_filter: has to be a list of filter values
def _dict_key_filter(self, obj, obj_filter):
    # relies on the copy module (import copy) being available
    if isinstance(obj, dict):
        retdict = {}
        for key, value in obj.iteritems():
            if key in obj_filter:
                retdict[key] = copy.deepcopy(value)
            elif isinstance(value, (dict, list)):
                child = self._dict_key_filter(value, obj_filter)
                if child:
                    retdict[key] = child
        return retdict if retdict else None
    elif isinstance(obj, list):
        retlist = []
        for value in obj:
            child = self._dict_key_filter(value, obj_filter)
            if child:
                retlist.append(child)
        return retlist if retlist else None
    else:
        return None
Example:
dict1 = {'test1': {'test2': [1, 2]},
         'test3': [{'test6': 2}, {'test8': {'test9': 23}}],
         'test4': {'test5': 5}}
obj_filter = ['test5', 'test9']
result = _dict_key_filter(dict1, obj_filter)
The return value would be {'test3': [{'test8': {'test9': 23}}], 'test4': {'test5': 5}}
It's a really old question. I came across a similar problem recently.
It may be obvious, but you are dealing with a tree in which each node has an arbitrary number of children. You want to cut the subtrees that do not contain certain items as nodes (not leaves). To achieve this, you are using a custom DFS: the main function returns either a subtree or None, and if the value is None you "cut" the branch.
First of all, the function dict_key_filter returns a (non-empty) dict, a (non-empty) list, or None if no filter key was found in the branch.
To reduce complexity, you could return a sequence in every case: an empty sequence if no filter key was found, and a non-empty sequence if you are still searching or you have reached a leaf of the tree. Your code would look like:
def dict_key_filter(obj, obj_filter):
    if isinstance(obj, dict):
        retdict = {}
        ...
        return retdict   # empty or not
    elif isinstance(obj, list):
        retlist = []
        ...
        return retlist   # empty or not
    else:
        return []        # obviously empty
This was the easy part. Now we have to fill in the dots.
The list case
Let's begin with the list case, since it is the easier of the two to refactor:
retlist = []
for value in obj:
    child = dict_key_filter(value, obj_filter)
    if child:
        retlist.append(child)
We can translate this into a simple list comprehension:
retlist = [dict_key_filter(value, obj_filter) for value in obj if dict_key_filter(value, obj_filter)]
The drawback is that dict_key_filter is evaluated twice. We can avoid this with a little trick (see https://stackoverflow.com/a/15812866):
retlist = [subtree for subtree in (dict_key_filter(value, obj_filter) for value in obj) if subtree]
The inner expression (dict_key_filter(value, obj_filter) for value in obj) is a generator that calls dict_key_filter once per value. But we can even do better if we build a closure of dict_key_filter:
def dict_key_filter(obj, obj_filter):
    def inner_dict_key_filter(obj): return dict_key_filter(obj, obj_filter)
    ...
    retlist = list(filter(len, map(inner_dict_key_filter, obj)))
Now we are in the functional world: map applies inner_dict_key_filter to every element of the list and then the subtrees are filtered to exclude empty subtrees (len(subtree) is true iff subtree is not empty). Now, the code looks like:
def dict_key_filter(obj, obj_filter):
    def inner_dict_key_filter(obj): return dict_key_filter(obj, obj_filter)

    if isinstance(obj, dict):
        retdict = {}
        ...
        return retdict
    elif isinstance(obj, list):
        return list(filter(len, map(inner_dict_key_filter, obj)))
    else:
        return []
If you are familiar with functional programming, the list case is readable (not quite as readable as it would be in Haskell, but still readable).
The dict case
I do not forget the dictionary-comprehension tag in your question. The first idea is to create a function to return either a whole copy of the branch or the result of the rest of the DFS.
def build_subtree(key, value):
    if key in obj_filter:
        return copy.deepcopy(value)          # keep the branch
    elif isinstance(value, (dict, list)):
        return inner_dict_key_filter(value)  # continue to search
    return []                                # just an orphan value here
As in the list case, we do not refuse empty subtrees for now:
retdict = {}
for key, value in obj.items():
    retdict[key] = build_subtree(key, value)
We have now a perfect case for dict comprehension:
retdict = {key: build_subtree(key, value) for key, value in obj.items() if build_subtree(key, value)}
Again, we use the little trick to avoid to compute a value twice:
retdict = {key:subtree for key, subtree in ((key, build_subtree(key, value)) for key, value in obj.items()) if subtree}
But we have a little problem here: the code above is not exactly equivalent to the original code. What if the value is 0? In the original version, we have retdict[key] = copy.deepcopy(0), but in the new version we have nothing: the value 0 is evaluated as false and filtered out. The dict may then become empty and we cut the branch wrongly. We need another test to be sure we want to remove a value: if it's an empty list or dict, then remove it, else keep it:
def to_keep(subtree): return not (isinstance(subtree, (dict, list)) and len(subtree) == 0)
That is:
def to_keep(subtree): return not isinstance(subtree, (dict, list)) or subtree
If you remember a bit of logic (https://en.wikipedia.org/wiki/Truth_table#Logical_implication) you can interpret this as: if subtree is a dict or a list, then it must not be empty.
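A few illustrative checks of to_keep (my own examples, not from the original answer):
assert to_keep(0)            # scalar values are kept, even falsy ones like 0
assert not to_keep({})       # empty dict: cut the branch
assert not to_keep([])       # empty list: cut the branch
assert to_keep({'a': 1})     # non-empty containers are kept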
Let's put the pieces together:
import copy

def dict_key_filter(obj, obj_filter):
    def inner_dict_key_filter(obj): return dict_key_filter(obj, obj_filter)
    def to_keep(subtree): return not isinstance(subtree, (dict, list)) or subtree

    def build_subtree(key, value):
        if key in obj_filter:
            return copy.deepcopy(value)          # keep the branch
        elif isinstance(value, (dict, list)):
            return inner_dict_key_filter(value)  # continue to search
        return []                                # just an orphan value here

    if isinstance(obj, dict):
        key_subtree_pairs = ((key, build_subtree(key, value)) for key, value in obj.items())
        return {key: subtree for key, subtree in key_subtree_pairs if to_keep(subtree)}
    elif isinstance(obj, list):
        return list(filter(to_keep, map(inner_dict_key_filter, obj)))
    return []
I don't know if this is more pythonic, but it seems clearer to me.
dict1 = {
'test1': { 'test2':[1,2] },
'test3': [
{'test6': 2},
{
'test8': { 'test9': 23 }
}
],
'test4':{'test5': 0}
}
obj_filter = ['test5' , 'test9']
print (dict_key_filter(dict1, obj_filter))
# {'test3': [{'test8': {'test9': 23}}], 'test4': {'test5': 0}}
I've a dictionary such as this:
my_dict=collections.OrderedDict([((123, 1), 'qwe'), ((232, 1), 'asd'), ((234, 2), 'zxc'), ((6745, 2), 'aaa'), ((456, 3), 'bbb')])
The tuple combination is always unique, and I would like to maintain the insertion order, hence the OrderedDict. I have well over ~10K items in the dict. How can I efficiently maintain a counter that gives the count of the second element of the tuple? Basically, I need to know the count whenever I add or delete an item. Right now I just iterate through my_dict and rebuild the counter every time, but that seems very expensive.
In the above example I want the output to be:
1:2 # As in 1 occurs 2 times
2:2
3:1
Right now I do the following:
from collections import OrderedDict, Counter
my_dict = OrderedDict()
my_dict[(123,1)] = 'qwe'
my_dict[(232,1)] = 'asd'
my_dict[(234,2)] = 'zxc'
my_dict[(6745,2)] = 'aaa'
my_dict[(456,3)] = 'bbb'
cnt = []
for item in my_dict.keys():
    cnt.append(item[1])
print Counter(cnt)
I'm not sure if this is the best way, but is there a way to override the = operator and the pop function, so that a count is added or subtracted every time I do such an operation?
Getting a Counter to work nicely with an OrderedDict is probably going to require some subclassing. Here's something that might work (I've only implemented __setitem__ and __delitem__, but if you'd like a more robust implementation, let me know):
import collections

class CountedOrderedDict(collections.OrderedDict):
    def __init__(self, *args, **kwargs):
        self.counter = collections.Counter()
        super(CountedOrderedDict, self).__init__(*args, **kwargs)

    def __delitem__(self, key):
        super(CountedOrderedDict, self).__delitem__(key)
        self.counter[key[1]] -= 1

    def __setitem__(self, key, value):
        if key not in self:
            self.counter[key[1]] += 1
        super(CountedOrderedDict, self).__setitem__(key, value)
Example usage:
>>> my_dict = CountedOrderedDict({(123,1): 'sda', (232,1) : 'bfd', (234,2) : 'csd', (6745,2) : 'ds', (456,3) : 'rd'})
>>> my_dict.counter
Counter({1: 2, 2: 2, 3: 1})
>>> del my_dict[(123,1)]
>>> my_dict.counter
Counter({2: 2, 1: 1, 3: 1})
>>> my_dict[(150,1)] = "asdf"
>>> my_dict.counter
Counter({1: 2, 2: 2, 3: 1})
Here's a more general CountedOrderedDict implementation that takes a key function as a parameter.
import collections

class CountedOrderedDict(collections.OrderedDict):
    def __init__(self, key=lambda k: k, *args, **kwargs):
        self.counter = collections.Counter()
        self.key_transform = key
        super(CountedOrderedDict, self).__init__(*args, **kwargs)

    def __delitem__(self, key):
        super(CountedOrderedDict, self).__delitem__(key)
        self.counter[self.key_transform(key)] -= 1

    def __setitem__(self, key, value):
        if key not in self:
            self.counter[self.key_transform(key)] += 1
        super(CountedOrderedDict, self).__setitem__(key, value)
For your needs, you'd instantiate it like so:
my_dict = CountedOrderedDict(key=lambda k: k[1])
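A brief usage sketch of the generalized class, mirroring the dict from the question (the ordering shown in the Counter repr may differ):
my_dict = CountedOrderedDict(key=lambda k: k[1])
my_dict[(123, 1)] = 'qwe'
my_dict[(232, 1)] = 'asd'
my_dict[(234, 2)] = 'zxc'
my_dict[(6745, 2)] = 'aaa'
my_dict[(456, 3)] = 'bbb'
print(my_dict.counter)       # Counter({1: 2, 2: 2, 3: 1})

del my_dict[(123, 1)]
print(my_dict.counter)       # Counter({2: 2, 1: 1, 3: 1})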
I'm subclassing OrderedDict (CPython, 2.7.3) to represent a datafile. __getitem__ pulls a field out of the datafile and sets it on the current instance, similar to the code I've posted below. Now I would like to override __contains__ to return True if the field is in the dictionary or in the file on disk, since it can be read either way. However, this seems to break OrderedDict's ability to inspect its keys.
from collections import OrderedDict

dictclass = OrderedDict

class Foo(dictclass):
    def __getitem__(self, key):
        try:
            return dictclass.__getitem__(self, key)
        except KeyError:
            pass
        data = key * 2
        self[key] = data
        return data

    def __contains__(self, whatever):
        return dictclass.__contains__(self, whatever) or 'bar' in whatever

a = Foo()
print a['bar']
print a.keys()
If you run the code above, you'll get this output:
barbar
[]
Note that if you change dictclass = dict in the above code, it still seems to work (giving the following output).
barbar
['bar']
Am I doing something horribly wrong?
When Foo.__contains__ is not defined:
a['bar']
calls Foo.__getitem__, which executes
self[key] = data
This calls OrderedDict.__setitem__, which is defined this way:
def __setitem__(self, key, value, PREV=0, NEXT=1, dict_setitem=dict.__setitem__):
    'od.__setitem__(i, y) <==> od[i]=y'
    # Setting a new item creates a new link at the end of the linked list,
    # and the inherited dictionary is updated with the new key/value pair.
    if key not in self:
        root = self.__root
        last = root[PREV]
        last[NEXT] = root[PREV] = self.__map[key] = [last, root, key]
    dict_setitem(self, key, value)
Since Foo.__contains__ is not defined,
if key not in self:
is True. So the key is properly added to self.__root and self.__map.
When Foo.__contains__ is defined,
if key not in self:
is False. So the key is not properly added to self.__root and self.__map.
Foo.__contains__ effectively fools OrderedDict.__setitem__ into thinking that the 'bar' key has already been added.
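You can see this directly, even before anything has been stored (a small check of my own against the Foo class from the question):
a = Foo()
print('bar' in a)     # True -- the custom __contains__ already matches 'bar'
print(len(a))         # 0 -- yet nothing has been stored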
I found it helpful to play with the following code (adding print statements in __setitem__ and __iter__):
from collections import OrderedDict

dictclass = OrderedDict

class Foo(dictclass):
    def __getitem__(self, key):
        try:
            return dictclass.__getitem__(self, key)
        except KeyError:
            pass
        data = key * 2
        self[key] = data
        return data

    def __contains__(self, whatever):
        print('contains: {}'.format(whatever))
        return dictclass.__contains__(self, whatever) or 'bar' in whatever

    def __setitem__(self, key, value, PREV=0, NEXT=1, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link at the end of the linked list,
        # and the inherited dictionary is updated with the new key/value pair.
        print('key not in self: {}'.format(key not in self))
        if key not in self:
            root = self._OrderedDict__root
            last = root[PREV]
            last[NEXT] = root[PREV] = self._OrderedDict__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in order.
        NEXT, KEY = 1, 2
        root = self._OrderedDict__root
        curr = root[NEXT]
        print('curr: {}'.format(curr))
        print('root: {}'.format(root))
        print('curr is not root: {}'.format(curr is not root))
        while curr is not root:
            yield curr[KEY]
            curr = curr[NEXT]

a = Foo()
print a['bar']
# barbar
print a.keys()
# []
Notice that you can avoid this problem by making Foo a subclass of collections.MutableMapping and delegating most of its behavior to an OrderedDict attribute:
import collections

dictclass = collections.OrderedDict

class Foo(collections.MutableMapping):
    def __init__(self, *args, **kwargs):
        self._data = dictclass(*args, **kwargs)

    def __setitem__(self, key, value):
        self._data[key] = value

    def __delitem__(self, key):
        del self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

    def __getitem__(self, key):
        try:
            return self._data[key]
        except KeyError:
            pass
        data = key * 2
        self[key] = data
        return data

    def __contains__(self, whatever):
        # check the wrapped OrderedDict, plus the external fallback ('bar' stands in for it here)
        return whatever in self._data or 'bar' in whatever
which yields
a = Foo()
print a['bar']
# barbar
print a.keys()
# ['bar']
even with __contains__ defined.
What breaks your code is the or 'bar' in whatever part. If you remove it, it works, just like with the dictclass = dict change you mention.
The __setitem__ implementation of OrderedDict is this:
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
    'od.__setitem__(i, y) <==> od[i]=y'
    # Setting a new item creates a new link at the end of the linked list,
    # and the inherited dictionary is updated with the new key/value pair.
    if key not in self:
        root = self.__root
        last = root[0]
        last[1] = root[0] = self.__map[key] = [last, root, key]
    return dict_setitem(self, key, value)
So with self["bar"] = "barbar", the membership test key in self should be False, but your __contains__ makes it True even before any item has been inserted, so key not in self is False. Thus the key isn't added to self.__root, which is used in OrderedDict.__iter__:
def __iter__(self):
    'od.__iter__() <==> iter(od)'
    # Traverse the linked list in order.
    root = self.__root
    curr = root[1]        # start at the first node
    while curr is not root:
        yield curr[2]     # yield the curr[KEY]
        curr = curr[1]    # move to next node
Since the code for retrieving the keys uses this iterator and self.__root does not contain "bar", this particular key is never yielded, so it does not show up in keys().
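To confirm that the key really is stored in the underlying dict but missing from the linked list, you can bypass the overridden __iter__ (a small check of my own):
a = Foo()
a['bar']
print(dict.keys(a))   # ['bar'] -- the plain-dict storage does contain the key
print(a.keys())       # []      -- but OrderedDict's linked list never saw it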