Python DFS nested dictionary - python

I've written a function which should be able to search a nested dictionary, using DFS, to find a specific value. The recursive element seems to be working fine, however, when the base case should return True, it simply doesn't.
obj = {'a': [{'c':'d'}, {'e':'f'}],
       'b': [{'g':'h'}, {'i':'j'}]}

def obj_dfs(obj, target):
    if type(obj) == dict:
        for key, val in obj.items():
            obj_dfs(key, target)
            obj_dfs(val, target)
    elif type(obj) in (list, tuple):
        for elem in obj:
            obj_dfs(elem, target)
    else:
        if obj == target:
            print(f"{obj} == {target}")
            return True
        else:
            print(f"{obj} != {target}")
            return False

obj_dfs(obj, 'j')
And the results. As you can see, the standard output "j == j" shows that this element was evaluated correctly, but the return True statement isn't functioning as intended. I've verified that calling obj_dfs(obj, 'i') experiences the same error.
a != j
c != j
d != j
e != j
f != j
b != j
g != j
h != j
i != j
j == j
False
Why is this? And how can I fix this?

As the comments point out, you need to return the results of the recursive calls. Since you are just looking for a True/False match, you can pass the recursive calls into any(), which will exit early with True if there is a match. The base case can simply be whether obj == target.
obj = {'a': [{'c':'d'}, {'e':'f'}],
       'b': [{'g':'h'}, {'i':'j'}]}

def obj_dfs(obj, target):
    if obj == target:
        return True
    if isinstance(obj, dict):
        return any(obj_dfs(v, target) for v in obj.items())
    elif isinstance(obj, (list, tuple)):
        return any(obj_dfs(l, target) for l in obj)
    return False

obj_dfs(obj, 'i'), obj_dfs(obj, 'j'), obj_dfs(obj, 'd'), obj_dfs(obj, 'x')
# (True, True, True, False)
This allows for three simple blocks. Notice we are checking for a tuple as well as a list in the last isinstance. This allows you to simply pass in the dict's items() rather than looping over keys and values independently.
Adding a print(obj) as the first line of the function will show the order in which you are traversing the data. For example obj_dfs(obj, 'j') will print:
{'a': [{'c': 'd'}, {'e': 'f'}], 'b': [{'g': 'h'}, {'i': 'j'}]}
('a', [{'c': 'd'}, {'e': 'f'}])
a
[{'c': 'd'}, {'e': 'f'}]
{'c': 'd'}
('c', 'd')
c
d
{'e': 'f'}
('e', 'f')
e
f
('b', [{'g': 'h'}, {'i': 'j'}])
b
[{'g': 'h'}, {'i': 'j'}]
{'g': 'h'}
('g', 'h')
g
h
{'i': 'j'}
('i', 'j')
i
j

I made some edits to your code
obj = {'a': [{'c':'d'}, {'e':'f'}],
       'b': [{'g':'h'}, {'i':'j'}]}

def obj_dfs(obj, target):
    if type(obj) == dict:
        for key, val in obj.items():
            if key == target:
                return val
            else:
                result = obj_dfs(val, target)
                if result != None: return result
    elif type(obj) in (list, tuple):
        for elem in obj:
            result = obj_dfs(elem, target)
            if result != None: return result
    else:
        if obj == target: return True

print(obj_dfs(obj, 'i'))
I don't know why you would return True rather than the value though, so I changed it so that if the target is a dictionary key it returns the corresponding value; otherwise it returns True, to show that the target was found.

Expanding on my comment, try this, where we pass return values up the chain and always return True if a child returned True:
obj = {'a': [{'c':'d'}, {'e':'f'}],
       'b': [{'g':'h'}, {'i':'j'}]}
obj2 = {'a': [{'c':'d'}, {'e':'f'}],
        'b': [{'g':'h'}, {'g':'j'}]}

def obj_dfs(obj, target):
    if type(obj) == dict:
        for key, val in obj.items():
            keyret = obj_dfs(key, target)
            valueret = obj_dfs(val, target)
            if keyret is True or valueret is True:
                return True
        else:
            return False
    elif type(obj) in (list, tuple):
        rets = []
        for elem in obj:
            rets.append(obj_dfs(elem, target))
        if True in rets:
            return True
        else:
            return False
    else:
        if obj == target:
            print(f"{obj} == {target}")
            return True
        else:
            print(f"{obj} != {target}")
            return False

print(obj_dfs(obj, 'i'))
print(obj_dfs(obj2, 'i'))

Recursion is a functional heritage and so using it with functional style yields the best results. This means decoupling concerns and pushing side effects to the fringe of your program. obj_dfs performs a depth-first traversal and mixes in search logic. And for purposes of debugging, includes a print side effect.
Decomposition results in functions that are easier to write, test, and reuse in various parts of our program. We'll start with a generic dfs for traversal -
def dfs(t, path = []):
    if isinstance(t, dict):
        for key, val in t.items():
            yield from dfs(val, [*path, key])
    elif isinstance(t, (list, tuple)):
        for key, val in enumerate(t):
            yield from dfs(val, [*path, key])
    else:
        yield path, t

obj = {'a': [{'c':'d'}, {'e':'f'}],
       'b': [{'g':'h'}, {'i':'j'}]}

for path, val in dfs(obj):
    print(path, val)  # side effect decided by caller

['a', 0, 'c'] d
['a', 1, 'e'] f
['b', 0, 'g'] h
['b', 1, 'i'] j
The suggested solution and other answers here collapse the semantic difference between key and value, providing no differentiation on the particular match of target. Writing dfs as we did above, we can know which part of obj matched.
keys             values
['a', 0, 'c']    d
['a', 1, 'e']    f
['b', 0, 'g']    h
['b', 1, 'i']    j
has_value and has_key are easily defined in terms of dfs -
def has_value(t, target):
    for path, val in dfs(t):
        if val == target:
            return True
    return False

def has_key(t, target):
    for path, val in dfs(t):
        if target in path:
            return True
    return False
print(has_value(obj, "j")) # True
print(has_key(obj, "j")) # False
print(has_value(obj, "i")) # False
print(has_key(obj, "i")) # True

Related

A more elegant way to return a list of node values from depth-first recursive function in Python 3?

I'm working out how to return a list of values from a depth-first traversal of a binary tree. Specifically, I'm trying to do so with a recursive function.
After playing around a bit I got this code to properly return the list of expected values, but using a try/finally block in order to pull it off seems clunky to me.
def depth_first_recursive(root, node_list: list):
    try:
        if not root:
            return
        else:
            node_list.append(root.val)
            depth_first_recursive(root.left, node_list)
            depth_first_recursive(root.right, node_list)
    finally:
        return node_list

class Node:
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

values = []

if __name__ == '__main__':
    a = Node('a')
    b = Node('b')
    c = Node('c')
    d = Node('d')
    e = Node('e')
    f = Node('f')
    a.left = b
    a.right = c
    b.left = d
    b.right = e
    c.right = f
    nodelist = []
    print(depth_first_recursive(a, node_list=nodelist))
returns ['a', 'b', 'd', 'e', 'c', 'f']
Is there a better way to pull this off in Python?
Simplified yours, much more efficient than all the copying of the others. Yours takes O(n) time, theirs is O(n²).
def depth_first_recursive(root, node_list: list):
    if root:
        node_list.append(root.val)
        depth_first_recursive(root.left, node_list)
        depth_first_recursive(root.right, node_list)
    return node_list
I have applied the same logic as yours but tried to make code compact:
def depth_first_recursive(root, node_list: list):
    if root is None: return []
    return [root.val] + depth_first_recursive(root.left, node_list) + depth_first_recursive(root.right, node_list)
Output:
['a', 'b', 'd', 'e', 'c', 'f']
You don't need to maintain a node_list; just have more faith in the structure of your recursion.
def depth_first_recursive(root):
    if root is None:
        return []
    # Terminal case when reaching bottom of tree
    elif not root.left and not root.right:
        return [root.val]
    # Get current value and continue to append list
    else:
        return [root.val] + depth_first_recursive(root.left) + depth_first_recursive(root.right)
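Another option, not given in any of the answers above: a generator keeps the O(n) behaviour without threading a list through the calls and without concatenating intermediate lists (a sketch, assuming the Node class and tree from the question).

def depth_first_values(root):
    # pre-order traversal as a generator; no shared list, no list copies
    if root is None:
        return
    yield root.val
    yield from depth_first_values(root.left)
    yield from depth_first_values(root.right)

# with the tree a..f built in the question:
# print(list(depth_first_values(a)))  # ['a', 'b', 'd', 'e', 'c', 'f']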

Recursively locate nested dictionary containing a target key and value

There are many questions about this problem, but in my case they are not working. I'm trying to find a nested dictionary given a target key and value pair. My recursive function returns None (and, after an attempted fix, a maximum recursion depth error).
def recursive_lookup(k, sv, d):
    if k in d: return d[k]
    for v in d.values():
        if isinstance(v, dict):
            a = recursive_lookup(k, sv, v)
            if a == sv:
                if a is not None:
                    return d
    return None

def run():
    maly = {'_id': "ObjectId('5def7e8c4802b906dd067f97')", 'METADATA': {'Tags': {'AcquisitionTime': '2019-02-05T15:59:37.5862118Z', 'ImageScaling': {'ImageScaling': {'ImagePixelSize': '4.54,4.54'}}, 'DetectorState': {'CameraState': {'ApplyCameraProfile': 'false', 'ApplyImageOrientation': 'true', 'ExposureTime': '2200000', 'Frame': '0,0,2752,2208', 'ImageOrientation': '3'}}, 'StageXPosition': '+000000141526.5820', 'StageYPosition': '+000000189329.5000', 'FocusPosition': '+000000002097.2550', 'RoiCenterOffsetX': '+000000000000.0000', 'RoiCenterOffsetY': '+000000000000.0000'}, 'DataSchema': None, 'AttachmentSchema': None}}
    returned_value = recursive_lookup("FocusPosition", "+000000002097.2550", maly)
    print(returned_value)

run()
If I change return d to recursive_lookup(k, sv, d) it is also not working.
It should return the maly dictionary, but it returned None.
How can I fix that problem?
This is the right idea, but a matched result isn't being passed up the call stack correctly. You can also simplify logic by checking key and value on the same call frame--this should also eliminate a bug where the target key-value are on the top level of the dict (there's no previous frame to fall back on to check the value).
def recursive_lookup(target_key, target_val, dictionary):
    if target_key in dictionary and dictionary[target_key] == target_val:
        return dictionary
    for value in dictionary.values():
        if isinstance(value, dict):
            if result := recursive_lookup(target_key, target_val, value):
                return result

if __name__ == "__main__":
    maly = {'_id': "ObjectId('5def7e8c4802b906dd067f97')", 'METADATA': {'Tags': {'AcquisitionTime': '2019-02-05T15:59:37.5862118Z', 'ImageScaling': {'ImageScaling': {'ImagePixelSize': '4.54,4.54'}}, 'DetectorState': {'CameraState': {'ApplyCameraProfile': 'false', 'ApplyImageOrientation': 'true', 'ExposureTime': '2200000', 'Frame': '0,0,2752,2208', 'ImageOrientation': '3'}}, 'StageXPosition': '+000000141526.5820', 'StageYPosition': '+000000189329.5000', 'FocusPosition': '+000000002097.2550', 'RoiCenterOffsetX': '+000000000000.0000', 'RoiCenterOffsetY': '+000000000000.0000'}, 'DataSchema': None, 'AttachmentSchema': None}}
    print(recursive_lookup("FocusPosition", "+000000002097.2550", maly))
Here's a more-easily verifiable version that uses a simple dictionary and doesn't use the 3.8 assignment expression:
def recursive_lookup(target_key, target_val, dictionary):
    if target_key in dictionary and dictionary[target_key] == target_val:
        return dictionary
    for value in dictionary.values():
        if isinstance(value, dict):
            result = recursive_lookup(target_key, target_val, value)
            if result: return result

if __name__ == "__main__":
    dictionary = {
        "a": "foo",
        "b": {
            "c": "bar",
            "d": "baz",
            "e": {
                "f": "quux",
                "g": "garply"
            }
        }
    }
    print(recursive_lookup("c", "bar", dictionary))     # => {'c': 'bar', 'd': 'baz', 'e': {'f': 'quux', 'g': 'garply'}}
    print(recursive_lookup("g", "garply", dictionary))  # => {'f': 'quux', 'g': 'garply'}
This sample code performs a recursive search in a hierarchy of dictionaries. So I guess it may correspond to what you are looking for:
def rec_search(key, dic):
    if key in dic:
        return dic[key]
    for d in dic.values():
        if isinstance(d, dict):
            val = rec_search(key, d)
            if val is not None: return val
    return None

maly = {1: 'a',
        2: 'b',
        3: {4: 'd',
            5: 'e',
            6: {7: 'g',
                8: 'h'}
            },
        9: {10: 'i',
            11: 'j',
            12: {13: 'l',
                 14: 'm'}
            }
        }

print(rec_search(2, maly))   # --> 'b'
print(rec_search(7, maly))   # --> 'g'
print(rec_search(10, maly))  # --> 'i'
print(rec_search(15, maly))  # --> None
EDIT: Corrected code after Sylwester's comment
I think the problem is that when you call recursive_lookup() for the second time, it just keeps looking for sv in the same dictionary, which is maly, and doesn't search any deeper into the rest; that's why it returns None.

Using tuple as key for nested properties with json.dump

Not sure how to use a tuple as a set of strings the way I would like.
I would like my json to look like:
'item': {
    'a': {
        'b': {
            'c': 'somevalue'
        }
    }
}
Which could be done with:
item = {}
item['a']['b']['c'] = "somevalue"
However a, b, and c are dynamic, so I understand I need to use a tuple, but this does not do what I would like:
item = {}
path = ('a','b','c')
item[path] = "somevalue"
json.dump(item, sys.stdout)
So I am getting the error:
TypeError("key " + repr(key) + " is not a string"
How do I dynamically get item['a']['b']['c']?
AFAIK there are no builtins for this task, so you need to write a couple of recursive functions:
def xset(dct, path, val):
    if len(path) == 1:
        dct[path[0]] = val
    else:
        if path[0] not in dct: dct[path[0]] = {}
        xset(dct[path[0]], path[1:], val)

def xget(dct, path):
    if len(path) == 1:
        return dct[path[0]]
    else:
        return xget(dct[path[0]], path[1:])
Usage:
>>> d = {}
>>> xset(d, ('a', 'b', 'c'), 6)
>>> d
{'a': {'b': {'c': 6}}}
>>> xset(d, ('a', 'b', 'd', 'e'), 12)
>>> d
{'a': {'b': {'c': 6, 'd': {'e': 12}}}}
>>> xget(d, ('a', 'b', 'c'))
6
Try this:
item = {}
for i in reversed(path):
    tmp = {**item}
    item = {}
    item[i] = {**tmp} if path.index(i) != len(path) - 1 else 'somevalue'
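A compact alternative (a sketch of my own, not from either answer above): functools.reduce can walk the path for the get, and dict.setdefault can build the intermediate dicts for the set.

from functools import reduce

def xget(dct, path):
    # walk down the nested dicts one key at a time
    return reduce(lambda node, key: node[key], path, dct)

def xset(dct, path, val):
    # create intermediate dicts as needed, then assign the leaf
    node = reduce(lambda node, key: node.setdefault(key, {}), path[:-1], dct)
    node[path[-1]] = val

item = {}
xset(item, ('a', 'b', 'c'), 'somevalue')
print(item)                         # {'a': {'b': {'c': 'somevalue'}}}
print(xget(item, ('a', 'b', 'c')))  # somevalue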

Comparing two dictionaries and checking how many (key, value) pairs are equal

I have two dictionaries, but for simplification, I will take these two:
>>> x = dict(a=1, b=2)
>>> y = dict(a=2, b=2)
Now, I want to compare whether each key, value pair in x has the same corresponding value in y. So I wrote this:
>>> for x_values, y_values in zip(x.iteritems(), y.iteritems()):
        if x_values == y_values:
            print 'Ok', x_values, y_values
        else:
            print 'Not', x_values, y_values
And it works since a tuple is returned and then compared for equality.
My questions:
Is this correct? Is there a better way to do this? Better not in speed, I am talking about code elegance.
UPDATE: I forgot to mention that I have to check how many key, value pairs are equal.
If you want to know how many values match in both the dictionaries, you should have said that :)
Maybe something like this:
shared_items = {k: x[k] for k in x if k in y and x[k] == y[k]}
print(len(shared_items))
def dict_compare(d1, d2):
    d1_keys = set(d1.keys())
    d2_keys = set(d2.keys())
    shared_keys = d1_keys.intersection(d2_keys)
    added = d1_keys - d2_keys
    removed = d2_keys - d1_keys
    modified = {o: (d1[o], d2[o]) for o in shared_keys if d1[o] != d2[o]}
    same = set(o for o in shared_keys if d1[o] == d2[o])
    return added, removed, modified, same

x = dict(a=1, b=2)
y = dict(a=2, b=2)
added, removed, modified, same = dict_compare(x, y)
x = dict(a=1, b=2)
y = dict(a=2, b=2)
added, removed, modified, same = dict_compare(x, y)
What you want to do is simply x==y
What you do is not a good idea, because the items in a dictionary are not supposed to have any order. You might be comparing [('a',1),('b',1)] with [('b',1), ('a',1)] (same dictionaries, different order).
For example, see this:
>>> x = dict(a=2, b=2,c=3, d=4)
>>> x
{'a': 2, 'c': 3, 'b': 2, 'd': 4}
>>> y = dict(b=2,c=3, d=4)
>>> y
{'c': 3, 'b': 2, 'd': 4}
>>> zip(x.iteritems(), y.iteritems())
[(('a', 2), ('c', 3)), (('c', 3), ('b', 2)), (('b', 2), ('d', 4))]
The difference is only one item, but your algorithm will see that all items are different
dic1 == dic2
From python docs:
The following examples all return a dictionary equal to
{"one": 1, "two": 2, "three": 3}:
>>> a = dict(one=1, two=2, three=3)
>>> b = {'one': 1, 'two': 2, 'three': 3}
>>> c = dict(zip(['one', 'two', 'three'], [1, 2, 3]))
>>> d = dict([('two', 2), ('one', 1), ('three', 3)])
>>> e = dict({'three': 3, 'one': 1, 'two': 2})
>>> a == b == c == d == e
True
Providing keyword arguments as in the first example only works for
keys that are valid Python identifiers. Otherwise, any valid keys can
be used.
Valid for python2 and python3.
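For the OP's follow-up ("how many key, value pairs are equal"), Python 3 dict views support set operations directly, so the count is one expression. This is a sketch of my own, not part of the answer above, and it assumes the values are hashable:

x = dict(a=1, b=2)
y = dict(a=2, b=2)
print(len(x.items() & y.items()))  # 1 -- only ('b', 2) is present in both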
Since it seems nobody mentioned deepdiff, I will add it here for completeness. I find it very convenient for getting diff of (nested) objects in general:
Installation
pip install deepdiff
Sample code
import deepdiff
import json
dict_1 = {
    "a": 1,
    "nested": {
        "b": 1,
    }
}
dict_2 = {
    "a": 2,
    "nested": {
        "b": 2,
    }
}
diff = deepdiff.DeepDiff(dict_1, dict_2)
print(json.dumps(diff, indent=4))
Output
{
    "values_changed": {
        "root['a']": {
            "new_value": 2,
            "old_value": 1
        },
        "root['nested']['b']": {
            "new_value": 2,
            "old_value": 1
        }
    }
}
Note about pretty-printing the result for inspection: The above code works if both dicts have the same attribute keys (with possibly different attribute values as in the example). However, if an "extra" attribute is present in one of the dicts, json.dumps() fails with
TypeError: Object of type PrettyOrderedSet is not JSON serializable
Solution: use diff.to_json() and json.loads() / json.dumps() to pretty-print:
import deepdiff
import json

dict_1 = {
    "a": 1,
    "nested": {
        "b": 1,
    },
    "extra": 3
}
dict_2 = {
    "a": 2,
    "nested": {
        "b": 2,
    }
}
diff = deepdiff.DeepDiff(dict_1, dict_2)
print(json.dumps(json.loads(diff.to_json()), indent=4))
Output:
{
    "dictionary_item_removed": [
        "root['extra']"
    ],
    "values_changed": {
        "root['a']": {
            "new_value": 2,
            "old_value": 1
        },
        "root['nested']['b']": {
            "new_value": 2,
            "old_value": 1
        }
    }
}
Alternative: use pprint, results in a different formatting:
import pprint
# same code as above
pprint.pprint(diff, indent=4)
Output:
{ 'dictionary_item_removed': [root['extra']],
'values_changed': { "root['a']": { 'new_value': 2,
'old_value': 1},
"root['nested']['b']": { 'new_value': 2,
'old_value': 1}}}
I'm new to Python but I ended up doing something similar to @mouad:
unmatched_item = set(dict_1.items()) ^ set(dict_2.items())
len(unmatched_item) # should be 0
The XOR operator (^) should eliminate all elements of the dict when they are the same in both dicts.
Just use:
assert cmp(dict1, dict2) == 0
@mouad's answer is nice if you assume that both dictionaries contain simple values only. However, if you have dictionaries that contain dictionaries you'll get an exception, as dictionaries are not hashable.
Off the top of my head, something like this might work:
from numpy import atleast_1d  # atleast_1d comes from numpy and is needed for the all(...) check below

def compare_dictionaries(dict1, dict2):
    if dict1 is None or dict2 is None:
        print('Nones')
        return False

    if (not isinstance(dict1, dict)) or (not isinstance(dict2, dict)):
        print('Not dict')
        return False

    shared_keys = set(dict1.keys()) & set(dict2.keys())

    if not (len(shared_keys) == len(dict1.keys()) and len(shared_keys) == len(dict2.keys())):
        print('Not all keys are shared')
        return False

    dicts_are_equal = True
    for key in dict1.keys():
        if isinstance(dict1[key], dict) or isinstance(dict2[key], dict):
            dicts_are_equal = dicts_are_equal and compare_dictionaries(dict1[key], dict2[key])
        else:
            dicts_are_equal = dicts_are_equal and all(atleast_1d(dict1[key] == dict2[key]))

    return dicts_are_equal
Yet another possibility, up to the last note of the OP, is to compare the hashes (SHA or MD5) of the dicts dumped as JSON. The way hashes are constructed means that if they are equal, the source strings are equal as well (barring astronomically unlikely collisions). This is very fast and mathematically sound.
import json
import hashlib

def hash_dict(d):
    # json.dumps returns a str; hashlib needs bytes, hence the encode()
    return hashlib.sha1(json.dumps(d, sort_keys=True).encode('utf-8')).hexdigest()

x = dict(a=1, b=2)
y = dict(a=2, b=2)
z = dict(a=1, b=2)

print(hash_dict(x) == hash_dict(y))  # False
print(hash_dict(x) == hash_dict(z))  # True
The function is fine IMO, clear and intuitive. But just to give you (another) answer, here is my go:
def compare_dict(dict1, dict2):
    for x1 in dict1.keys():
        z = dict1.get(x1) == dict2.get(x1)
        if not z:
            print('key', x1)
            print('value A', dict1.get(x1), '\nvalue B', dict2.get(x1))
            print('-----\n')
Can be useful for you or for anyone else..
EDIT:
I have created a recursive version of the one above.. Have not seen that in the other answers
import numpy as np

def compare_dict(a, b):
    # Compares two dictionaries..
    # Posts things that are not equal..
    res_compare = []
    for k in set(list(a.keys()) + list(b.keys())):
        if k not in a or k not in b:
            # a key missing on one side counts as "not equal"
            res_compare.append(False)
            print(k, a.get(k), b.get(k))
            continue
        if isinstance(a[k], dict):
            z0 = compare_dict(a[k], b[k])
        else:
            z0 = a[k] == b[k]
        z0_bool = np.all(z0)
        res_compare.append(z0_bool)
        if not z0_bool:
            print(k, a[k], b[k])
    return np.all(res_compare)
The easiest way (and one of the more robust at that) to do a deep comparison of two dictionaries is to serialize them in JSON format, sorting the keys, and compare the string results:
import json
if json.dumps(x, sort_keys=True) == json.dumps(y, sort_keys=True):
    ...  # Do something ...
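For example, with the dictionaries from the question plus one extra (z is my own addition, a minimal sketch to show why sort_keys matters):

import json

x = dict(a=1, b=2)
y = dict(a=2, b=2)
z = dict(b=2, a=1)  # same content as x, different insertion order

print(json.dumps(x, sort_keys=True) == json.dumps(y, sort_keys=True))  # False
print(json.dumps(x, sort_keys=True) == json.dumps(z, sort_keys=True))  # True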
To test if two dicts are equal in keys and values:
def dicts_equal(d1, d2):
    """ return True if all keys and values are the same """
    return all(k in d2 and d1[k] == d2[k]
               for k in d1) \
        and all(k in d1 and d1[k] == d2[k]
                for k in d2)
If you want to return the values which differ, write it differently:
def dict1_minus_d2(d1, d2):
    """ return the subset of d1 where the keys don't exist in d2 or
        the values in d2 are different, as a dict """
    return {k: v for k, v in d1.items() if k not in d2 or v != d2[k]}
You would have to call it twice and merge the results, i.e.
{**dict1_minus_d2(d1, d2), **dict1_minus_d2(d2, d1)}
Code
def equal(a, b):
    type_a = type(a)
    type_b = type(b)

    if type_a != type_b:
        return False

    if isinstance(a, dict):
        if len(a) != len(b):
            return False
        for key in a:
            if key not in b:
                return False
            if not equal(a[key], b[key]):
                return False
        return True

    elif isinstance(a, list):
        if len(a) != len(b):
            return False
        # note: this order-insensitive comparison consumes (mutates) both lists
        while len(a):
            x = a.pop()
            index = indexof(x, b)
            if index == -1:
                return False
            del b[index]
        return True

    else:
        return a == b

def indexof(x, a):
    for i in range(len(a)):
        if equal(x, a[i]):
            return i
    return -1
Test
>>> a = {
'number': 1,
'list': ['one', 'two']
}
>>> b = {
'list': ['two', 'one'],
'number': 1
}
>>> equal(a, b)
True
A simple compare with == should be enough nowadays (python 3.8). Even when you compare the same dicts in a different order (last example). The best thing is, you don't need a third-party package to accomplish this.
a = {'one': 'dog', 'two': 'cat', 'three': 'mouse'}
b = {'one': 'dog', 'two': 'cat', 'three': 'mouse'}
c = {'one': 'dog', 'two': 'cat', 'three': 'mouse'}
d = {'one': 'dog', 'two': 'cat', 'three': 'mouse', 'four': 'fish'}
e = {'one': 'cat', 'two': 'dog', 'three': 'mouse'}
f = {'one': 'dog', 'two': 'cat', 'three': 'mouse'}
g = {'two': 'cat', 'one': 'dog', 'three': 'mouse'}
h = {'one': 'dog', 'two': 'cat', 'three': 'mouse'}
print(a == b) # True
print(c == d) # False
print(e == f) # False
print(g == h) # True
I am using this solution that works perfectly for me in Python 3
import logging

log = logging.getLogger(__name__)
...

def deep_compare(self, left, right, level=0):
    if type(left) != type(right):
        log.info("Exit 1 - Different types")
        return False

    elif type(left) is dict:
        # Dict comparison
        for key in left:
            if key not in right:
                log.info("Exit 2 - missing {} in right".format(key))
                return False
            else:
                if not deep_compare(left[str(key)], right[str(key)], level + 1):
                    log.info("Exit 3 - different children")
                    return False
        return True

    elif type(left) is list:
        # List comparison
        for key in left:
            if key not in right:
                log.info("Exit 4 - missing {} in right".format(key))
                return False
            else:
                if not deep_compare(left[left.index(key)], right[right.index(key)], level + 1):
                    log.info("Exit 5 - different children")
                    return False
        return True

    else:
        # Other comparison
        return left == right

    return False
It compares dict, list and any other types that implements the "==" operator by themselves.
If you need to compare something else different, you need to add a new branch in the "if tree".
Hope that helps.
for python3:
data_set_a = dict_a.items()
data_set_b = dict_b.items()
difference_set = data_set_a ^ data_set_b
In PyUnit there's a method which compares dictionaries beautifully. I tested it using the following two dictionaries, and it does exactly what you're looking for.
d1 = {1: "value1",
      2: [{"subKey1": "subValue1",
           "subKey2": "subValue2"}]}
d2 = {1: "value1",
      2: [{"subKey2": "subValue2",
           "subKey1": "subValue1"}]
      }

def assertDictEqual(self, d1, d2, msg=None):
    self.assertIsInstance(d1, dict, 'First argument is not a dictionary')
    self.assertIsInstance(d2, dict, 'Second argument is not a dictionary')

    if d1 != d2:
        standardMsg = '%s != %s' % (safe_repr(d1, True), safe_repr(d2, True))
        diff = ('\n' + '\n'.join(difflib.ndiff(
                       pprint.pformat(d1).splitlines(),
                       pprint.pformat(d2).splitlines())))
        standardMsg = self._truncateMessage(standardMsg, diff)
        self.fail(self._formatMessage(msg, standardMsg))
I'm not recommending importing unittest into your production code. My thought is the source in PyUnit could be re-tooled to run in production. It uses pprint which "pretty prints" the dictionaries. Seems pretty easy to adapt this code to be "production ready".
Being late with my response is better than never!
Comparing for not-equal is more efficient than comparing for equal: two dicts are not equal as soon as any key or value in one dict is not found in the other. The code below takes into consideration that you may be comparing defaultdicts, and thus uses get instead of item access [].
A somewhat random default value equal to the key being retrieved is used in the get call, just in case one dict has None as a value for a key that does not exist in the other. The get != condition is also checked before the not in condition for efficiency, because keys and values from both sides are checked at the same time.
def Dicts_Not_Equal(first, second):
    """ return True if both do not have same length or if any keys and values are not the same """
    if len(first) == len(second):
        for k in first:
            if first.get(k) != second.get(k, k) or k not in second: return (True)
        for k in second:
            if first.get(k, k) != second.get(k) or k not in first: return (True)
        return (False)
    return (True)
Why not just iterate through one dictionary and check the other in the process (assuming both dictionaries have the same keys)?
x = dict(a=1, b=2)
y = dict(a=2, b=2)
for key, val in x.items():
    if val == y[key]:
        print('Ok', val, y[key])
    else:
        print('Not', val, y[key])
Output:
Not 1 2
Ok 2 2
see dictionary view objects:
https://docs.python.org/2/library/stdtypes.html#dict
This way you can subtract one dict view from the other, and it will return a set of the key/value pairs that are different in the second dict:
original = {'one': 1, 'two': 2, 'ACTION': 'ADD'}
originalView = original.viewitems()
updatedDict = {'one': 1, 'two': 2, 'ACTION': 'REPLACE'}
updatedDictView = updatedDict.viewitems()
delta = updatedDictView - originalView
print delta
>> set([('ACTION', 'REPLACE')])
You can intersect, union, difference (shown above), symmetric difference these dictionary view objects.
Better? Faster? - not sure, but part of the standard library - which makes it a big plus for portability
The code below will help you compare a list of dicts in Python:
def compate_generic_types(object1, object2):
    if isinstance(object1, str) and isinstance(object2, str):
        return object1 == object2
    elif isinstance(object1, unicode) and isinstance(object2, unicode):
        return object1 == object2
    elif isinstance(object1, bool) and isinstance(object2, bool):
        return object1 == object2
    elif isinstance(object1, int) and isinstance(object2, int):
        return object1 == object2
    elif isinstance(object1, float) and isinstance(object2, float):
        return object1 == object2
    elif isinstance(object1, float) and isinstance(object2, int):
        return object1 == float(object2)
    elif isinstance(object1, int) and isinstance(object2, float):
        return float(object1) == object2
    return True

def deep_list_compare(object1, object2):
    retval = True
    count = len(object1)
    object1 = sorted(object1)
    object2 = sorted(object2)
    for x in range(count):
        if isinstance(object1[x], dict) and isinstance(object2[x], dict):
            retval = deep_dict_compare(object1[x], object2[x])
            if retval is False:
                print "Unable to match [{0}] element in list".format(x)
                return False
        elif isinstance(object1[x], list) and isinstance(object2[x], list):
            retval = deep_list_compare(object1[x], object2[x])
            if retval is False:
                print "Unable to match [{0}] element in list".format(x)
                return False
        else:
            retval = compate_generic_types(object1[x], object2[x])
            if retval is False:
                print "Unable to match [{0}] element in list".format(x)
                return False
    return retval

def deep_dict_compare(object1, object2):
    retval = True
    if len(object1) != len(object2):
        return False
    for k in object1.iterkeys():
        obj1 = object1[k]
        obj2 = object2[k]
        if isinstance(obj1, list) and isinstance(obj2, list):
            retval = deep_list_compare(obj1, obj2)
            if retval is False:
                print "Unable to match [{0}]".format(k)
                return False
        elif isinstance(obj1, dict) and isinstance(obj2, dict):
            retval = deep_dict_compare(obj1, obj2)
            if retval is False:
                print "Unable to match [{0}]".format(k)
                return False
        else:
            retval = compate_generic_types(obj1, obj2)
            if retval is False:
                print "Unable to match [{0}]".format(k)
                return False
    return retval
Here is my answer, using a recursive approach:
def dict_equals(da, db):
    if not isinstance(da, dict) or not isinstance(db, dict):
        return False
    if len(da) != len(db):
        return False
    for da_key in da:
        if da_key not in db:
            return False
        if not isinstance(db[da_key], type(da[da_key])):
            return False
        if isinstance(da[da_key], dict):
            res = dict_equals(da[da_key], db[da_key])
            if res is False:
                return False
        elif da[da_key] != db[da_key]:
            return False
    return True

a = {1: {2: 3, 'name': 'cc', "dd": {3: 4, 21: "nm"}}}
b = {1: {2: 3, 'name': 'cc', "dd": {3: 4, 21: "nm"}}}
print dict_equals(a, b)
Hope that helps!
>>> x = {'a':1,'b':2,'c':3}
>>> x
{'a': 1, 'b': 2, 'c': 3}
>>> y = {'a':2,'b':4,'c':3}
>>> y
{'a': 2, 'b': 4, 'c': 3}
METHOD 1:
>>> common_item = x.items() & y.items()  # intersection of the items() views
>>> common_item
{('c', 3)}
METHOD 2:
>>> for i in x.items():
        if i in y.items():
            print('true')
        else:
            print('false')
false
false
true
>>> hash_1
{'a': 'foo', 'b': 'bar'}
>>> hash_2
{'a': 'foo', 'b': 'bar'}
>>> set_1 = set (hash_1.iteritems())
>>> set_1
set([('a', 'foo'), ('b', 'bar')])
>>> set_2 = set (hash_2.iteritems())
>>> set_2
set([('a', 'foo'), ('b', 'bar')])
>>> len (set_1.difference(set_2))
0
>>> if (len(set_1.difference(set_2)) | len(set_2.difference(set_1))) == False:
... print "The two hashes match."
...
The two hashes match.
>>> hash_2['c'] = 'baz'
>>> hash_2
{'a': 'foo', 'c': 'baz', 'b': 'bar'}
>>> if (len(set_1.difference(set_2)) | len(set_2.difference(set_1))) == False:
... print "The two hashes match."
...
>>>
>>> hash_2.pop('c')
'baz'
Here's another option:
>>> id(hash_1)
140640738806240
>>> id(hash_2)
140640738994848
So as you see the two id's are different. But the rich comparison operators seem to do the trick:
>>> hash_1 == hash_2
True
>>>
>>> hash_2
{'a': 'foo', 'b': 'bar'}
>>> set_2 = set (hash_2.iteritems())
>>> if (len(set_1.difference(set_2)) | len(set_2.difference(set_1))) == False:
... print "The two hashes match."
...
The two hashes match.
>>>
In Python 3.6, it can be done as:
if len(dict_1) == len(dict_2):
    ret = all(i in dict_2.items() for i in dict_1.items())
The ret variable will be True if all the items of dict_1 are present in dict_2.
You can find that out by writing your own function in the following way.
class Solution:
    def find_if_dict_equal(self, dict1, dict2):
        dict1_keys = list(dict1.keys())
        dict2_keys = list(dict2.keys())
        if len(dict1_keys) != len(dict2_keys):
            return False
        for i in dict1_keys:
            if i not in dict2 or dict2[i] != dict1[i]:
                return False
        return True

    def findAnagrams(self, s, p):
        if len(s) < len(p):
            return []
        p_dict = {}
        for i in p:
            if i not in p_dict:
                p_dict[i] = 0
            p_dict[i] += 1
        s_dict = {}
        final_list = []
        for i in s[:len(p)]:
            if i not in s_dict:
                s_dict[i] = 0
            s_dict[i] += 1
        if self.find_if_dict_equal(s_dict, p_dict):
            final_list.append(0)
        for i in range(len(p), len(s)):
            element_to_add = s[i]
            element_to_remove = s[i - len(p)]
            if element_to_add not in s_dict:
                s_dict[element_to_add] = 0
            s_dict[element_to_add] += 1
            s_dict[element_to_remove] -= 1
            if s_dict[element_to_remove] == 0:
                del s_dict[element_to_remove]
            if self.find_if_dict_equal(s_dict, p_dict):
                final_list.append(i - len(p) + 1)
        return final_list
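A quick usage sketch (the input strings are my own example, not from the question): findAnagrams returns the start indices of every anagram of p found in s, using the dictionary-equality helper above.

sol = Solution()
print(sol.find_if_dict_equal({'a': 1}, {'a': 1}))  # True
print(sol.findAnagrams("cbaebabacd", "abc"))       # [0, 6]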
I have a default/template dictionary whose values I want to update from a second given dictionary. The update happens for keys that exist in the default dictionary, and only if the related value is compatible with the default key/value type.
Somehow this is similar to the question above.
I wrote this solution:
CODE
def compDict(gDict, dDict):
    gDictKeys = list(gDict.keys())
    for gDictKey in gDictKeys:
        try:
            dDict[gDictKey]
        except KeyError:
            # Do the operation you wanted to do for "key not present in dict".
            print(f'\nkey \'{gDictKey}\' does not exist! Dictionary key/value not set !!!\n')
        else:
            # check on type
            if type(gDict[gDictKey]) == type(dDict[gDictKey]):
                if type(dDict[gDictKey]) == dict:
                    compDict(gDict[gDictKey], dDict[gDictKey])
                else:
                    dDict[gDictKey] = gDict[gDictKey]
                    print('\n', dDict, 'update successful !!!\n')
            else:
                print(f'\nValue \'{gDict[gDictKey]}\' for \'{gDictKey}\' not a compatible data type !!!\n')

# default dictionary
dDict = {'A': str(),
         'B': {'Ba': int(), 'Bb': float()},
         'C': list(),
         }

# given dictionary
gDict = {'A': 1234, 'a': 'addio', 'C': ['HELLO'], 'B': {'Ba': 3, 'Bb': 'wrong'}}

compDict(gDict, dDict)
print('Updated default dictionary: ', dDict)
OUTPUT
Value '1234' for 'A' not a compatible data type !!!
key 'a' does not exist! Dictionary key/value not set !!!
{'A': '', 'B': {'Ba': 0, 'Bb': 0.0}, 'C': ['HELLO']} update successful !!!
{'Ba': 3, 'Bb': 0.0} update successful !!!
Value 'wrong' for 'Bb' not a compatible data type !!!
Updated default dictionary:  {'A': '', 'B': {'Ba': 3, 'Bb': 0.0}, 'C': ['HELLO']}
import json
if json.dumps(dict1) == json.dumps(dict2):
    print("Equal")

How to convert a nested Python dict to object?

I'm searching for an elegant way to get data using attribute access on a dict with some nested dicts and lists (i.e. javascript-style object syntax).
For example:
>>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
Should be accessible in this way:
>>> x = dict2obj(d)
>>> x.a
1
>>> x.b.c
2
>>> x.d[1].foo
bar
I think, this is not possible without recursion, but what would be a nice way to get an object style for dicts?
Update: In Python 2.6 and onwards, consider whether the namedtuple data structure suits your needs:
>>> from collections import namedtuple
>>> MyStruct = namedtuple('MyStruct', 'a b d')
>>> s = MyStruct(a=1, b={'c': 2}, d=['hi'])
>>> s
MyStruct(a=1, b={'c': 2}, d=['hi'])
>>> s.a
1
>>> s.b
{'c': 2}
>>> s.c
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'MyStruct' object has no attribute 'c'
>>> s.d
['hi']
The alternative (original answer contents) is:
class Struct:
    def __init__(self, **entries):
        self.__dict__.update(entries)
Then, you can use:
>>> args = {'a': 1, 'b': 2}
>>> s = Struct(**args)
>>> s
<__main__.Struct instance at 0x01D6A738>
>>> s.a
1
>>> s.b
2
Surprisingly no one has mentioned Bunch. This library is exclusively meant to provide attribute style access to dict objects and does exactly what the OP wants. A demonstration:
>>> from bunch import bunchify
>>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
>>> x = bunchify(d)
>>> x.a
1
>>> x.b.c
2
>>> x.d[1].foo
'bar'
A Python 3 library is available at https://github.com/Infinidat/munch - Credit goes to codyzu
>>> from munch import DefaultMunch
>>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
>>> obj = DefaultMunch.fromDict(d)
>>> obj.b.c
2
>>> obj.a
1
>>> obj.d[1].foo
'bar'
class obj(object):
    def __init__(self, d):
        for k, v in d.items():
            if isinstance(v, (list, tuple)):
                setattr(self, k, [obj(x) if isinstance(x, dict) else x for x in v])
            else:
                setattr(self, k, obj(v) if isinstance(v, dict) else v)
>>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
>>> x = obj(d)
>>> x.b.c
2
>>> x.d[1].foo
'bar'
x = type('new_dict', (object,), d)
then add recursion to this and you're done.
edit this is how I'd implement it:
>>> d
{'a': 1, 'b': {'c': 2}, 'd': ['hi', {'foo': 'bar'}]}
>>> def obj_dic(d):
        top = type('new', (object,), d)
        seqs = tuple, list, set, frozenset
        for i, j in d.items():
            if isinstance(j, dict):
                setattr(top, i, obj_dic(j))
            elif isinstance(j, seqs):
                setattr(top, i,
                        type(j)(obj_dic(sj) if isinstance(sj, dict) else sj for sj in j))
            else:
                setattr(top, i, j)
        return top
>>> x = obj_dic(d)
>>> x.a
1
>>> x.b.c
2
>>> x.d[1].foo
'bar'
# Applies to Python-3 Standard Library
class Struct(object):
    def __init__(self, data):
        for name, value in data.items():
            setattr(self, name, self._wrap(value))

    def _wrap(self, value):
        if isinstance(value, (tuple, list, set, frozenset)):
            return type(value)([self._wrap(v) for v in value])
        else:
            return Struct(value) if isinstance(value, dict) else value

# Applies to Python-2 Standard Library
class Struct(object):
    def __init__(self, data):
        for name, value in data.iteritems():
            setattr(self, name, self._wrap(value))

    def _wrap(self, value):
        if isinstance(value, (tuple, list, set, frozenset)):
            return type(value)([self._wrap(v) for v in value])
        else:
            return Struct(value) if isinstance(value, dict) else value
Can be used with any sequence/dict/value structure of any depth.
There's a collections helper called namedtuple that can do this for you:
from collections import namedtuple
d_named = namedtuple('Struct', d.keys())(*d.values())
In [7]: d_named
Out[7]: Struct(a=1, b={'c': 2}, d=['hi', {'foo': 'bar'}])
In [8]: d_named.a
Out[8]: 1
If your dict is coming from json.loads(), you can turn it into an object instead (rather than a dict) in one line:
import json
from collections import namedtuple
json.loads(data, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))
See also How to convert JSON data into a Python object.
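For example, round-tripping the question's dict through json gives attribute access in one step (a minimal sketch of the approach above; note that namedtuple makes the result read-only):

import json
from collections import namedtuple

d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
x = json.loads(json.dumps(d), object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))
print(x.a)         # 1
print(x.b.c)       # 2
print(x.d[1].foo)  # bar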
You can leverage the json module of the standard library with a custom object hook:
import json

class obj(object):
    def __init__(self, dict_):
        self.__dict__.update(dict_)

def dict2obj(d):
    return json.loads(json.dumps(d), object_hook=obj)
Example usage:
>>> d = {'a': 1, 'b': {'c': 2}, 'd': ['hi', {'foo': 'bar'}]}
>>> o = dict2obj(d)
>>> o.a
1
>>> o.b.c
2
>>> o.d[0]
u'hi'
>>> o.d[1].foo
u'bar'
And it is not strictly read-only as it is with namedtuple, i.e. you can change values – not structure:
>>> o.b.c = 3
>>> o.b.c
3
Taking what I feel are the best aspects of the previous examples, here's what I came up with:
class Struct:
    """The recursive class for building and representing objects with."""

    def __init__(self, obj):
        for k, v in obj.items():
            if isinstance(v, dict):
                setattr(self, k, Struct(v))
            else:
                setattr(self, k, v)

    def __getitem__(self, val):
        return self.__dict__[val]

    def __repr__(self):
        return '{%s}' % str(', '.join('%s : %s' % (k, repr(v)) for (k, v) in self.__dict__.items()))
I ended up trying BOTH the AttrDict and the Bunch libraries and found them to be way too slow for my uses. After a friend and I looked into it, we found that the main method for writing these libraries results in the library aggressively recursing through a nested object and making copies of the dictionary object throughout. With this in mind, we made two key changes. 1) We made attributes lazy-loaded 2) instead of creating copies of a dictionary object, we create copies of a light-weight proxy object. This is the final implementation. The performance increase of using this code is incredible. When using AttrDict or Bunch, these two libraries alone consumed 1/2 and 1/3 respectively of my request time(what!?). This code reduced that time to almost nothing(somewhere in the range of 0.5ms). This of course depends on your needs, but if you are using this functionality quite a bit in your code, definitely go with something simple like this.
class DictProxy(object):
    def __init__(self, obj):
        self.obj = obj

    def __getitem__(self, key):
        return wrap(self.obj[key])

    def __getattr__(self, key):
        try:
            return wrap(getattr(self.obj, key))
        except AttributeError:
            try:
                return self[key]
            except KeyError:
                raise AttributeError(key)

    # you probably also want to proxy important list properties along like
    # items(), iteritems() and __len__

class ListProxy(object):
    def __init__(self, obj):
        self.obj = obj

    def __getitem__(self, key):
        return wrap(self.obj[key])

    # you probably also want to proxy important list properties along like
    # __iter__ and __len__

def wrap(value):
    if isinstance(value, dict):
        return DictProxy(value)
    if isinstance(value, (tuple, list)):
        return ListProxy(value)
    return value
See the original implementation here by https://stackoverflow.com/users/704327/michael-merickel.
The other thing to note, is that this implementation is pretty simple and doesn't implement all of the methods you might need. You'll need to write those as required on the DictProxy or ListProxy objects.
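A short usage sketch of the proxies above (my own example, using the dict from the question):

d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
x = wrap(d)
print(x.a)         # 1
print(x.b.c)       # 2
print(x.d[1].foo)  # bar
print(d is x.obj)  # True -- the proxy wraps the original dict, no copies are made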
If you want to access dict keys as an object (or as a dict for difficult keys), do it recursively, and also be able to update the original dict, you could do:
class Dictate(object):
    """Object view of a dict, updating the passed in dict when values are set
    or deleted. "Dictate" the contents of a dict...: """

    def __init__(self, d):
        # since __setattr__ is overridden, self.__dict = d doesn't work
        object.__setattr__(self, '_Dictate__dict', d)

    # Dictionary-like access / updates
    def __getitem__(self, name):
        value = self.__dict[name]
        if isinstance(value, dict):  # recursively view sub-dicts as objects
            value = Dictate(value)
        return value

    def __setitem__(self, name, value):
        self.__dict[name] = value

    def __delitem__(self, name):
        del self.__dict[name]

    # Object-like access / updates
    def __getattr__(self, name):
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, self.__dict)

    def __str__(self):
        return str(self.__dict)
Example usage:
d = {'a': 'b', 1: 2}
dd = Dictate(d)
assert dd.a == 'b' # Access like an object
assert dd[1] == 2 # Access like a dict
# Updates affect d
dd.c = 'd'
assert d['c'] == 'd'
del dd.a
del dd[1]
# Inner dicts are mapped
dd.e = {}
dd.e.f = 'g'
assert dd['e'].f == 'g'
assert d == {'c': 'd', 'e': {'f': 'g'}}
>>> def dict2obj(d):
        if isinstance(d, list):
            d = [dict2obj(x) for x in d]
        if not isinstance(d, dict):
            return d
        class C(object):
            pass
        o = C()
        for k in d:
            o.__dict__[k] = dict2obj(d[k])
        return o
>>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
>>> x = dict2obj(d)
>>> x.a
1
>>> x.b.c
2
>>> x.d[1].foo
'bar'
In 2021, use pydantic's BaseModel - it will convert nested dicts and nested JSON objects to Python objects and vice versa:
https://pydantic-docs.helpmanual.io/usage/models/
>>> from typing import List
>>> from pydantic import BaseModel
>>>
>>> class Foo(BaseModel):
...     count: int
...     size: float = None
...
>>>
>>> class Bar(BaseModel):
...     apple = 'x'
...     banana = 'y'
...
>>>
>>> class Spam(BaseModel):
...     foo: Foo
...     bars: List[Bar]
...
>>>
>>> m = Spam(foo={'count': 4}, bars=[{'apple': 'x1'}, {'apple': 'x2'}])
Object to dict
>>> print(m.dict())
{'foo': {'count': 4, 'size': None}, 'bars': [{'apple': 'x1', 'banana': 'y'}, {'apple': 'x2', 'banana': 'y'}]}
Object to JSON
>>> print(m.json())
{"foo": {"count": 4, "size": null}, "bars": [{"apple": "x1", "banana": "y"}, {"apple": "x2", "banana": "y"}]}
Dict to object
>>> spam = Spam.parse_obj({'foo': {'count': 4, 'size': None}, 'bars': [{'apple': 'x1', 'banana': 'y'}, {'apple': 'x2', 'banana': 'y2'}]})
>>> spam
Spam(foo=Foo(count=4, size=None), bars=[Bar(apple='x1', banana='y'), Bar(apple='x2', banana='y2')])
JSON to object
>>> spam = Spam.parse_raw('{"foo": {"count": 4, "size": null}, "bars": [{"apple": "x1", "banana": "y"}, {"apple": "x2", "banana": "y"}]}')
>>> spam
Spam(foo=Foo(count=4, size=None), bars=[Bar(apple='x1', banana='y'), Bar(apple='x2', banana='y')])
x.__dict__.update(d) should do fine.
Typically you want to mirror the dict hierarchy into your object, but not lists or tuples, which are typically at the lowest level. So this is how I did it:
class defDictToObject(object):
    def __init__(self, myDict):
        for key, value in myDict.items():
            if type(value) == dict:
                setattr(self, key, defDictToObject(value))
            else:
                setattr(self, key, value)
So we do:
myDict = { 'a': 1,
           'b': {
               'b1': {'x': 1,
                      'y': 2} },
           'c': ['hi', 'bar']
         }
and get:
x.b.b1.x 1
x.c ['hi', 'bar']
This should get you started:
class dict2obj(object):
    def __init__(self, d):
        self.__dict__['d'] = d

    def __getattr__(self, key):
        value = self.__dict__['d'][key]
        if type(value) == type({}):
            return dict2obj(value)
        return value

d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}

x = dict2obj(d)
print x.a
print x.b.c
print x.d[1].foo
It doesn't work for lists, yet. You'll have to wrap the lists in a UserList and overload __getitem__ to wrap dicts.
I know there are already a lot of answers here and I'm late to the party, but this method will recursively and 'in place' convert a dictionary to an object-like structure... Works in 3.x.x
from collections import namedtuple

def dictToObject(d):
    for k, v in d.items():
        if isinstance(v, dict):
            d[k] = dictToObject(v)
    return namedtuple('object', d.keys())(*d.values())
# Dictionary created from JSON file
d = {
'primaryKey': 'id',
'metadata':
{
'rows': 0,
'lastID': 0
},
'columns':
{
'col2': {
'dataType': 'string',
'name': 'addressLine1'
},
'col1': {
'datatype': 'string',
'name': 'postcode'
},
'col3': {
'dataType': 'string',
'name': 'addressLine2'
},
'col0': {
'datatype': 'integer',
'name': 'id'
},
'col4': {
'dataType': 'string',
'name': 'contactNumber'
}
},
'secondaryKeys': {}
}
d1 = dictToObject(d)
d1.columns.col1 # == object(datatype='string', name='postcode')
d1.metadata.rows # == 0
from mock import Mock
d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
my_data = Mock(**d)
# We got
# my_data.a == 1
This also works well:
class DObj(object):
    pass

dobj = DObj()
dobj.__dict__ = {'a': 'aaa', 'b': 'bbb'}

print dobj.a
>>> aaa
print dobj.b
>>> bbb
The simplest way would be using collections.namedtuple.
I find the following 4-liner the most beautiful, which supports nested dictionaries:
from collections import namedtuple

def dict_to_namedtuple(typename, data):
    return namedtuple(typename, data.keys())(
        *(dict_to_namedtuple(typename + '_' + k, v) if isinstance(v, dict) else v for k, v in data.items())
    )
The output will look good as well:
>>> nt = dict_to_namedtuple('config', {
... 'path': '/app',
... 'debug': {'level': 'error', 'stream': 'stdout'}
... })
>>> print(nt)
config(path='/app', debug=config_debug(level='error', stream='stdout'))
>>> print(nt.debug.level)
'error'
Let me explain a solution I almost used some time ago. But first, the reason I did not is illustrated by the fact that the following code:
d = {'from': 1}
x = dict2obj(d)
print x.from
gives this error:
File "test.py", line 20
print x.from == 1
^
SyntaxError: invalid syntax
Because "from" is a Python keyword there are certain dictionary keys you cannot allow.
Now my solution allows access to the dictionary items by using their names directly. But it also allows you to use "dictionary semantics". Here is the code with example usage:
class dict2obj(dict):
    def __init__(self, dict_):
        super(dict2obj, self).__init__(dict_)
        for key in self:
            item = self[key]
            if isinstance(item, list):
                for idx, it in enumerate(item):
                    if isinstance(it, dict):
                        item[idx] = dict2obj(it)
            elif isinstance(item, dict):
                self[key] = dict2obj(item)

    def __getattr__(self, key):
        return self[key]
d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
x = dict2obj(d)
assert x.a == x['a'] == 1
assert x.b.c == x['b']['c'] == 2
assert x.d[1].foo == x['d'][1]['foo'] == "bar"
Old Q&A, but I have something more to add: it seems no one has talked about recursive dicts. This is my code:
#!/usr/bin/env python

class Object( dict ):
    def __init__( self, data = None ):
        super( Object, self ).__init__()
        if data:
            self.__update( data, {} )

    def __update( self, data, did ):
        dataid = id(data)
        did[ dataid ] = self
        for k in data:
            dkid = id(data[k])
            if did.has_key(dkid):
                self[k] = did[dkid]
            elif isinstance( data[k], Object ):
                self[k] = data[k]
            elif isinstance( data[k], dict ):
                obj = Object()
                obj.__update( data[k], did )
                self[k] = obj
                obj = None
            else:
                self[k] = data[k]

    def __getattr__( self, key ):
        return self.get( key, None )

    def __setattr__( self, key, value ):
        if isinstance(value, dict):
            self[key] = Object( value )
        else:
            self[key] = value

    def update( self, *args ):
        for obj in args:
            for k in obj:
                if isinstance(obj[k], dict):
                    self[k] = Object( obj[k] )
                else:
                    self[k] = obj[k]
        return self

    def merge( self, *args ):
        for obj in args:
            for k in obj:
                if self.has_key(k):
                    if isinstance(self[k], list) and isinstance(obj[k], list):
                        self[k] += obj[k]
                    elif isinstance(self[k], list):
                        self[k].append( obj[k] )
                    elif isinstance(obj[k], list):
                        self[k] = [self[k]] + obj[k]
                    elif isinstance(self[k], Object) and isinstance(obj[k], Object):
                        self[k].merge( obj[k] )
                    elif isinstance(self[k], Object) and isinstance(obj[k], dict):
                        self[k].merge( obj[k] )
                    else:
                        self[k] = [ self[k], obj[k] ]
                else:
                    if isinstance(obj[k], dict):
                        self[k] = Object( obj[k] )
                    else:
                        self[k] = obj[k]
        return self

def test01():
    class UObject( Object ):
        pass
    obj = Object({1: 2})
    d = {}
    d.update({
        "a": 1,
        "b": {
            "c": 2,
            "d": [ 3, 4, 5 ],
            "e": [ [6, 7], (8, 9) ],
            "self": d,
        },
        1: 10,
        "1": 11,
        "obj": obj,
    })
    x = UObject(d)

    assert x.a == x["a"] == 1
    assert x.b.c == x["b"]["c"] == 2
    assert x.b.d[0] == 3
    assert x.b.d[1] == 4
    assert x.b.e[0][0] == 6
    assert x.b.e[1][0] == 8
    assert x[1] == 10
    assert x["1"] == 11
    assert x[1] != x["1"]
    assert id(x) == id(x.b.self.b.self) == id(x.b.self)
    assert x.b.self.a == x.b.self.b.self.a == 1
    x.x = 12
    assert x.x == x["x"] == 12
    x.y = {"a": 13, "b": [14, 15]}
    assert x.y.a == 13
    assert x.y.b[0] == 14

def test02():
    x = Object({
        "a": {
            "b": 1,
            "c": [ 2, 3 ]
        },
        1: 6,
        2: [ 8, 9 ],
        3: 11,
    })
    y = Object({
        "a": {
            "b": 4,
            "c": [ 5 ]
        },
        1: 7,
        2: 10,
        3: [ 12, 13 ],
    })
    z = {
        3: 14,
        2: 15,
        "a": {
            "b": 16,
            "c": 17,
        }
    }
    x.merge( y, z )

    assert 2 in x.a.c
    assert 3 in x.a.c
    assert 5 in x.a.c
    assert 1 in x.a.b
    assert 4 in x.a.b
    assert 8 in x[2]
    assert 9 in x[2]
    assert 10 in x[2]
    assert 11 in x[3]
    assert 12 in x[3]
    assert 13 in x[3]
    assert 14 in x[3]
    assert 15 in x[2]
    assert 16 in x.a.b
    assert 17 in x.a.c

if __name__ == '__main__':
    test01()
    test02()
Wanted to upload my version of this little paradigm.
class Struct(dict):
    def __init__(self, data):
        for key, value in data.items():
            if isinstance(value, dict):
                setattr(self, key, Struct(value))
            else:
                setattr(self, key, type(value).__init__(value))
        dict.__init__(self, data)
It preserves the attributes for the type that's imported into the class. My only concern would be overwriting methods from within the dictionary you're parsing. But otherwise it seems solid!
Here is another way to implement SilentGhost's original suggestion:
def dict2obj(d):
    if isinstance(d, dict):
        n = {}
        for item in d:
            if isinstance(d[item], dict):
                n[item] = dict2obj(d[item])
            elif isinstance(d[item], (list, tuple)):
                n[item] = [dict2obj(elem) for elem in d[item]]
            else:
                n[item] = d[item]
        return type('obj_from_dict', (object,), n)
    else:
        return d
I stumbled upon a case where I needed to recursively convert a list of dicts to a list of objects, so based on Roberto's snippet above, here is what did the work for me:
def dict2obj(d):
    if isinstance(d, dict):
        n = {}
        for item in d:
            if isinstance(d[item], dict):
                n[item] = dict2obj(d[item])
            elif isinstance(d[item], (list, tuple)):
                n[item] = [dict2obj(elem) for elem in d[item]]
            else:
                n[item] = d[item]
        return type('obj_from_dict', (object,), n)
    elif isinstance(d, (list, tuple,)):
        l = []
        for item in d:
            l.append(dict2obj(item))
        return l
    else:
        return d
Note that any tuple will be converted to its list equivalent, for obvious reasons.
Hope this helps someone as much as all your answers did for me, guys.
What about just assigning your dict to the __dict__ of an empty object?
class Object:
    """If your dict is "flat", this is a simple way to create an object from a dict

    >>> obj = Object()
    >>> obj.__dict__ = d
    >>> d.a
    1
    """
    pass

# For a nested dict, you need to recursively update __dict__
def dict2obj(d):
    """Convert a dict to an object

    >>> d = {'a': 1, 'b': {'c': 2}, 'd': ["hi", {'foo': "bar"}]}
    >>> obj = dict2obj(d)
    >>> obj.b.c
    2
    >>> obj.d
    ["hi", {'foo': "bar"}]
    """
    try:
        d = dict(d)
    except (TypeError, ValueError):
        return d
    obj = Object()
    for k, v in d.iteritems():
        obj.__dict__[k] = dict2obj(v)
    return obj
And your example list element was probably meant to be a Mapping, a list of (key, value) pairs like this:
>>> d = {'a': 1, 'b': {'c': 2}, 'd': [("hi", {'foo': "bar"})]}
>>> obj = dict2obj(d)
>>> obj.d.hi.foo
"bar"
Here's another implementation:
class DictObj(object):
    def __init__(self, d):
        self.__dict__ = d

def dict_to_obj(d):
    if isinstance(d, (list, tuple)): return map(dict_to_obj, d)
    elif not isinstance(d, dict): return d
    return DictObj(dict((k, dict_to_obj(v)) for (k, v) in d.iteritems()))
[Edit] Missed bit about also handling dicts within lists, not just other dicts. Added fix.
class Struct(dict):
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

    def copy(self):
        return Struct(dict.copy(self))
Usage:
points = Struct(x=1, y=2)
# Changing
points['x'] = 2
points.y = 1
# Accessing
points['x'], points.x, points.get('x') # 2 2 2
points['y'], points.y, points.get('y') # 1 1 1
# Accessing inexistent keys/attrs
points['z'] # KeyError: z
points.z # AttributeError: z
# Copying
points_copy = points.copy()
points.x = 2
points_copy.x # 1
How about this:
from functools import partial
d2o=partial(type, "d2o", ())
This can then be used like this:
>>> o=d2o({"a" : 5, "b" : 3})
>>> print o.a
5
>>> print o.b
3
I think a dict consisting of numbers, strings and dicts is enough most of the time, so I ignore tuples, lists and other types that might appear in the final dimension of a dict.
Considering inheritance, combined with recursion, it solves the print problem conveniently and also provides two ways to query the data and one way to edit it.
See the example below, a dict that describes some information about students:
group=["class1","class2","class3","class4",]
rank=["rank1","rank2","rank3","rank4","rank5",]
data=["name","sex","height","weight","score"]
#build a dict based on the lists above
student_dic=dict([(g,dict([(r,dict([(d,'') for d in data])) for r in rank ]))for g in group])
#this is the solution
class dic2class(dict):
def __init__(self, dic):
for key,val in dic.items():
self.__dict__[key]=self[key]=dic2class(val) if isinstance(val,dict) else val
student_class=dic2class(student_dic)
#one way to edit:
student_class.class1.rank1['sex']='male'
student_class.class1.rank1['name']='Nan Xiang'
#two ways to query:
print student_class.class1.rank1
print student_class.class1['rank1']
print '-'*50
for rank in student_class.class1:
print getattr(student_class.class1,rank)
Results:
{'score': '', 'sex': 'male', 'name': 'Nan Xiang', 'weight': '', 'height': ''}
{'score': '', 'sex': 'male', 'name': 'Nan Xiang', 'weight': '', 'height': ''}
--------------------------------------------------
{'score': '', 'sex': '', 'name': '', 'weight': '', 'height': ''}
{'score': '', 'sex': '', 'name': '', 'weight': '', 'height': ''}
{'score': '', 'sex': 'male', 'name': 'Nan Xiang', 'weight': '', 'height': ''}
{'score': '', 'sex': '', 'name': '', 'weight': '', 'height': ''}
{'score': '', 'sex': '', 'name': '', 'weight': '', 'height': ''}
