Background
For some background, I'm trying to create a tool that converts worksheets into API calls using Python 3.5.
For the conversion of the table cells to the schema needed for the API call, I've started down the path of using JavaScript-like syntax for the headers used in the spreadsheet, e.g.:
Worksheet Header (string)
dict.list[0].id
Python Dictionary
{
    "dict": {
        "list": [
            {"id": "my cell value"}
        ]
    }
}
It's also possible that the header schema could have nested arrays/dicts:
one.two[0].three[0].four.five[0].six
I also need to be able to append to the object after it has been created, as I go through each header.
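To make the append requirement concrete, here is a small illustration of my own (the second header is hypothetical, not from the worksheet above): two headers that share a prefix should merge into the same branch rather than create a second copy of it.
# "dict.list[0].id"   -> "my cell value"
# "dict.list[0].name" -> "another cell"   (hypothetical second header)
# should both land in the same list element:
{
    "dict": {
        "list": [
            {"id": "my cell value", "name": "another cell"}
        ]
    }
}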
What I've tried
add_branch
Based on https://stackoverflow.com/a/47276490/2903486, I am able to get nested dictionaries set up from values like one.two.three.four, and I'm able to append to the existing dictionary as I go through the rows, but I've been unable to add support for arrays:
def add_branch(tree, vector, value):
    key = vector[0]
    tree[key] = value \
        if len(vector) == 1 \
        else add_branch(tree[key] if key in tree else {},
                        vector[1:],
                        value)
    return tree
file = Worksheet(filePath, sheet).readRow()
rowList = []
for row in file:
    rowObj = {}
    for colName, rowValue in row.items():
        rowObj.update(add_branch(rowObj, colName.split("."), rowValue))
    rowList.append(rowObj)
return rowList
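For reference, a quick check of my own shows the dotted-only case working, and also why arrays fail: an indexed segment is treated as a literal key.
row = {"one.two.three": "value", "one.list[0].id": "my id"}
obj = {}
for col, val in row.items():
    obj.update(add_branch(obj, col.split("."), val))
# obj == {'one': {'two': {'three': 'value'},
#                 'list[0]': {'id': 'my id'}}}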
My own version of add_branch
import re, json

def branch(tree, vector, value):
    """
    Used to convert JS style notation (e.g dict.another.array[0].id) to a python object
    Originally based on https://stackoverflow.com/a/47276490/2903486
    """
    # Convert Boolean
    if isinstance(value, str):
        value = value.strip()
        if value.lower() in ['true', 'false']:
            value = True if value.lower() == "true" else False

    # Convert JSON
    try:
        value = json.loads(value)
    except:
        pass

    key = vector[0]
    arr = re.search('\[([0-9]+)\]', key)
    if arr:
        arr = arr.group(0)
        key = key.replace(arr, '')
        arr = arr.replace('[', '').replace(']', '')
        newArray = False
        if key not in tree:
            tree[key] = []
            tree[key].append(value \
                if len(vector) == 1 \
                else branch({} if key in tree else {},
                            vector[1:],
                            value))
        else:
            isInArray = False
            for x in tree[key]:
                if x.get(vector[1:][0], False):
                    isInArray = x[vector[1:][0]]

            if isInArray:
                tree[key].append(value \
                    if len(vector) == 1 \
                    else branch({} if key in tree else {},
                                vector[1:],
                                value))
            else:
                tree[key].append(value \
                    if len(vector) == 1 \
                    else branch({} if key in tree else {},
                                vector[1:],
                                value))

        if len(vector) == 1 and len(tree[key]) == 1:
            tree[key] = value.split(",")
    else:
        tree[key] = value \
            if len(vector) == 1 \
            else branch(tree[key] if key in tree else {},
                        vector[1:],
                        value)

    return tree
What still needs help
My branch solution actually works pretty well now after adding a few things, but I'm wondering whether I'm doing something wrong or messy here, or whether there's a better way to handle editing nested arrays (my attempt starts in the isInArray section of the code).
I'd expect these two headers to edit the same entry in the array, but instead I end up creating a duplicate dictionary in the first array:
file = [{
    "one.array[0].dict.arrOne[0]": "1,2,3",
    "one.array[0].dict.arrTwo[0]": "4,5,6"
}]
rowList = []
for row in file:
    rowObj = {}
    for colName, rowValue in row.items():
        rowObj.update(branch(rowObj, colName.split("."), rowValue))
    rowList.append(rowObj)
return rowList
Outputs:
[
    {
        "one": {
            "array": [
                {
                    "dict": {
                        "arrOne": [
                            "1",
                            "2",
                            "3"
                        ]
                    }
                },
                {
                    "dict": {
                        "arrTwo": [
                            "4",
                            "5",
                            "6"
                        ]
                    }
                }
            ]
        }
    }
]
Instead of:
[
    {
        "one": {
            "array": [
                {
                    "dict": {
                        "arrOne": [
                            "1",
                            "2",
                            "3"
                        ],
                        "arrTwo": [
                            "4",
                            "5",
                            "6"
                        ]
                    }
                }
            ]
        }
    }
]
I'm not sure whether there are any caveats to this solution, but it appears to work for the use cases I'm throwing at it:
import json, re

def build_job():
    def branch(tree, vector, value):
        # Originally based on https://stackoverflow.com/a/47276490/2903486

        # Convert Boolean
        if isinstance(value, str):
            value = value.strip()
            if value.lower() in ['true', 'false']:
                value = True if value.lower() == "true" else False

        # Convert JSON
        try:
            value = json.loads(value)
        except:
            pass

        key = vector[0]
        arr = re.search('\[([0-9]+)\]', key)
        if arr:
            # Get the index of the array, and remove it from the key name
            arr = arr.group(0)
            key = key.replace(arr, '')
            arr = int(arr.replace('[', '').replace(']', ''))

            if key not in tree:
                # If we dont have an array already, turn the dict from the previous
                # recursion into an array and append to it
                tree[key] = []
                tree[key].append(value \
                    if len(vector) == 1 \
                    else branch({} if key in tree else {},
                                vector[1:],
                                value))
            else:
                # Check to see if we are inside of an existing array here
                isInArray = False
                for i in range(len(tree[key])):
                    if tree[key][i].get(vector[1:][0], False):
                        isInArray = tree[key][i][vector[1:][0]]

                if isInArray and arr < len(tree[key]) \
                        and isinstance(tree[key][arr], list):
                    # Respond accordingly by appending or updating the value
                    tree[key][arr].append(value \
                        if len(vector) == 1 \
                        else branch(tree[key] if key in tree else {},
                                    vector[1:],
                                    value))
                else:
                    # Make sure we have an index to attach the requested array to
                    while arr >= len(tree[key]):
                        tree[key].append({})
                    # update the existing array with a dict
                    tree[key][arr].update(value \
                        if len(vector) == 1 \
                        else branch(tree[key][arr] if key in tree else {},
                                    vector[1:],
                                    value))

            # Turn comma deliminated values to lists
            if len(vector) == 1 and len(tree[key]) == 1:
                tree[key] = value.split(",")
        else:
            # Add dictionaries together
            tree.update({key: value \
                if len(vector) == 1 \
                else branch(tree[key] if key in tree else {},
                            vector[1:],
                            value)})
        return tree

    file = [{
        "one.array[0].dict.dont-worry-about-me": "some value",
        "one.array[0].dict.arrOne[0]": "1,2,3",
        "one.array[0].dict.arrTwo[1]": "4,5,6",
        "one.array[1].x.y[0].z[0].id": "789"
    }]
    rowList = []
    for row in file:
        rowObj = {}
        for colName, rowValue in row.items():
            rowObj.update(branch(rowObj, colName.split("."), rowValue))
        rowList.append(rowObj)
    return rowList
print(json.dumps(build_job(), indent=4))
Result:
[
    {
        "one": {
            "array": [
                {
                    "dict": {
                        "dont-worry-about-me": "some value",
                        "arrOne": [
                            "1",
                            "2",
                            "3"
                        ],
                        "arrTwo": [
                            "4",
                            "5",
                            "6"
                        ]
                    }
                },
                {
                    "x": {
                        "y": [
                            {
                                "z": [
                                    {
                                        "id": 789
                                    }
                                ]
                            }
                        ]
                    }
                }
            ]
        }
    }
]
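Not the code above, just a sketch of the alternative I would compare it against: a single setter (the name set_path is my own) that resolves one dotted segment at a time and grows lists up to the requested index, so headers that share a prefix always land in the same element. It only does the dict/list bookkeeping; the boolean/JSON/comma conversions from branch() would still have to be applied to the value first.
import re

def set_path(root, path, value):
    # Hypothetical helper, not the branch() code above: split each segment
    # into a key plus an optional [index], then walk or create the
    # intermediate dicts/lists before assigning the leaf value.
    segments = []
    for part in path.split("."):
        m = re.fullmatch(r"(.+?)(?:\[(\d+)\])?", part)
        segments.append((m.group(1), int(m.group(2)) if m.group(2) else None))

    node = root
    for pos, (name, index) in enumerate(segments):
        last = pos == len(segments) - 1
        if index is None:
            if last:
                node[name] = value
            else:
                node = node.setdefault(name, {})
        else:
            lst = node.setdefault(name, [])
            while len(lst) <= index:        # grow the list up to the index
                lst.append({})
            if last:
                lst[index] = value
            else:
                node = lst[index]
    return root

With the two headers from the failing example, both values end up in the same array entry:
row = {"one.array[0].dict.arrOne[0]": "1", "one.array[0].dict.arrTwo[0]": "4"}
obj = {}
for col, val in row.items():
    set_path(obj, col, val)
# obj == {'one': {'array': [{'dict': {'arrOne': ['1'], 'arrTwo': ['4']}}]}}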
Related
Disclaimer: I've been at this for about a week, and it's entirely possible that I've come up with the solution but missed it in my troubleshooting. Also, the INI files can be over 200 lines long and 10 levels deep, with combinations of dictionaries and lists.
Situation: I maintain a couple dozen applications, and each application has a JSON formatted INI file that tracks certain system settings. On my computer, I aggregated all those INI files into a single file and then collapsed the structure. That collapsed structure is the unique keys from all those INI files, followed by all the possible values that each key has, and then what I may want each value to be replaced with (see examples below).
Goal: When I need to make configuration changes in those applications, I want to instead make the value changes in my JSON file and then use a Python script to replace the matching key-value pairs in all those other system files.
Simplifications:
I understand opening and writing the files, my problem is the parsing.
The recursion will always end with a key-value pair where the type(value) is str.
Sample INI file from one of those applications
{
    "Version": "3.24.2",
    "Package": [
        {
            "ID": "42",
            "Display": "4",
            "Driver": "E10A"
        }, {
            "ID": "50",
            "Display": "1",
            "Driver": "E12A"
        }
    ]
}
My change file
Example use: if I want to replace all instances of {"Display": "1"} with {"Display": "10"}, then all I have to do is put a 10 between the double quotes below, i.e. change {"Display": {"1": ""}} to {"Display": {"1": "10"}}.
{
    "Version": {
        "3.24.2": "",
        "42.1": "",
        "2022-10-1": ""
    },
    "ID": {
        "42": "",
        "50": ""
    },
    "Display": {
        "1": "",
        "4": ""
    },
    "Driver": {
        "01152003.1": "",
        "E10A": "",
        "E12A": ""
    }
}
Attempt 1
I read that Python assigns values like a C *pointer, but that was not my experience with this attempt. There are no errors, and the data variable never changed.
def RecursiveSearch(val, key=None):
    if isinstance(val, dict):
        for k, v in val.items():
            RecursiveSearch(v, k)
    elif isinstance(val, list):
        for v in val:
            RecursiveSearch(v, key)
    elif isinstance(val, str):
        # Is the key being tracked in my change file
        if key in ChangeFile:
            # Is that key's value being tracked in my change file
            if val in ChangeFile[key].keys():
                # Find the matching key-value and apply the replacement value
                for k, v in ChangeFile[key].items():
                    # Only replace the value if it has something to replace it with
                    if k == val and v != "":
                        key[val] = v

data = open('config.ini', 'w', encoding='UTF-8', errors='ignore')
data = convertJSON(data)
ChangeFile = open('change.json', 'r', encoding='UTF-8', errors='ignore')
ChangeFile = convertJSON(data)

data = RecursiveSearch(val=data, key=None)
print(data)
Attempt 2
Same code but with return values. In this attempt the data is completely replaced with the last key-value pair the recursion looked at.
def RecursiveSearch(val, key=None):
    if isinstance(val, dict):
        for k, v in val.items():
            tmp = RecursiveSearch(v, k)
            if tmp != {k: v}:
                return tmp
        return val
    elif isinstance(val, list):
        for v in val:
            tmp = RecursiveSearch(v, key)
            if v != tmp:
                return tmp
        return val
    elif isinstance(val, str):
        # Is the key being tracked in my change file
        if key in ChangeFile:
            # Is that key's value being tracked in my change file
            if val in ChangeFile[key].keys():
                # Find the matching key-value and apply the replacement value
                for k, v in ChangeFile[key].items():
                    # Only replace the value if it has something to replace it with
                    if k == val and v != "":
                        return v
                    else: return val
            else: return val
        else: return val
    else: return val
    # Return edited data after the recursion uncoils
    return {key: val}

data = open('config.ini', 'w', encoding='UTF-8', errors='ignore')
data = convertJSON(data)
ChangeFile = open('change.json', 'r', encoding='UTF-8', errors='ignore')
ChangeFile = convertJSON(data)

data = RecursiveSearch(val=data, key=None)
print(data)
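For comparison, a minimal sketch of the in-place approach I would try, assuming the INI files are plain JSON as in the sample and using json.load in place of the convertJSON helper from the attempts above. The key differences from Attempt 1 are that the replacement assigns back into the containing dict (node[k] = replacement) rather than rebinding a local variable, and the config file is opened for reading:
import json

def apply_changes(node, changes):
    # Replace tracked string values in place, guided by the change file.
    if isinstance(node, dict):
        for k, v in node.items():
            if isinstance(v, str):
                replacement = changes.get(k, {}).get(v, "")
                if replacement != "":
                    node[k] = replacement   # mutate the container, don't rebind
            else:
                apply_changes(v, changes)
    elif isinstance(node, list):
        for item in node:
            apply_changes(item, changes)

with open('config.ini', encoding='UTF-8') as f:
    data = json.load(f)
with open('change.json', encoding='UTF-8') as f:
    change_file = json.load(f)

apply_changes(data, change_file)
print(json.dumps(data, indent=4))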
I have been working on a project that involves parsing a CSV file in order to turn all the data into very specifically formatted JSON following a complex schema. I have to custom-make this program because the required complexity of the JSON makes existing converters fail. I am mostly there, but I have run into one final roadblock:
I have nested dictionaries, and occasionally there must be a list within those; this list will contain further dictionaries. That part is fine and I have been able to complete it, but now I need to find a way to add more nested dictionaries within those. Below is a simplified breakdown of the concept.
The CSV will look something like this, where a # before a tag indicates it's a list:
x.a, x.b.z, x.b.y, x.#c.z.nest1, x.#c.z.nest2, x.#c.yy, x.d, x.e.z, x.e.y
ab, cd, ef, gh, ij, kl, mn, op, qr
This should result in the following JSON:
{
    "x": {
        "a": "ab",
        "b": {
            "z": "cd",
            "y": "ef"
        },
        "c": [
            {
                "z": {
                    "nest1": "gh",
                    "nest2": "ij"
                }
            },
            {
                "yy": "kl"
            }
        ],
        "d": "mn",
        "e": {
            "z": "op",
            "y": "qr"
        }
    }
}
This is one issue that I haven't been able to solve: my current code can only do one dictionary after the list item, not further. I also need to be able to somehow do the following within a list of dictionaries:
"c": [
{
"z": {
"nest1": "gh"
},
"zz": {
"nest2": "ij"
}
},
{
"yy": "kl"
}
i.e. somehow add multiple nested dictionaries within the dictionary in the list. The problem is that these aren't referenceable by name, so I do not know how I could indicate that in the CSV format.
Here is the code I have that works up to the first dictionary nested within a list:
import json
import pandas as pd
from os.path import exists

# df1 = pd.read_csv("excelTestFacilities.csv", header = 1, sep=",", keep_default_na=False, engine="python")
# df2 = pd.read_csv("excelTestFacilityContacts.csv", header = 1, sep=",", keep_default_na=False, engine="python")
# df = pd.merge(df1, df2, how = 'inner')
df = pd.read_csv("csvTestFile.csv", header = 1, sep=", ", keep_default_na=False, engine="python")
#print(df) # uncomment to see the transformation
json_data = df.to_dict(orient="records")
#print(json_data)

def unflatten_dic(dic):
    """
    Unflattens a CSV list into a set of nested dictionaries
    """
    ini = {}
    for k, v in list(dic.items()):
        node = ini
        list_bool = False
        *parents, key = k.split('.')
        for parent in parents:
            if parent[0] == '#':
                list_bool = True
        if list_bool:
            for parent in parents:
                if parent[0] == '#':
                    node[parent[1:]] = node = node.get(parent[1:], [])
                else:
                    node[parent] = node = node.get(parent, {})
            node.append({key: v})
        else:
            for parent in parents:
                node[parent] = node = node.get(parent, {})
            node[key] = v
    return ini

def merge_lists(dic):
    """
    Removes duplicates within sets
    """
    for k, v in list(dic.items()):
        if isinstance(v, dict):
            keys = list(v.keys())
            vals = list(v.values())
            if all(isinstance(l, list) and len(l) == len(vals[0]) for l in vals):
                dic[k] = []
                val_tuple = set(zip(*vals))  # removing duplicates with set()
                for t in val_tuple:
                    dic[k].append({subkey: t[i] for i, subkey in enumerate(keys)})
            else:
                merge_lists(v)
        elif isinstance(v, list):
            dic[k] = list(set(v))  # removing list duplicates

def clean_blanks(value):
    """
    Recursively remove all None values from dictionaries and lists, and returns
    the result as a new dictionary or list.
    """
    if isinstance(value, list):
        return [clean_blanks(x) for x in value if x != ""]
    elif isinstance(value, dict):
        return {
            key: clean_blanks(val)
            for key, val in value.items()
            if val != "" and val != {}
        }
    else:
        return value

def add_to_dict(section_added_to, section_to_add, value, reportNum):
    """
    Adds a value to a given spot within a dictionary set.

    section_added_to is optional for adding the set to a deeper section such as facility
    section_to_add is the name that the new dictionary entry will have
    value is the item to be added
    reportNum is the number indicating which report to add to, starting at 0
    """
    if section_added_to != '':
        end_list[reportNum][section_added_to][section_to_add] = value
    else:
        end_list[reportNum][section_to_add] = value

def read_add_vals(filename_prefix, added_to, section):
    for i in range(len(end_list)):
        temp_list = []
        filename = filename_prefix + str(i+1) + ".csv"
        if not exists(filename):
            continue
        temp_df = pd.read_csv(filename, header = 1, sep=",", keep_default_na=False, engine="python")
        temp_json = temp_df.to_dict(orient="records")
        for y in temp_json:
            return_ini = unflatten_dic(y)
            temp_list.append(return_ini)
        add_to_dict(added_to, section, temp_list, i)

global end_list
end_list = []

for x in json_data:
    return_ini = unflatten_dic(x)
    end_list.append(return_ini)

#read_add_vals('excelTestPermitsFac', 'facility', 'permits');

json_data = clean_blanks(end_list)
final_json = {"year": 2021, "version": "2022-02-14", "reports": json_data}
print(json.dumps(final_json, indent=4))
There are some parts of this code that are involved in other components of the overall end JSON, but I am mainly concerned with how to change unflatten_dic().
Here is my current attempt at changing unflatten_dic(), even though it doesn't work:
def list_get(list, list_item):
    i = 0
    for dict in list:
        if list_item in dict:
            return dict.get(list_item, {})
        i += 1
    return {}

def check_in_list(list, list_item):
    i = 0
    for dict in list:
        if list_item in dict:
            return i
        i += 1
    return -1

def unflatten_dic(dic):
    """
    Unflattens a CSV list into a set of nested dictionaries
    """
    ini = {}
    for k, v in list(dic.items()):
        node = ini
        list_bool = False
        *parents, key = k.split('.')
        for parent in parents:
            if parent[0] == '#':
                list_bool = True
        previous_node_list = False
        if list_bool:
            for parent in parents:
                print(parent)
                if parent[0] == '#':
                    node[parent[1:]] = node = node.get(parent[1:], [])
                    ends_with_dict = False
                    previous_node_list = True
                else:
                    print("else")
                    if previous_node_list:
                        print("prev list")
                        i = check_in_list(node, parent)
                        if i >= 0:
                            node[i] = node = list_get(node, parent)
                        else:
                            node.append({parent: {}})
                        previous_node_list = False
                        ends_with_dict = True
                    else:
                        print("not prev list")
                        node[parent] = node = node.get(parent, {})
                        previous_node_list = False
            if ends_with_dict:
                node[key] = v
            else:
                node.append({key: v})
        else:
            for parent in parents:
                node[parent] = node = node.get(parent, {})
            node[key] = v
        #print(node)
    return ini
Any, even small, amount of help would be greatly appreciated.
It is easiest to use recursion and collections.defaultdict to group child entries on their parents (each entry is separated by the . in the CSV data):
from collections import defaultdict

def to_dict(vals, is_list = 0):
    def form_child(a, b):
        return b[0][0] if len(b[0]) == 1 else to_dict(b, a[0] == '#')
    d = defaultdict(list)
    for a, *b in vals:
        d[a].append(b)
    if not is_list:
        return {a[a[0] == '#':]: form_child(a, b) for a, b in d.items()}
    return [{a[a[0] == '#':]: form_child(a, b)} for a, b in d.items()]

import csv, json
with open('filename.csv') as f:
    data = list(csv.reader(f))

r = [a.split('.')+[b] for i in range(0, len(data), 2) for a, b in zip(data[i], data[i+1])]
print(json.dumps(to_dict(r), indent=4))
Output:
{
    "x": {
        "a": "ab",
        "b": {
            "z": "cd",
            "y": "ef"
        },
        "c": [
            {
                "z": {
                    "nest1": "gh",
                    "nest2": "ij"
                }
            },
            {
                "yy": "kl"
            }
        ],
        "d": "mn",
        "e": {
            "z": "op",
            "y": "qr"
        }
    }
}
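To make the grouping step concrete, here is my own illustration of the intermediate list r for a two-column slice of the sample CSV; to_dict groups rows on their first path segment, recurses on the remainder, and a leading # tells the recursive call to emit a list of single-key dicts.
# columns "x.a" and "x.#c.z.nest1" with values "ab" and "gh"
r = [
    ['x', 'a', 'ab'],
    ['x', '#c', 'z', 'nest1', 'gh'],
]
# to_dict(r) -> {'x': {'a': 'ab', 'c': [{'z': {'nest1': 'gh'}}]}}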
I managed to get it working in what seems to be all scenarios. Here is the code that I made for the unflatten_dic() function.
def unflatten_dic(dic):
    """
    Unflattens a CSV list into a set of nested dictionaries
    """
    ini = {}
    for k, v in list(dic.items()):
        node = ini
        list_bool = False
        *parents, key = k.split('.')
        # print("parents")
        # print(parents)
        for parent in parents:
            if parent[0] == '#':
                list_bool = True
        if list_bool:
            for parent in parents:
                if parent[0] == '#':
                    node[parent[1:]] = node = node.get(parent[1:], [])
                elif parent.isnumeric():
                    # print("numeric parent")
                    # print("length of node")
                    # print(len(node))
                    if len(node) > int(parent):
                        # print("node length good")
                        node = node[int(parent)]
                    else:
                        node.append({})
                        node = node[int(parent)]
                else:
                    node[parent] = node = node.get(parent, {})
            try:
                node.append({key: v})
            except AttributeError:
                node[key] = v
        else:
            for parent in parents:
                node[parent] = node = node.get(parent, {})
            node[key] = v
    return ini
I haven't run into an issue thus far. This is based on the following rules for the CSV:
A # before any name results in that item being a list.
If the section immediately after a list in the CSV is a number, that will create multiple dictionaries within the list. Here is an example:
x.a, x.b.z, x.b.y, x.#c.0.zz, x.#c.1.zz, x.#c.2.zz, x.d, x.e.z, x.e.y, x.#c.1.yy.l, x.#c.1.yy.#m.q, x.#c.1.yy.#m.r
ab, cd, ef, gh, , kl, mn, op, qr, st, uv, wx
12, 34, 56, 78, 90, 09, , 65, 43, 21, , 92
This will result in the following JSON after formatting:
"reports": [
{
"x": {
"a": "ab",
"b": {
"z": "cd",
"y": "ef"
},
"c": [
{
"zz": "gh"
},
{
"yy": {
"l": "st",
"m": [
{
"q": "uv"
},
{
"r": "wx"
}
]
}
},
{
"zz": "kl"
}
],
"d": "mn",
"e": {
"z": "op",
"y": "qr"
}
}
},
{
"x": {
"a": "12",
"b": {
"z": "34",
"y": "56"
},
"c": [
{
"zz": "78"
},
{
"zz": "90",
"yy": {
"l": "21",
"m": [
{
"r": "92"
}
]
}
},
{
"zz": "09"
}
],
"e": {
"z": "65",
"y": "43"
}
}
}
]
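As a walkthrough of my own for the numeric rule: a header such as x.#c.1.zz splits into parents ['x', '#c', '1'] and key 'zz'; '#c' creates or reuses the list, the numeric segment grows it with empty dicts until that index exists, and the final assignment falls back to node[key] = v because the node is a dict by then.
print(unflatten_dic({"x.#c.0.zz": "gh", "x.#c.1.zz": "kl"}))
# {'x': {'c': [{'zz': 'gh'}, {'zz': 'kl'}]}}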
I have a JSON object, users.json:
{
    "1": { "name": "Jason" },
    "2": { "name": "Alex" }
}
I have a Python function which takes a name as input and should return the "id". For example, if I pass 'Jason' it should return '1', and if I pass 'Alex' it should return '2'. I know this is a simple question, but I am really stuck (and a bit lazy to study Python dictionaries). Here is what I have so far:
def __init__(self):
    self.users_file = 'users.json'

def read_users_file(self):
    with open(self.users_file) as users_file:
        return json.load(users_file)

def get_user_id(self, name):
    data = self.read_users_file()
    values = data.values()
    for val in data.values():
        if(name == val.get('name')):
            print('user found!')
Thanks!
data = {
    "1":
        {"name": "Jason"},
    "2":
        {"name": "Alex"}
}

name = 'Jason'
for key in data:
    if (data[key]['name'] == name):
        print(key)  ## output 1
Or, in a more Pythonic way:
for key, value in data.items():
    if name == value['name']:
        print(key)
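If only the first match is needed, a generator expression with next() does the same lookup and gives you a value to return (None when the name isn't found):
user_id = next((key for key, value in data.items() if value['name'] == name), None)
print(user_id)  # '1' for 'Jason', None for an unknown name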
I have a list of lists containing key and value like so:
[
    ['mounts:device', '/dev/sda3'],
    ['mounts:fstype:[0]', 'ext1'],
    ['mounts:fstype:[1]', 'ext3']
]
I can easily change the list to this, where the list indexes aren't separated by ':':
[
    ['mounts:device', '/dev/sda3'],
    ['mounts:fstype[0]', 'ext1'],
    ['mounts:fstype[1]', 'ext3']
]
Use whichever form suits this problem better.
The problem is to create a dictionary:
{
    'mounts': {
        'device': '/dev/sda3',
        'fstype': [
            'ext1',
            'ext3'
        ]
    }
}
It should also be possible to have lists within lists, for example:
['mounts:test:lala:fstype[0][0]', 'abc']
or
['mounts:test:lala:fstype:[0]:[0]', 'abc']
This is what I have so far:
def unflatten(pair_list):
    root = {}
    for pair in pair_list:
        context = root
        key_list = pair[0].split(':')
        key_list_last_item = key_list.pop()
        for key in key_list:
            if key not in context:
                context[key] = {}
            context = context[key]
        context[key_list_last_item] = pair[1]
    return root
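For the sample input, this version treats the '[0]' segments as ordinary dictionary keys (my own quick check), which is why no lists ever appear:
pair_list = [
    ['mounts:device', '/dev/sda3'],
    ['mounts:fstype:[0]', 'ext1'],
    ['mounts:fstype:[1]', 'ext3']
]
print(unflatten(pair_list))
# {'mounts': {'device': '/dev/sda3', 'fstype': {'[0]': 'ext1', '[1]': 'ext3'}}}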
This is based on the answer at https://stackoverflow.com/a/18648007/5413035, but as described above I need recursion and lists in the mix.
Thanks in advance
Here is a solution using a tree of dicts:
import collections

def tree():
    return collections.defaultdict(tree)

def unflatten(pair_list):
    root = tree()
    for mount, path in pair_list:
        parts = mount.split(":")
        curr = root
        for part in parts[:-1]:
            index = int(part[1:-1]) if part[0] == "[" else part
            curr = curr[index]
        part = parts[-1]
        index = int(part[1:-1]) if part[0] == "[" else part
        curr[index] = path
    return root
With the following input:
pair_list = [
    ['mounts:device', '/dev/sda3'],
    ['mounts:fstype:[0]', 'ext1'],
    ['mounts:fstype:[1]', 'ext3'],
    ['mounts:test:lala:fstype:[0]:[0]', 'abc']
]
You'll get:
{
    "mounts": {
        "fstype": {
            "0": "ext1",
            "1": "ext3"
        },
        "test": {
            "lala": {
                "fstype": {
                    "0": {
                        "0": "abc"
                    }
                }
            }
        },
        "device": "/dev/sda3"
    }
}
Then you can use the recursive function make_list below to turn the integer indexes into lists.
def make_list(root):
    if isinstance(root, str):
        return root
    keys = list(root.keys())
    if all(isinstance(k, int) for k in keys):
        values = [None] * (max(keys) + 1)
        for k in keys:
            values[k] = make_list(root[k])
        return values
    else:
        return {k: make_list(v) for k, v in root.items()}
Here is the result with the pair_list:
flat = unflatten(pair_list)
flat = make_list(flat)
You'll get:
{'mounts': {'device': '/dev/sda3',
'fstype': ['ext1', 'ext3'],
'test': {'lala': {'fstype': [['abc']]}}}}
Is this fine?
input1 = [
    ['mounts:device', '/dev/sda3'],
    ['mounts:fstype:[0]', 'ext1'],
    ['mounts:fstype:[1]', 'ext3']
]
input2 = {x[1]: x[0].split(':')[1] for x in input1}
input3 = ['ext3', 'ext1', '/dev/sda3']
input4 = ['fstype', 'fstype', 'device']
res = {}
for x, y in zip(input3, input4):
    res.setdefault(y, []).append(x)
res1 = res.keys()
res2 = res.values()
res3 = [x[0] for x in res2 if len(x) == 1] + [x for x in res2 if len(x) > 1]
result = dict(zip(res1, res3))
print(result)
Output:
{'device': '/dev/sda3', 'fstype': ['ext3', 'ext1']}
I am looking to write a recursive function:
arguments: d, dictionary
result: list of dictionaries
def expand_dictionary(d):
    return []
The function recursively goes through a dictionary and flattens nested objects using an _; in addition, it expands nested lists into the array and includes the parent label.
Think of creating a relational model from a document.
Here is an example input and output:
original_object = {
    "id": 1,
    "name": {
        "first": "Alice",
        "last": "Sample"
    },
    "cities": [
        {
            "id": 55,
            "name": "New York"
        },
        {
            "id": 60,
            "name": "Chicago"
        }
    ],
    "teachers": [
        {
            "id": 2,
            "name": "Bob",
            "classes": [
                {
                    "id": 13,
                    "name": "math"
                },
                {
                    "id": 16,
                    "name": "spanish"
                }
            ]
        }
    ]
}
expected_output = [
    {
        "id": 1,
        "name_first": "Alice",
        "name_last": "Sample"
    },
    {
        "_parent_object": "cities",
        "id": 55,
        "name": "New York"
    },
    {
        "_parent_object": "cities",
        "id": 60,
        "name": "Chicago"
    },
    {
        "_parent_object": "teachers",
        "id": 2,
        "name": "Bob"
    },
    {
        "_parent_object": "teachers_classes",
        "id": 13,
        "name": "math"
    },
    {
        "_parent_object": "teachers_classes",
        "id": 16,
        "name": "spanish"
    }
]
The code currently being used for flattening is:
def flatten_dictionary(d):
    def expand(key, value):
        if isinstance(value, dict):
            return [(key + '_' + k, v) for k, v in flatten_dictionary(value).items()]
        else:
            # If value is null or empty array don't include it
            if value is None or value == [] or value == '':
                return []
            return [(key, value)]
    items = [item for k, v in d.items() for item in expand(k, v)]
    return dict(items)
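For reference (my own check), this helper only flattens nested dicts and drops empty values; lists are passed through untouched, which is why it can't produce the expected output on its own:
flatten_dictionary({"id": 1, "name": {"first": "Alice", "last": "Sample"}})
# -> {'id': 1, 'name_first': 'Alice', 'name_last': 'Sample'}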
That will do
def expand_dictionary(d, name=None, l=None):
    obj = {}
    if l == None:
        l = [obj]
    else:
        l.append(obj)
    prefix = (name + '_' if name else '')
    if prefix: obj['_parent_object'] = name
    for i, v in d.items():
        if isinstance(v, list):
            for x in v:
                expand_dictionary(x, prefix + i, l)
        elif isinstance(v, dict):
            obj.update(flatten_dictionary({i: v}))
        else:
            obj[i] = v
    return l
After working through it a bit, here is what I have come up with. It can probably be significantly optimized. Based on @paulo-scardine's comment, I added the parent primary key to keep the relational model. I would love to hear optimization thoughts.
def expand_dictionary(original_object, object_name, objects=None):
    if objects is None:
        objects = []

    def flatten_dictionary(dictionary):
        def expand(key, value):
            if isinstance(value, dict):
                return [(key + '_' + k, v) for k, v in flatten_dictionary(value).items()]
            else:
                # If value is null or empty array don't include it
                if value is None or value == [] or value == '':
                    return []
                return [(key, value)]
        items = [item for k, v in dictionary.items() for item in expand(k, v)]
        return dict(items)

    original_object_root = flatten_dictionary(original_object).copy()
    original_object_root['_meta_object_name'] = object_name

    for key, value in original_object_root.copy().items():
        if isinstance(value, dict):
            flatten_dictionary(value, objects)
        if isinstance(value, list):
            original_object_root.pop(key)
            for nested_object in value:
                nested_object['_meta_parent_foreign_key'] = original_object_root['id']
                nested_object['_meta_object_name'] = object_name + "_" + key
                expand_dictionary(nested_object, object_name + "_" + key, objects)

    objects.append(original_object_root)
    return objects
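One optimization thought, offered as a sketch rather than a drop-in replacement (it reuses the _meta_* field names from the answer above, but the function name expand_document and the rest of the arrangement are my own): the flattening and the list expansion can be folded into a single recursive pass, which avoids re-flattening and the extra .copy() calls.
def expand_document(obj, name=None, parent_id=None, out=None):
    # Single pass: flatten nested dicts with '_', hoist each list of dicts
    # into its own row, and tag child rows with the parent's name and id.
    if out is None:
        out = []
    row = {}
    if name:
        row['_meta_object_name'] = name
    if parent_id is not None:
        row['_meta_parent_foreign_key'] = parent_id
    out.append(row)

    def walk(d, prefix=''):
        for k, v in d.items():
            if isinstance(v, dict):
                walk(v, prefix + k + '_')
            elif isinstance(v, list):
                child_name = (name + '_' if name else '') + prefix + k
                for child in v:
                    expand_document(child, child_name, obj.get('id'), out)
            elif v not in (None, '', []):
                row[prefix + k] = v

    walk(obj)
    return out

For example, expand_document(original_object) produces one row per entity, with child rows pointing back to their parent's id.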