Using xmltodict (v0.12.0) in Python, I have XML that gets parsed and converted into JSON. For example:
XML:
<test temp="temp" temp2="temp2">This is a test</test>
Will get converted to the following JSON:
{
    "test": {
        "#temp": "temp",
        "#temp2": "temp2",
        "#text": "This is a test"
    }
}
I have a front end parser that reads JSON objects and converts them into XML. Unfortunately, the tags are required to be shaped in a different way.
What the front end parser expects:
{
test: {
"#": {
temp: "temp",
temp2: "temp2"
},
"#": "This is a test"
}
}
I feel like this reshaping is better done in Python, but I am having a bit of trouble iterating over a much larger dictionary, where we don't know how deep the XML goes, collecting all of the keys that start with "#" and giving them their own object within the overall tag object. What are some ways I could approach shaping this data?
For anyone who is curious, this is how I ended up solving the issue. Like #furas stated, I decided that recursion was my best bet. I iterate through the original XML data that I converted to JSON (with the incorrectly formatted attributes) and build a copy, collecting any attribute markers along the way:
def structure_xml(data):
    """Recursively rebuild the dict, collecting '#'-prefixed keys under a "#" object."""
    curr_dict = {}
    for key, value in data.items():
        if isinstance(value, dict):
            # Nested element: recurse into it.
            curr_dict[key] = structure_xml(value)
        elif isinstance(value, list):
            # Repeated elements: rebuild each nested dict/list entry
            # (plain scalar entries are skipped, as in the original).
            value_list = []
            for val in value:
                if isinstance(val, (dict, list)):
                    value_list.append(structure_xml(val))
            curr_dict[key] = value_list
        else:
            if '#' in key:
                # Attribute marker: strip the '#' prefix and collect the entry
                # under the shared "#" object ('#text' matches here too, so the
                # element text ends up under "#" as 'text').
                new_key = key.split("#", 1)[1]
                if "#" in curr_dict:
                    curr_dict["#"][new_key] = value
                else:
                    curr_dict["#"] = {new_key: value}
            else:
                curr_dict[key] = value
    return curr_dict
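For reference, here is a minimal way to exercise the function on the example above. It is only a sketch: it assumes the '#' attribute prefix shown in the question, which you have to request explicitly via xmltodict's attr_prefix argument (the library defaults to '@').

import xmltodict

xml = '<test temp="temp" temp2="temp2">This is a test</test>'
# attr_prefix='#' reproduces the '#temp'/'#temp2' keys from the question;
# the text node keeps its '#text' key.
parsed = xmltodict.parse(xml, attr_prefix='#')
print(structure_xml(parsed))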
I need to parse a SOAP response and convert it to a text file. I am trying to parse the values as detailed below. I am using ElementTree in Python.
I have the below XML response which I need to parse:
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:tmf854="tmf854.v1" xmlns:alu="alu.v1">
<soapenv:Header>
<tmf854:header>
<tmf854:activityName>query</tmf854:activityName>
<tmf854:msgName>queryResponse</tmf854:msgName>
<tmf854:msgType>RESPONSE</tmf854:msgType>
<tmf854:senderURI>https:/destinationhost:8443/tmf854/services</tmf854:senderURI>
<tmf854:destinationURI>https://localhost:8443</tmf854:destinationURI>
<tmf854:activityStatus>SUCCESS</tmf854:activityStatus>
<tmf854:correlationId>1</tmf854:correlationId>
<tmf854:communicationPattern>MultipleBatchResponse</tmf854:communicationPattern>
<tmf854:communicationStyle>RPC</tmf854:communicationStyle>
<tmf854:requestedBatchSize>1500</tmf854:requestedBatchSize>
<tmf854:batchSequenceNumber>1</tmf854:batchSequenceNumber>
<tmf854:batchSequenceEndOfReply>true</tmf854:batchSequenceEndOfReply>
<tmf854:iteratorReferenceURI>http://9195985371165397084</tmf854:iteratorReferenceURI>
<tmf854:timestamp>20220915222121.472+0530</tmf854:timestamp>
</tmf854:header>
</soapenv:Header>
<soapenv:Body>
<queryResponse xmlns="alu.v1">
<queryObjectData>
<queryObject>
<name>
<tmf854:mdNm>AMS</tmf854:mdNm>
<tmf854:meNm>CHEERLAVANCHA_281743</tmf854:meNm>
<tmf854:ptpNm>/type=NE/CHEERLAVANCHA_281743</tmf854:ptpNm>
</name>
<vendorExtensions>
<package>
<NameAndStringValue>
<tmf854:name>hubSubtendedStatus</tmf854:name>
<tmf854:value>NONE</tmf854:value>
</NameAndStringValue>
<NameAndStringValue>
<tmf854:name>productAndRelease</tmf854:name>
<tmf854:value>DF.6.1</tmf854:value>
</NameAndStringValue>
<NameAndStringValue>
<tmf854:name>adminUserName</tmf854:name>
<tmf854:value>isadmin</tmf854:value>
</NameAndStringValue>
<NameAndStringValue>
</package>
</vendorExtensions>
</queryObject>
</queryObjectData>
</queryResponse>
</soapenv:Body>
</soapenv:Envelope>
I need to use the below code snippet.
parser = ElementTree.parse("response.txt")
root = parser.getroot()
inventoryObjectData = root.find(".//{alu.v1}queryObjectData")
for inventoryObject in inventoryObjectData:
for device in inventoryObject:
if (device.tag.split("}")[1]) == "me":
vendorExtensionsNames = []
vendorExtensionsValues = []
if device.find(".//{tmf854.v1}mdNm") is not None:
mdnm = device.find(".//{tmf854.v1}mdNm").text
if device.find(".//{tmf854.v1}meNm") is not None:
menm = device.find(".//{tmf854.v1}meNm").text
if device.find(".//{tmf854.v1}userLabel") is not None:
userlabel = device.find(".//{tmf854.v1}userLabel").text
if device.find(".//{tmf854.v1}resourceState") is not None:
resourcestate = device.find(".//{tmf854.v1}resourceState").text
if device.find(".//{tmf854.v1}location") is not None:
location = device.find(".//{tmf854.v1}location").text
if device.find(".//{tmf854.v1}manufacturer") is not None:
manufacturer = device.find(".//{tmf854.v1}manufacturer").text
if device.find(".//{tmf854.v1}productName") is not None:
productname = device.find(".//{tmf854.v1}productName").text
if device.find(".//{tmf854.v1}version") is not None:
version = device.find(".//{tmf854.v1}version").text
vendorExtensions = device.find("vendorExtensions")
vendorExtensionsNamesElements = vendorExtensions.findall(".//{tmf854.v1}name")
for i in vendorExtensionsNamesElements:
vendorExtensionsNames.append(i.text.strip())
vendorExtensionsValuesElements = vendorExtensions.findall(".//{tmf854.v1}value")
for i in vendorExtensionsValuesElements:
vendorExtensionsValues.append(str(i.text or "").strip())
alu = ""
for i in vendorExtensions:
if i.attrib:
if alu == "":
alu = i.attrib.get("{alu.v1}name")
else:
alu = alu + "|" + i.attrib.get("{alu.v1}name")
The issue is that the code below is not able to find the 'vendorExtensions' element. Please help here.
vendorExtensions = device.find("vendorExtensions")
I have tried the below as well:
vendorExtensions = device.find(".//queryObject/vendorExtensions")
Your document declares a default namespace of alu.v1:
<queryResponse xmlns="alu.v1">
...
</queryResponse>
Any element without an explicit namespace prefix is in the alu.v1 namespace. You need to qualify the element name appropriately:
vendorExtensions = device.find("{alu.v1}vendorExtensions")
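Equivalently, if you prefer prefixes to the {uri} Clark notation, ElementTree's find accepts a prefix-to-URI map (the alu and tmf854 prefixes below are arbitrary local names, not anything taken from your document):

namespaces = {"alu": "alu.v1", "tmf854": "tmf854.v1"}
vendorExtensions = device.find("alu:vendorExtensions", namespaces)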
While the above is a real problem that needs to be corrected (the Wikipedia entry on XML namespaces may be useful reading if you're unfamiliar with how namespaces work), there are also some logic problems in your code.
Let's drop the big list of conditionals from the code and see if it's actually doing what we think it's doing. If we run this:
from xml.etree import ElementTree
parser = ElementTree.parse("data.xml")
root = parser.getroot()
queryObjectData = root.find(".//{alu.v1}queryObjectData")
for queryObject in queryObjectData:
for device in queryObject:
print(device.tag)
Then using your sample data (once it has been corrected to be syntactically valid), we see as output:
{alu.v1}name
{alu.v1}vendorExtensions
Your search for the {alu.v1}vendorExtensions element will never succeed, because the thing on which you're searching (the device variable) is already the thing you're trying to find.
Additionally, the conditional in your loop...
if (device.tag.split("}")[1]) == "me":
...will never match (there is no element in the entire document for which tag.split("}")[1] == "me" is True).
I'm not entirely clear what you're trying to do, but here are some thoughts:
Given your example data, you probably don't want that for device in inventoryObject: loop
We can drastically simplify your code by replacing that long block of conditionals with a list of attributes in which we are interested and then a for loop to extract them.
Rather than assigning a bunch of individual variables, we can build up a dictionary with the data from the queryObject
That might look like:
from xml.etree import ElementTree
import json
attributeNames = [
"mdNm",
"meNm",
"userLabel",
"resourceState",
"location",
"manufacturer",
"productName",
"version",
]
parser = ElementTree.parse("data.xml")
root = parser.getroot()
queryObjectData = root.find(".//{alu.v1}queryObjectData")
for queryObject in queryObjectData:
device = {}
for name in attributeNames:
if (value := queryObject.find(f".//{{tmf854.v1}}{name}")) is not None:
device[name] = value.text
vendorExtensions = queryObject.find("{alu.v1}vendorExtensions")
extensionMap = {}
for extension in vendorExtensions.findall(".//{alu.v1}NameAndStringValue"):
extname = extension.find("{tmf854.v1}name").text
extvalue = extension.find("{tmf854.v1}value").text
extensionMap[extname] = extvalue
device["vendorExtensions"] = extensionMap
print(json.dumps(device, indent=2))
Given your example data, this outputs:
{
"mdNm": "AMS",
"meNm": "CHEERLAVANCHA_281743",
"vendorExtensions": {
"hubSubtendedStatus": "NONE",
"productAndRelease": "DF.6.1",
"adminUserName": "isadmin"
}
}
An alternate approach, in which we just transform each queryObject into a dictionary, might look like this:
from xml.etree import ElementTree
import json
def localName(ele):
return ele.tag.split("}")[1]
def etree_to_dict(t):
if list(t):
d = {}
for child in t:
if localName(child) == "NameAndStringValue":
d.update(dict([[x.text.strip() for x in child]]))
else:
d.update({localName(child): etree_to_dict(child) for child in t})
return d
else:
return t.text.strip()
parser = ElementTree.parse("data.xml")
root = parser.getroot()
queryObjectData = root.find(".//{alu.v1}queryObjectData") or []
for queryObject in queryObjectData:
d = etree_to_dict(queryObject)
print(json.dumps(d, indent=2))
This will output:
{
"name": {
"mdNm": "AMS",
"meNm": "CHEERLAVANCHA_281743",
"ptpNm": "/type=NE/CHEERLAVANCHA_281743"
},
"vendorExtensions": {
"package": {
"hubSubtendedStatus": "NONE",
"productAndRelease": "DF.6.1",
"adminUserName": "isadmin"
}
}
}
That may or may not be appropriate depending on the structure of your real data and exactly what you're trying to accomplish.
I have a JSON file where each object looks like the following example:
[
{
"timestamp": 1569177699,
"attachments": [
],
"data": [
{
"post": "\u00f0\u009f\u0096\u00a4\u00f0\u009f\u0092\u0099"
},
{
"update_timestamp": 1569177699
}
],
"title": "firstName LastName"
}
]
I want to check if there is the key post nested within the key data. I wrote this, but it doesn't work:
posts = json.loads(open(file).read())
for post in posts:
if 'data' in post:
if 'post' in post['data']:
print post['data']['post']
Here is my solution. I see from your sample data that post["data"] is a list, so the program should iterate over it:
posts = json.loads(open(file).read())
for post in posts:
if 'data' in post:
#THIS IS THE NEW LINE to iterate list
for d in post["data"]:
if 'post' in d:
print d['post']
Try:
posts = json.loads(open(file).read())
for data in posts:
for key, value in data.items():
if key == 'data':
for item in value:
if 'post' in item:
print(key, item['post'])
Try this answer, it works:
Elegant way to check if a nested key exists in a python dict
def keys_exists(element, *keys):
'''
Check if *keys (nested) exists in `element` (dict).
'''
if not isinstance(element, dict):
raise AttributeError('keys_exists() expects dict as first argument.')
if len(keys) == 0:
raise AttributeError('keys_exists() expects at least two arguments, one given.')
_element = element
for key in keys:
try:
_element = _element[key]
except KeyError:
return False
return True
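For example, with a hypothetical nested dict (not the question's data, whose "data" value is a list rather than a dict):

example = {"a": {"b": {"c": 1}}}
print(keys_exists(example, "a", "b", "c"))  # True
print(keys_exists(example, "a", "x"))       # False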
You could do it generically by adapting my answer to the question How to find a particular json value by key?.
It's generic in the sense that it doesn't care much about the details of how the JSON data is structured; it just checks every dictionary it finds inside it.
import json
def find_values(id, json_file):
results = []
def _decode_dict(a_dict):
try:
results.append(a_dict[id])
except KeyError:
pass
return a_dict
json.load(json_file, object_hook=_decode_dict) # Return value ignored.
return len(results) > 0 # If there are any results, id was found.
with open('find_key_test.json', 'r') as json_file:
print(find_values('post', json_file)) # -> True
Please try the following:
posts = json.loads(open(file).read())
for post in posts:
if 'data' in post:
for data in post['data']:
if 'post' in data:
print(data['post'])
I wasn't sure how to search for this, but I am trying to make a script that dynamically launches programs. I will have a couple of JSON files and I want to be able to do a search-and-replace sort of thing.
So I'll setup an example:
config.json
{
"global_vars": {
"BASEDIR": "/app",
"CONFIG_DIR": "{BASEDIR}/config",
"LOG_DIR": "{BASEDIR}/log",
"CONFIG_ARCHIVE_DIR": "{CONFIG_DIR}/archive"
}
}
Then process.json
{
"name": "Dummy_Process",
"binary": "java",
"executable": "DummyProcess-0.1.0.jar",
"launch_args": "-Dspring.config.location={CONFIG_DIR}/application.yml -Dlogging.config={CONFIG_DIR}/logback-spring.xml -jar {executable}",
"startup_log": "{LOG_DIR}/startup_{name}.out"
}
Now I want to be able to load both of these JSON objects and use the values there to update them. So, for example, "CONFIG_ARCHIVE_DIR": "{CONFIG_DIR}/archive" will become "CONFIG_ARCHIVE_DIR": "/app/config/archive".
Does anyone know a good way to do this recursively? I'm running into issues when I try to use something like CONFIG_DIR, which requires BASEDIR to be resolved first.
I have this function that loads all the data:
#Recursive function, loops and loads all values into data
def _load_data(data,obj):
for i in obj.keys():
if isinstance(obj[i],str):
data[i]=obj[i]
if isinstance(obj[i],dict):
data=_load_data(data,obj[i])
return data
Then I have this function:
def _update_data(data,data_str=""):
if not data_str:
data_str=json.dumps(data)
for i in data.keys():
if isinstance(data[i],str):
data_str=data_str.replace("{"+i+"}",data[i])
if isinstance(data[i],dict):
data=_update_data(data,data_str)
return json.loads(data_str)
So this works for one level, but I don't know if this is the best way to do it. It stops working when I hit a case like CONFIG_DIR because it would need to loop over the data multiple times: first it needs to update BASEDIR, then once more to update CONFIG_DIR. Suggestions welcome.
The end goal of this script is to create a start/stop/status script to manage all of our binaries. They all use different binaries to start, and I want one processes file for multiple servers. Each process will have a servers array to tell the start/stop script what to run on a given server. Maybe there's something like this already out there, so if there is, please point me in that direction.
I will be running on Linux and prefer to use Python. I want something smart and easy for someone else to pick up and use/modify.
I made something that works with the example files you provided. Note that I didn't handle multiple keys or non-dictionaries in the data. This function accepts a list of the dictionaries obtained after JSON parsing your input files. It uses the fact that re.sub can accept a function for the replacement value and calls that function with each match. I am sure there are plenty of improvements that could be made to this, but it should get you started at least.
import re

def make_config(configs):
    replacements = {}

    def find_defs(config):
        # Find leaf nodes of the dictionary.
        defs = {}
        for k, v in config.items():
            if isinstance(v, dict):
                # Nested dictionary so recurse.
                defs.update(find_defs(v))
            else:
                defs[k] = v
        return defs

    for config in configs:
        replacements.update(find_defs(config))

    def make_replacement(m):
        # Construct the replacement string.
        name = m.group(0).strip('{}')
        if name in replacements:
            # Replace replacement strings in the replacement string.
            new = re.sub(r'\{[^}]+\}', make_replacement, replacements[name])
            # Cache result
            replacements[name] = new
            return new
        raise Exception('Replacement string for {} not found'.format(name))

    finalconfig = {}
    for name, value in replacements.items():
        finalconfig[name] = re.sub(r'\{[^}]+\}', make_replacement, value)
    return finalconfig
With this input:
[
{
"global_vars": {
"BASEDIR": "/app",
"CONFIG_DIR": "{BASEDIR}/config",
"LOG_DIR": "{BASEDIR}/log",
"CONFIG_ARCHIVE_DIR": "{CONFIG_DIR}/archive"
}
},
{
"name": "Dummy_Process",
"binary": "java",
"executable": "DummyProcess-0.1.0.jar",
"launch_args": "-Dspring.config.location={CONFIG_DIR}/application.yml -Dlogging.config={CONFIG_DIR}/logback-spring.xml -jar {executable}",
"startup_log": "{LOG_DIR}/startup_{name}.out"
}
]
It gives this output:
{
'BASEDIR': '/app',
'CONFIG_ARCHIVE_DIR': '/app/config/archive',
'CONFIG_DIR': '/app/config',
'LOG_DIR': '/app/log',
'binary': 'java',
'executable': 'DummyProcess-0.1.0.jar',
'launch_args': '-Dspring.config.location=/app/config/application.yml -Dlogging.config=/app/config/logback-spring.xml -jar DummyProcess-0.1.0.jar',
'name': 'Dummy_Process',
'startup_log': '/app/log/startup_Dummy_Process.out'
}
As an alternative to the answer by #FamousJameous, and if you don't mind changing to ini format, you can also use the Python built-in configparser, which already has support for expanding variables.
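A minimal sketch of that idea, assuming the values are moved into an ini file and using configparser's ExtendedInterpolation (which expands ${...} references, including chained ones):

import configparser

ini_text = """
[global_vars]
BASEDIR = /app
CONFIG_DIR = ${BASEDIR}/config
LOG_DIR = ${BASEDIR}/log
CONFIG_ARCHIVE_DIR = ${CONFIG_DIR}/archive
"""

parser = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
parser.read_string(ini_text)
print(parser["global_vars"]["CONFIG_ARCHIVE_DIR"])  # -> /app/config/archive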
I implemented a solution with a class (Config) with a couple of functions:
_load: simply converts the JSON into a Python object;
_extract_params: loops over the document (the output of _load) and adds its parameters to a class attribute (self.params);
_loop: loops over the object returned by _extract_params and, if a value contains any {param}, calls the _transform method;
_transform: replaces the {param} placeholders in the values with the correct values; if there is any '{' in the value linked to the param being replaced, it calls itself again.
I hope I was clear enough, here is the code:
import json
import re
config = """{
"global_vars": {
"BASEDIR": "/app",
"CONFIG_DIR": "{BASEDIR}/config",
"LOG_DIR": "{BASEDIR}/log",
"CONFIG_ARCHIVE_DIR": "{CONFIG_DIR}/archive"
}
}"""
process = """{
"name": "Dummy_Process",
"binary": "java",
"executable": "DummyProcess-0.1.0.jar",
"launch_args": "-Dspring.config.location={CONFIG_DIR}/application.yml -Dlogging.config={CONFIG_DIR}/logback-spring.xml -jar {executable}",
"startup_log": "{LOG_DIR}/startup_{name}.out"
}
"""
class Config(object):
def __init__(self, documents):
self.documents = documents
self.params = {}
self.output = {}
# Loads JSON to dictionary
def _load(self, document):
obj = json.loads(document)
return obj
# Extracts the config parameters in a dictionary
def _extract_params(self, document):
for k, v in document.items():
if isinstance(v, dict):
# Recursion for inner dictionaries
self._extract_params(v)
else:
# if not a dict set params[k] as v
self.params[k] = v
return self.params
# Loop on the configs dictionary
def _loop(self, params):
for key, value in params.items():
# if there is any parameter inside the value
if len(re.findall(r'{([^}]*)\}', value)) > 0:
findings = re.findall(r'{([^}]*)\}', value)
# call the transform function
self._transform(params, key, findings)
return self.output
# Replace all the findings with the correct value
def _transform(self, object, key, findings):
# Iterate over the found params
for finding in findings:
# if { -> recursion to set all the needed values right
if '{' in object[finding]:
self._transform(object, finding, re.findall(r'{([^}]*)\}', object[finding]))
# Do the actual replacement
object[key] = object[key].replace('{'+finding+'}', object[finding])
self.output = object
return self.output
# Entry point
def process_document(self):
params = {}
# _load the documents and extract the params
for document in self.documents:
params.update(self._extract_params(self._load(document)))
# _loop over the params
return self._loop(params)
# return self.output
if __name__ == '__main__':
config = Config([config, process])
print(config.process_document())
I am sure there are many better ways to reach your goal, but I still hope this can be useful to you.
Consider the sample JSON below.
{
"widget": {
"test": "on",
"window": {
"title": "myWidget1",
"name": "main_window"
},
"image": {
"src": "Images/wid1.png",
"name": "wid1"
}
},
"os":{
"name": "ios"
}
}
Consider the case where we don't know the structure of the JSON or any of the keys. What I need to implement is a Python function which iterates through all the keys and sub-keys and prints each key. That is, knowing only the JSON file name, I should be able to iterate over all the keys and sub-keys. The JSON can be of any structure. What I have tried is given below.
JSON_PATH = "D:\workspace\python\sampleJSON.json"
os.path.expanduser(JSON_PATH)
def iterateAllKeys(e):
for key in e.iterkeys():
print key
for child in key.get(key):
iterateAllKeys(child)
with open(JSON_PATH) as data_file:
data = json.load(data_file)
iterateAllKeys(data)
Here, the iterateAllKeys() function is supposed to print all the keys present in the JSON file. But if only the outer loop is present, i.e.
def iterateAllKeys(e):
for key in e.iterkeys():
print key
It will print the keys "widget" and "os". But,
def iterateAllKeys(e):
for key in e.iterkeys():
print key
for child in key.get(key):
iterateAllKeys(child)
returns an error - AttributeError: 'unicode' object has no attribute 'get'. My understanding is that since the value of 'child' is not a dict object, we cannot apply 'key.get()' to it. But is there any alternate way by which I can iterate over the JSON file without specifying any of the key names? Thank you.
You can use recursion to iterate through multi level dictionaries like this:
def iter_dict(dic):
for key in dic:
print(key)
if isinstance(dic[key], dict):
iter_dict(dic[key])
The keys of the first dictionary are iterated over and every key is printed; if an item is an instance of the dict class, we recurse to also iterate through the dictionaries we encounter as items.
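If the JSON can also contain lists of objects (the sample above does not, but many real documents do), a small extension of the same idea handles that case as well; this is just a sketch:

def iter_json(node):
    # Print every key, recursing into nested dicts and into list elements.
    if isinstance(node, dict):
        for key, value in node.items():
            print(key)
            iter_json(value)
    elif isinstance(node, list):
        for item in node:
            iter_json(item)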
You can do this through an auxiliary package like flatten_json.
pip install flatten_json
from flatten_json import flatten
for key in flatten(your_dict).keys():
print(key)
Output:
widget_test
widget_window_title
widget_window_name
widget_image_src
widget_image_name
os_name
If you want to show only the key without the whole path, you can do it like this:
print(key.split('_')[-1])
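If the keys themselves may contain underscores, a different separator keeps that split unambiguous (a sketch relying on flatten_json's separator argument):

for key in flatten(your_dict, separator='.').keys():
    print(key.split('.')[-1])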
First of all, your last function:
def iterateAllKeys(e):
for key in e.iterkeys():
print key
for child in key.get(key):
iterateAllKeys(child)
key is just the key value of the dictionary. So, if anything, you should be using e.get(key) or e[key].
for child in e.get(key):
Now, this alone would not solve your problem; one work-around is using try/except, as follows:
def iterateAllKeys(e):
for key in e.iterkeys():
print key
try:
iterateAllKeys(e[key])
except:
print "---SKIP---"
This is maybe not the best work-around, but it certainly works.
With your Data it prints the following:
widget
test
---SKIP---
window
name
---SKIP---
title
---SKIP---
image
src
---SKIP---
name
---SKIP---
os
name
---SKIP---
I have a Flask application which is receiving a request from DataTables Editor. Upon receipt at the server, request.form looks like this (e.g.):
ImmutableMultiDict([('data[59282][gender]', u'M'), ('data[59282][hometown]', u''),
('data[59282][disposition]', u''), ('data[59282][id]', u'59282'),
('data[59282][resultname]', u'Joe Doe'), ('data[59282][confirm]', 'true'),
('data[59282][age]', u'27'), ('data[59282][place]', u'3'), ('action', u'remove'),
('data[59282][runnerid]', u''), ('data[59282][time]', u'29:49'),
('data[59282][club]', u'')])
I am thinking of using something similar to this really ugly code to decode it. Is there a better way?
from collections import defaultdict
# request.form comes in multidict [('data[id][field]',value), ...]
# so we need to exec this string to turn into python data structure
data = defaultdict(lambda: {}) # default is empty dict
# need to define text for each field to be received in data[id][field]
age = 'age'
club = 'club'
confirm = 'confirm'
disposition = 'disposition'
gender = 'gender'
hometown = 'hometown'
id = 'id'
place = 'place'
resultname = 'resultname'
runnerid = 'runnerid'
time = 'time'
# fill in data[id][field] = value
for formkey in request.form.keys():
exec '{} = {}'.format(d,repr(request.form[formkey]))
This question has an accepted answer and is a bit old, but since the DataTables module still seems to be pretty popular in the jQuery community, I believe this approach may be useful for someone else. I've just written a simple parsing function based on a regular expression and the dpath module, though dpath does not appear to be an entirely reliable module. The snippet may not be very straightforward due to the exception-based fragment, but that was the only way I found to prevent dpath from trying to resolve strings as integer indices.
import re, dpath.util
rxsKey = r'(?P<key>[^\W\[\]]+)'
rxsEntry = r'(?P<primaryKey>[^\W]+)(?P<secondaryKeys>(\[' \
+ rxsKey \
+ r'\])*)\W*'
rxKey = re.compile(rxsKey)
rxEntry = re.compile(rxsEntry)
def form2dict(frmDct):
    res = {}
    for k, v in frmDct.items():
        m = rxEntry.match(k)
        if not m:
            continue
        mdct = m.groupdict()
        if not mdct['secondaryKeys']:
            # No bracketed sub-keys: a plain top-level entry.
            res[mdct['primaryKey']] = v
        else:
            fullPath = [mdct['primaryKey']]
            for sk in re.finditer(rxKey, mdct['secondaryKeys']):
                k = sk.groupdict()['key']
                try:
                    dpath.util.get(res, fullPath)
                except KeyError:
                    # Intermediate node missing: create a list for numeric
                    # keys, a dict otherwise.
                    dpath.util.new(res, fullPath, [] if k.isdigit() else {})
                fullPath.append(int(k) if k.isdigit() else k)
            dpath.util.new(res, fullPath, v)
    return res
The practical usage is based on the native Flask request.form.to_dict() method:
# ... somewhere in a view code
pars = form2dict(request.form.to_dict())
The output structure includes both dictionaries and lists, as one could expect. E.g.:
# A little test:
rs = form2dict( {
'columns[2][search][regex]' : False,
'columns[2][search][value]' : None,
'columns[2][search][regex]' : False,
} )
generates:
{
"columns": [
null,
null,
{
"search": {
"regex": false,
"value": null
}
}
]
}
Update: to handle lists as dictionaries (in a more efficient way) one may simplify this snippet with the following block in the else part of the if clause:
# ...
else:
fullPathStr = mdct['primaryKey']
for sk in re.finditer( rxKey, mdct['secondaryKeys'] ):
fullPathStr += '/' + sk.groupdict()['key']
dpath.util.new(res, fullPathStr, v)
I decided on a way that is more secure than using exec:
from collections import defaultdict
def get_request_data(form):
'''
return dict list with data from request.form
:param form: MultiDict from `request.form`
:rtype: {id1: {field1:val1, ...}, ...} [fieldn and valn are strings]
'''
# request.form comes in multidict [('data[id][field]',value), ...]
# fill in id field automatically
data = defaultdict(lambda: {})
# fill in data[id][field] = value
for formkey in form.keys():
if formkey == 'action': continue
datapart,idpart,fieldpart = formkey.split('[')
if datapart != 'data': raise ParameterError("invalid input in request: {}".format(formkey))
idvalue = int(idpart[0:-1])
fieldname = fieldpart[0:-1]
data[idvalue][fieldname] = form[formkey]
# return decoded result
return data
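A quick illustration with a plain dict standing in for request.form (a sketch; the real MultiDict supports .keys() and item access the same way):

form = {
    'action': 'remove',
    'data[59282][gender]': 'M',
    'data[59282][age]': '27',
}
print(dict(get_request_data(form)))
# -> {59282: {'gender': 'M', 'age': '27'}}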