I want to convert sample JSON data into a CSV file using Python. I am retrieving the JSON data from an API.
Since my JSON has nested objects, it cannot be converted directly to CSV. I don't want to hard-code anything; I want the Python code to be fully dynamic.
So I have written a function that flattens my JSON data, but I cannot work out how to iterate over all records, find the relevant column names, and then write that data to a CSV file.
In the sample JSON below I have included only 2 records, but in reality there are 100 records.
The sample JSON looks like this:
[
{
"id":"Random_Company_57",
"unid":"75",
"fieldsToValues":{
"Email":"None",
"occupation":"SO1 Change",
"manager":"None",
"First Name":"Bells",
"employeeID":"21011.0",
"loginRequired":"true",
"superUser":"false",
"ldapSuperUser":"false",
"archived":"true",
"password":"None",
"externalUser":"false",
"Username":"Random_Company_57",
"affiliation":"",
"Phone":"+16 22 22 222",
"unidDominoKey":"",
"externalUserActive":"false",
"secondaryOccupation":"SO1 Change",
"retypePassword":"None",
"Last Name":"Christmas"
},
"hierarchyFieldAccess":[
],
"userHierarchies":[
{
"hierarchyField":"Company",
"value":"ABC Company"
},
{
"hierarchyField":"Department",
"value":"gfds"
},
{
"hierarchyField":"Project",
"value":"JKL-SDFGHJW"
},
{
"hierarchyField":"Division",
"value":"Silver RC"
},
{
"hierarchyField":"Site",
"value":"SQ06"
}
],
"locale":{
"id":1,
"dateFormat":"dd/MM/yyyy",
"languageTag":"en-UA"
},
"roles":[
"User"
],
"readAccessRoles":[
],
"preferredLanguage":"en-AU",
"prefName":"Christmas Bells",
"startDate":"None",
"firstName":"Bells",
"lastName":"Christmas",
"fullName":"Christmas Bells",
"lastModified":"2022-02-22T03:47:41.632Z",
"email":"None",
"docNo":"None",
"virtualSuperUser":false
},
{
"id":"xyz.abc#safe.net",
"unid":"98",
"fieldsToValues":{
"Email":"xyz.abc#safe.net",
"occupation":"SO1 Change",
"manager":"None",
"First Name":"Bells",
"employeeID":"21011.0",
"loginRequired":"false",
"superUser":"false",
"ldapSuperUser":"false",
"archived":"false",
"password":"None",
"externalUser":"false",
"Username":"xyz.abc#safe.net",
"affiliation":"",
"Phone":"+16 2222 222 222",
"unidDominoKey":"",
"externalUserActive":"false",
"secondaryOccupation":"SO1 Change",
"retypePassword":"None",
"Last Name":"Christmas"
},
"hierarchyFieldAccess":[
],
"userHierarchies":[
{
"hierarchyField":"Company",
"value":"ABC Company"
},
{
"hierarchyField":"Department",
"value":"PUHJ"
},
{
"hierarchyField":"Project",
"value":"RPOJ-SDFGHJW"
},
{
"hierarchyField":"Division",
"value":"Silver RC"
},
{
"hierarchyField":"Site",
"value":"SQ06"
}
],
"locale":{
"id":1,
"dateFormat":"dd/MM/yyyy",
"languageTag":"en-UA"
},
"roles":[
"User"
],
"readAccessRoles":[
],
"preferredLanguage":"en-AU",
"prefName":"Christmas Bells",
"startDate":"None",
"firstName":"Bells",
"lastName":"Christmas",
"fullName":"Christmas Bells",
"lastModified":"2022-03-16T05:04:13.085Z",
"email":"xyz.abc#safe.net",
"docNo":"None",
"virtualSuperUser":false
}
]
What I have tried:
def flattenjson(b, delim):
    val = {}
    for i in b.keys():
        if isinstance(b[i], dict):
            get = flattenjson(b[i], delim)
            for j in get.keys():
                val[i + delim + j] = get[j]
        else:
            val[i] = b[i]
    print(val)
    return val
json = [{Sample JSON string shown above}]
flattenjson(json, "__")
I don't know whether this is the right way to approach the problem or not.
My final aim is to write all of the above JSON data into a CSV file.
Based on this answer, you could loop through your list of JSON records and flatten each one with the given function (do they always have the same structure?), then build a DataFrame and write the data to CSV. That's the easiest way I can think of;
try this:
import pandas as pd
import json
import collections.abc

def flatten(dictionary, parent_key=False, separator='__'):
    items = []
    for key, value in dictionary.items():
        new_key = str(parent_key) + separator + key if parent_key else key
        # collections.MutableMapping was removed in Python 3.10; use collections.abc
        if isinstance(value, collections.abc.MutableMapping):
            items.extend(flatten(value, new_key, separator).items())
        elif isinstance(value, list):
            for k, v in enumerate(value):
                items.extend(flatten({str(k): v}, new_key).items())
        else:
            items.append((new_key, value))
    return dict(items)

with open('your_json.json') as f:
    data = json.load(f)  # data is the example you provided (a list of dicts)

all_records = []
for jsn in data:
    tmp = flatten(jsn)
    all_records.append(tmp)

df = pd.DataFrame(all_records)
df.to_csv('json_to_csv.csv')
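If you prefer to avoid pandas, here is a hedged sketch of the same last step using the standard library's csv module (it assumes all_records was built with the flatten function above; the union of all keys becomes the header, and missing columns are left empty):

import csv

# collect every column name that appears in any flattened record
fieldnames = sorted({k for rec in all_records for k in rec})

with open('json_to_csv.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, fieldnames=fieldnames, restval='')
    writer.writeheader()
    writer.writerows(all_records)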
I have the JSON file below, which I need to query to get the values of the keys inside 'validations' as a list.
For example, the output for column_values_not_null will need to be this:
['lu_name', 'transaction_amount']
"validation_file_name": "ctm",
"connection_type": "s3",
"low_threshold": 500000,
"high_threshold": 1000000,
"frequency": "weekly",
"validations": [
{
"columns_to_match_ordered_list" :[
"lu_name",
"site_name",
"transaction_date_time",
"margin",
"transaction_currency_code",
"reversal_indicator_description",
"reversal_amount",
"original_amount"
]
},
{
"column_values_not_null":[
"lu_name",
"transaction_amount"
]
},
{
"column_values_not_duplicate": [
"lu_name",
"response_code_description"
]
}
]
I am able to do the below, but I need to do this without using the index value:
f = open('test.json')
json_content = json.load(f)
print(json_content['validations'][1]['column_values_not_null'])
Get a list by querying the validations key. The sum(..., []) calls are used to flatten the nested lists (as required by the condition "without using the index value", if I got it right); for details, pros and cons, see the docs.
data = #

def validations(data: dict, key_query: str) -> list:
    for k, v in data.items():
        if k == 'validations':
            return sum(sum([list(d.values()) for d in v if key_query in d], []), [])

print(validations(data, key_query='column_values_not_null'))
# ['lu_name', 'transaction_amount']
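If the nested sum() calls feel opaque, here is a hedged, equivalent sketch using a plain loop (same assumption about the structure: 'validations' is a list of single-key dicts whose values are lists):

def validations_plain(data: dict, key_query: str) -> list:
    result = []
    for block in data.get('validations', []):  # each block is a small one-key dict
        if key_query in block:
            result.extend(block[key_query])    # collect the list under the queried key
    return result

# validations_plain(json_content, 'column_values_not_null')
# -> ['lu_name', 'transaction_amount']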
I am using Python 2.7.
I have a JSON response I pull that is always changing when I request it.
I need to pull out Animal_Target_DisplayName under Term7, under Relation6, in my dict.
The problem is that sometimes the object Relation6 is in another part of the JSON; it can be nested deeper or appear in a different order.
I am trying to create code that just exports the values of the key Animal_Target_DisplayName, but nothing is working. It won't even loop down the nested dict.
Now this can work if I just pull it out using something like ['view']['Term0'][0]['Relation6'], but remember that the JSON is never returned in the same structure.
Here is the code I am using to get the values of the key Animal_Target_DisplayName, but it doesn't seem to loop through my dict and find all the values with that key:
array = []
for d in dict.values():
    row = d['Animal_Target_DisplayName']
    array.append(row)
JSON Below:
dict = {
"view":{
"Term0":[
{
"Id":"b0987b91-af12-4fe3-a56f-152ac7a4d84d",
"DisplayName":"Dog",
"FullName":"Dog",
"AssetType1":[
{
"AssetType_Id":"00000000-0000-0000-0000-000000031131",
}
]
},
{
"Id":"ee74a59d-fb74-4052-97ba-9752154f015d",
"DisplayName":"Dog2",
"FullName":"Dog",
"AssetType1":[
{
"AssetType_Id":"00000000-0000-0000-0000-000000031131",
}
]
},
{
"Id":"eb548eae-da6f-41e8-80ea-7e9984f56af6",
"DisplayName":"Dog3",
"FullName":"Dog3",
"AssetType1":[
{
"AssetType_Id":"00000000-0000-0000-0000-000000031131",
}
]
},
{
"Id":"cfac6dd4-0efa-4417-a2bf-0333204f8a42",
"DisplayName":"Animal Set",
"FullName":"Animal Set",
"AssetType1":[
{
"AssetType_Id":"00000000-0000-0000-0001-000400000001",
}
],
"StringAttribute2":[
{
"StringAttribute_00000000-0000-0000-0000-000000003114_Id":"00a701a8-be4c-4b76-a6e5-3b0a4085bcc8",
"StringAttribute_00000000-0000-0000-0000-000000003114_Value":"Desc"
}
],
"StringAttribute3":[
{
"StringAttribute_00000000-0000-0000-0000-000000000262_Id":"a81adfb4-7528-4673-8c95-953888f3b43a",
"StringAttribute_00000000-0000-0000-0000-000000000262_Value":"meow"
}
],
"BooleanAttribute4":[
{
"BooleanAttribute_00000000-0000-0000-0001-000500000001_Id":"932c5f97-c03f-4a1a-a0c5-a518f5edef5e",
"BooleanAttribute_00000000-0000-0000-0001-000500000001_Value":"true"
}
],
"SingleValueListAttribute5":[
{
"SingleValueListAttribute_00000000-0000-0000-0001-000500000031_Id":"ef51dedd-6f25-4408-99a6-5a6cfa13e198",
"SingleValueListAttribute_00000000-0000-0000-0001-000500000031_Value":"Blah"
}
],
"Relation6":[
{
"Animal_Id":"2715ca09-3ced-4b74-a418-cef4a95dddf1",
"Term7":[
{
"Animal_Target_Id":"88fd0090-4ea8-4ae6-b7f0-1b13e5cf3d74",
"Animal_Target_DisplayName":"Animaltheater",
"Animal_Target_FullName":"Animaltheater"
}
]
},
{
"Animal_Id":"6068fe78-fc8e-4542-9aee-7b4b68760dcd",
"Term7":[
{
"Animal_Target_Id":"4e87a614-2a8b-46c0-90f3-8a0cf9bda66c",
"Animal_Target_DisplayName":"Animaltitle",
"Animal_Target_FullName":"Animaltitle"
}
]
},
{
"Animal_Id":"754ec0e6-19b6-4b6b-8ba1-573393268257",
"Term7":[
{
"Animal_Target_Id":"a8986ed5-3ec8-44f3-954c-71cacb280ace",
"Animal_Target_DisplayName":"Animalcustomer",
"Animal_Target_FullName":"Animalcustomer"
}
]
},
{
"Animal_Id":"86b3ffd1-4d54-4a98-b25b-369060651bd6",
"Term7":[
{
"Animal_Target_Id":"89d02067-ebe8-4b87-9a1f-a6a0bdd40ec4",
"Animal_Target_DisplayName":"Animalfact_transaction",
"Animal_Target_FullName":"Animalfact_transaction"
}
]
},
{
"Animal_Id":"ea2e1b76-f8bc-46d9-8ebc-44ffdd60f213",
"Term7":[
{
"Animal_Target_Id":"e398cd32-1e73-46bd-8b8f-d039986d6de0",
"Animal_Target_DisplayName":"Animalfact_transaction",
"Animal_Target_FullName":"Animalfact_transaction"
}
]
}
],
"Relation10":[
{
"TargetRelation_b8b178ff-e957-47db-a4e7-6e5b789d6f03_Id":"aff80bd0-a282-4cf5-bdcc-2bad35ddec1d",
"Term11":[
{
"AnimalId":"3ac22167-eb91-469a-9d94-315aa301f55a",
"AnimalDisplayName":"Animal",
"AnimalFullName":"Animal"
}
]
}
],
"Tag12":[
{
"Tag_Id":"75968ea6-4c9f-43c9-80f7-dfc41b24ec8f",
"Tag_Name":"AnimalAnimaltitle"
},
{
"Tag_Id":"b1adbc00-aeef-415b-82b6-a3159145c60d",
"Tag_Name":"Animal2"
},
{
"Tag_Id":"5f78e4dc-2b37-41e0-a0d3-cec773af2397",
"Tag_Name":"AnimalDisplayName"
}
]
}
]
}
}
The output I am trying to get is a list of all the values of the key Animal_Target_DisplayName, like this: ['Animaltheater', 'Animaltitle', 'Animalcustomer', 'Animalfact_transaction', 'Animalfact_transaction']. But remember that the nested structure of this JSON always changes, even though its keys stay the same.
I guess your only option is to run through the entire dict and collect the values of every Animal_Target_DisplayName key. I propose the following recursive solution:
def run_json(dict_):
    animal_target_sons = []
    if type(dict_) is list:
        for element in dict_:
            animal_target_sons.append(run_json(element))
    elif type(dict_) is dict:
        for key in dict_:
            if key == "Animal_Target_DisplayName":
                animal_target_sons.append([dict_[key]])
            else:
                animal_target_sons.append(run_json(dict_[key]))
    return [x for sublist in animal_target_sons for x in sublist]

run_json(dict_)
Then calling run_json returns a list with what you want. By the way, I recommend renaming your JSON variable from dict to, for example, dict_, since dict is the name of Python's built-in dictionary type.
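A minimal usage sketch with a trimmed-down version of the structure above (the field names come from the question; everything else is stripped out):

sample = {
    "view": {
        "Term0": [
            {"Relation6": [{"Term7": [{"Animal_Target_DisplayName": "Animaltheater"}]}]},
            {"Relation6": [{"Term7": [{"Animal_Target_DisplayName": "Animaltitle"}]}]},
        ]
    }
}

print(run_json(sample))  # ['Animaltheater', 'Animaltitle']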
Since you're getting JSON, why not make use of the json module? That will do the parsing for you and allow you to use dictionary functions+features to get the information you need.
#!/usr/bin/python2.7
from __future__ import print_function

import json

# _somehow_ get your JSON in as a string. I'm calling it "jstr" for this
# example.
# Use the module to parse it
jdict = json.loads(jstr)

# our dict has keys...
# view -> Term0 -> keys-we're-interested-in
templist = jdict["view"]["Term0"]

results = {}
for _el in range(len(templist)):
    if templist[_el]["FullName"] == "Animal Set":
        # this is the one we're interested in - and it's another list
        moretemp = templist[_el]["Relation6"]
        for _k in range(len(moretemp)):
            term7 = moretemp[_k]["Term7"][0]
            displayName = term7["Animal_Target_DisplayName"]
            fullName = term7["Animal_Target_FullName"]
            results[fullName] = displayName

print("{0}".format(results))
Then you can dump the results dict plain, or with pretty-printing:
>>> print(json.dumps(results, indent=4))
{
"Animaltitle2": "Animaltitle2",
"Animalcustomer3": "Animalcustomer3",
"Animalfact_transaction4": "Animalfact_transaction4",
"Animaltheater1": "Animaltheater1"
}
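If you only need the flat list the question asks for (rather than a dict keyed by full name), here is a hedged comprehension over the same parsed jdict, assuming every matching entry carries a Relation6 list:

display_names = [
    rel["Term7"][0]["Animal_Target_DisplayName"]
    for entry in jdict["view"]["Term0"] if "Relation6" in entry
    for rel in entry["Relation6"]
]
# ['Animaltheater', 'Animaltitle', 'Animalcustomer', 'Animalfact_transaction', 'Animalfact_transaction']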
With the given script I am able to get the output shown in my screenshot,
but there is a column named cve.description.description_data which is again in JSON format. I want to extract that data as well.
import json
import pandas as pd
from pandas.io.json import json_normalize

# load json object
with open('nvdcve-1.0-modified.json') as f:
    d = json.load(f)

# the parent node is 'CVE_Items'
nycphil = json_normalize(d['CVE_Items'])
nycphil.head(3)

works_data = json_normalize(data=d['CVE_Items'], record_path='cve')
works_data.head(3)

nycphil.to_csv("test4.csv")
If I change it to works_data = json_normalize(data=d['CVE_Items'], record_path='cve.description') it gives this error:
"result = result[spec] KeyError: 'cve.description'"
JSON format as follows:
{
"CVE_data_type":"CVE",
"CVE_data_format":"MITRE",
"CVE_data_version":"4.0",
"CVE_data_numberOfCVEs":"1000",
"CVE_data_timestamp":"2018-04-04T00:00Z",
"CVE_Items":[
{
"cve":{
"data_type":"CVE",
"data_format":"MITRE",
"data_version":"4.0",
"CVE_data_meta":{
"ID":"CVE-2001-1594",
"ASSIGNER":"cve#mitre.org"
},
"affects":{
"vendor":{
"vendor_data":[
{
"vendor_name":"gehealthcare",
"product":{
"product_data":[
{
"product_name":"entegra_p&r",
"version":{
"version_data":[
{
"version_value":"*"
}
]
}
}
]
}
}
]
}
},
"problemtype":{
"problemtype_data":[
{
"description":[
{
"lang":"en",
"value":"CWE-255"
}
]
}
]
},
"references":{
"reference_data":[
{
"url":"http://apps.gehealthcare.com/servlet/ClientServlet/2263784.pdf?DOCCLASS=A&REQ=RAC&DIRECTION=2263784-100&FILENAME=2263784.pdf&FILEREV=5&DOCREV_ORG=5&SUBMIT=+ ACCEPT+"
},
{
"url":"http://www.forbes.com/sites/thomasbrewster/2015/07/10/vulnerable- "
},
{
"url":"https://ics-cert.us-cert.gov/advisories/ICSMA-18-037-02"
},
{
"url":"https://twitter.com/digitalbond/status/619250429751222277"
}
]
},
"description":{
"description_data":[
{
"lang":"en",
"value":"GE Healthcare eNTEGRA P&R has a password of (1) value."
}
]
}
},
"configurations":{
"CVE_data_version":"4.0",
"nodes":[
{
"operator":"OR",
"cpe":[
{
"vulnerable":true,
"cpe22Uri":"cpe:/a:gehealthcare:entegra_p%26r",
"cpe23Uri":"cpe:2.3:a:gehealthcare:entegra_p\\&r:*:*:*:*:*:*:*:*"
}
]
}
]
},
"impact":{
"baseMetricV2":{
"cvssV2":{
"version":"2.0",
"vectorString":"(AV:N/AC:L/Au:N/C:C/I:C/A:C)",
"accessVector":"NETWORK",
"accessComplexity":"LOW",
"authentication":"NONE",
"confidentialityImpact":"COMPLETE",
"integrityImpact":"COMPLETE",
"availabilityImpact":"COMPLETE",
"baseScore":10.0
},
"severity":"HIGH",
"exploitabilityScore":10.0,
"impactScore":10.0,
"obtainAllPrivilege":false,
"obtainUserPrivilege":false,
"obtainOtherPrivilege":false,
"userInteractionRequired":false
}
},
"publishedDate":"2015-08-04T14:59Z",
"lastModifiedDate":"2018-03-28T01:29Z"
}
]
}
I want to flatten all data.
Assuming the multiple URLs delineate rows and all other metadata repeats, consider a recursive function that extracts every key-value pair in the nested JSON object, d.
The recursive function uses global to update the needed global objects, which are then bound into a list of dictionaries for the pd.DataFrame() call. The last loop at the end updates the recursive function's dictionary, inner, to integrate the different URLs (stored in multi):
import json
import pandas as pd

# load json object
with open('nvdcve-1.0-modified.json') as f:
    d = json.load(f)

multi = []; inner = {}

def recursive_extract(i):
    global multi, inner
    if type(i) is list:
        if len(i) == 1:
            for k, v in i[0].items():
                if type(v) in [list, dict]:
                    recursive_extract(v)
                else:
                    inner[k] = v
        else:
            multi = i
    if type(i) is dict:
        for k, v in i.items():
            if type(v) in [list, dict]:
                recursive_extract(v)
            else:
                inner[k] = v

recursive_extract(d['CVE_Items'])

data_dict = []
for i in multi:
    tmp = inner.copy()
    tmp.update(i)
    data_dict.append(tmp)

df = pd.DataFrame(data_dict)
df.to_csv('Output.csv')
Output (all columns the same except for URL, widened for emphasis)
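If you would rather stay inside pandas, here is a hedged alternative sketch: newer pandas versions expose json_normalize at the top level, and record_path can walk down to the nested description list while meta pulls sibling fields along (the meta columns chosen here are assumptions based on the sample JSON above):

import json
import pandas as pd

with open('nvdcve-1.0-modified.json') as f:
    d = json.load(f)

# one row per description entry, with the CVE id and publish date repeated on each row
desc = pd.json_normalize(
    d['CVE_Items'],
    record_path=['cve', 'description', 'description_data'],
    meta=[['cve', 'CVE_data_meta', 'ID'], 'publishedDate'],
)
desc.to_csv('descriptions.csv', index=False)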
I've seen a fair share of ungainly XML->JSON code on the web, and having interacted with Stack's users for a bit, I'm convinced that this crowd can help more than the first few pages of Google results can.
So, we're parsing a weather feed, and we need to populate weather widgets on a multitude of web sites. We're looking now into Python-based solutions.
This public weather.com RSS feed is a good example of what we'd be parsing (our actual weather.com feed contains additional information because of a partnership w/them).
In a nutshell, how should we convert XML to JSON using Python?
xmltodict (full disclosure: I wrote it) can help you convert your XML to a dict+list+string structure, following this "standard". It is Expat-based, so it's very fast and doesn't need to load the whole XML tree in memory.
Once you have that data structure, you can serialize it to JSON:
import xmltodict, json
o = xmltodict.parse('<e> <a>text</a> <a>text</a> </e>')
json.dumps(o) # '{"e": {"a": ["text", "text"]}}'
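If you need a file-based workflow or the reverse direction, xmltodict also provides unparse; a minimal sketch (the file names are placeholders):

import json
import xmltodict

with open('input.xml') as f:
    doc = xmltodict.parse(f.read())

# dict -> JSON file
with open('output.json', 'w') as f:
    json.dump(doc, f, indent=4)

# and back again: dict -> XML string
xml_again = xmltodict.unparse(doc, pretty=True)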
There is no "one-to-one" mapping between XML and JSON, so converting one to the other necessarily requires some understanding of what you want to do with the results.
That being said, Python's standard library has several modules for parsing XML (including DOM, SAX, and ElementTree). As of Python 2.6, support for converting Python data structures to and from JSON is included in the json module.
So the infrastructure is there.
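For example, here is a minimal sketch using only the standard library (ElementTree plus json); the tag/attrib/text mapping below is just one of many possible choices, not a standard:

import json
import xml.etree.ElementTree as ET

def etree_to_dict(elem):
    # keep the tag, its attributes and any text; recurse into children
    d = {'tag': elem.tag, 'attrib': dict(elem.attrib), 'text': (elem.text or '').strip()}
    children = [etree_to_dict(child) for child in elem]
    if children:
        d['children'] = children
    return d

root = ET.fromstring('<e><a id="1">text</a><a>text</a></e>')
print(json.dumps(etree_to_dict(root), indent=2))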
You can use the xmljson library to convert using different XML JSON conventions.
For example, this XML:
<p id="1">text</p>
translates via the BadgerFish convention into this:
{
'p': {
'#id': 1,
'$': 'text'
}
}
and via the GData convention into this (attributes are not supported):
{
'p': {
'$t': 'text'
}
}
... and via the Parker convention into this (attributes are not supported):
{
'p': 'text'
}
It's possible to convert from XML to JSON and from JSON to XML using the same
conventions:
>>> import json, xmljson
>>> from lxml.etree import fromstring, tostring
>>> xml = fromstring('<p id="1">text</p>')
>>> json.dumps(xmljson.badgerfish.data(xml))
'{"p": {"#id": 1, "$": "text"}}'
>>> xmljson.parker.etree({'ul': {'li': [1, 2]}})
# Creates [<ul><li>1</li><li>2</li></ul>]
Disclosure: I wrote this library. Hope it helps future searchers.
To anyone who may still need this: here is a newer, simple snippet to do the conversion.
from xml.etree import ElementTree as ET

def parseXmlToJson(xml):
    response = {}
    for child in list(xml):
        if len(list(child)) > 0:
            response[child.tag] = parseXmlToJson(child)
        else:
            response[child.tag] = child.text or ''
        # one-liner equivalent
        # response[child.tag] = parseXmlToJson(child) if len(list(child)) > 0 else child.text or ''
    return response

root = ET.parse('FILE_NAME.xml').getroot()  # pass the root element, not the ElementTree
parsed = parseXmlToJson(root)
If at some point you only get the response object back instead of the parsed data, you will hit a JSON parse error, so you need to convert the response to text first:
import json
import requests
import xmltodict

data = requests.get(url)
xpars = xmltodict.parse(data.text)
json_str = json.dumps(xpars)
print(json_str)
Here's the code I built for that. There's no parsing of the contents, just plain conversion.
from xml.dom import minidom
import simplejson as json

def parse_element(element):
    dict_data = dict()
    if element.nodeType == element.TEXT_NODE:
        dict_data['data'] = element.data
    if element.nodeType not in [element.TEXT_NODE, element.DOCUMENT_NODE,
                                element.DOCUMENT_TYPE_NODE]:
        for item in element.attributes.items():
            dict_data[item[0]] = item[1]
    if element.nodeType not in [element.TEXT_NODE, element.DOCUMENT_TYPE_NODE]:
        for child in element.childNodes:
            child_name, child_dict = parse_element(child)
            if child_name in dict_data:
                try:
                    dict_data[child_name].append(child_dict)
                except AttributeError:
                    dict_data[child_name] = [dict_data[child_name], child_dict]
            else:
                dict_data[child_name] = child_dict
    return element.nodeName, dict_data

if __name__ == '__main__':
    dom = minidom.parse('data.xml')
    f = open('data.json', 'w')
    f.write(json.dumps(parse_element(dom), sort_keys=True, indent=4))
    f.close()
There is a method to transport XML-based markup as JSON which allows it to be losslessly converted back to its original form. See http://jsonml.org/.
It's a kind of XSLT of JSON. I hope you find it helpful
I'd suggest not going for a direct conversion. Convert XML to an object, then from the object to JSON.
In my opinion, this gives a cleaner definition of how the XML and JSON correspond.
It takes time to get right and you may even write tools to help you with generating some of it, but it would look roughly like this:
class Channel:
    def __init__(self):
        self.items = []
        self.title = ""

    def from_xml(self, xml_node):
        self.title = xml_node.xpath("title/text()")[0]
        for x in xml_node.xpath("item"):
            item = Item()
            item.from_xml(x)
            self.items.append(item)

    def to_json(self):
        retval = {}
        retval['title'] = self.title
        retval['items'] = []
        for x in self.items:
            retval['items'].append(x.to_json())
        return retval

class Item:
    def __init__(self):
        ...

    def from_xml(self, xml_node):
        ...

    def to_json(self):
        ...
You may want to have a look at http://designtheory.org/library/extrep/designdb-1.0.pdf. This project starts off with an XML to JSON conversion of a large library of XML files. There was much research done in the conversion, and the most simple intuitive XML -> JSON mapping was produced (it is described early in the document). In summary, convert everything to a JSON object, and put repeating blocks as a list of objects.
objects meaning key/value pairs (dictionary in Python, hashmap in Java, object in JavaScript)
There is no mapping back to XML to get an identical document, the reason is, it is unknown whether a key/value pair was an attribute or an <key>value</key>, therefore that information is lost.
If you ask me, attributes are a hack to start; then again they worked well for HTML.
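As a hedged toy illustration of that mapping (my own example, not taken from the paper): repeating blocks become a list of objects, and the attribute/child-element distinction disappears in the JSON form:

# <library>
#   <book id="1"><title>A</title></book>
#   <book id="2"><title>B</title></book>
# </library>
json_equivalent = {
    "library": {
        "book": [
            {"id": "1", "title": "A"},
            {"id": "2", "title": "B"},
        ]
    }
}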
Well, probably the simplest way is to just parse the XML into dictionaries and then serialize them with simplejson.
When I do anything with XML in python I almost always use the lxml package. I suspect that most people use lxml. You could use xmltodict but you will have to pay the penalty of parsing the XML again.
To convert XML to JSON with lxml you:
Parse the XML document with lxml
Convert the lxml tree to a dict
Convert the dict to JSON
I use the following class in my projects. Use the toJson method.
from lxml import etree
import json


class Element:
    '''
    Wrapper on the etree.Element class. Extends functionality to output element
    as a dictionary.
    '''
    def __init__(self, element):
        '''
        :param: element a normal etree.Element instance
        '''
        self.element = element

    def toDict(self):
        '''
        Returns the element as a dictionary. This includes all child elements.
        '''
        rval = {
            self.element.tag: {
                'attributes': dict(self.element.items()),
            },
        }
        for child in self.element:
            rval[self.element.tag].update(Element(child).toDict())
        return rval


class XmlDocument:
    '''
    Wraps lxml to provide:
        - cleaner access to some common lxml.etree functions
        - converter from XML to dict
        - converter from XML to json
    '''
    def __init__(self, xml='<empty/>', filename=None):
        '''
        There are two ways to initialize the XmlDocument contents:
            - String
            - File
        You don't have to initialize the XmlDocument during instantiation
        though. You can do it later with the 'set' method. If you choose to
        initialize later, XmlDocument will be initialized with "<empty/>".

        :param: xml Set this argument if you want to parse from a string.
        :param: filename Set this argument if you want to parse from a file.
        '''
        self.set(xml, filename)

    def set(self, xml=None, filename=None):
        '''
        Use this to set or reset the contents of the XmlDocument.

        :param: xml Set this argument if you want to parse from a string.
        :param: filename Set this argument if you want to parse from a file.
        '''
        if filename is not None:
            self.tree = etree.parse(filename)
            self.root = self.tree.getroot()
        else:
            self.root = etree.fromstring(xml)
            self.tree = etree.ElementTree(self.root)

    def dump(self):
        etree.dump(self.root)

    def getXml(self):
        '''
        return document as a string
        '''
        return etree.tostring(self.root)

    def xpath(self, xpath):
        '''
        Return elements that match the given xpath.

        :param: xpath
        '''
        return self.tree.xpath(xpath)

    def nodes(self):
        '''
        Return all elements
        '''
        return self.root.iter('*')

    def toDict(self):
        '''
        Convert to a python dictionary
        '''
        return Element(self.root).toDict()

    def toJson(self, indent=None):
        '''
        Convert to JSON
        '''
        return json.dumps(self.toDict(), indent=indent)


if __name__ == "__main__":
    xml = '''<system>
        <product>
            <demod>
                <frequency value='2.215' units='MHz'>
                    <blah value='1'/>
                </frequency>
            </demod>
        </product>
    </system>
    '''
    doc = XmlDocument(xml)
    print(doc.toJson(indent=4))
The output from the built in main is:
{
"system": {
"attributes": {},
"product": {
"attributes": {},
"demod": {
"attributes": {},
"frequency": {
"attributes": {
"units": "MHz",
"value": "2.215"
},
"blah": {
"attributes": {
"value": "1"
}
}
}
}
}
}
}
Which is a transformation of this xml:
<system>
<product>
<demod>
<frequency value='2.215' units='MHz'>
<blah value='1'/>
</frequency>
</demod>
</product>
</system>
I found that for simple XML snippets, using a regular expression can save trouble. For example:
# <user><name>Happy Man</name>...</user>
import re
names = re.findall(r'<name>(\w+)<\/name>', xml_string)
# do some thing to names
To do it by XML parsing, as @Dan said, there is no one-size-fits-all solution because the data is different. My suggestion is to use lxml. Although it does not go all the way to JSON, lxml.objectify gives quite good results:
>>> from lxml import objectify
>>> root = objectify.fromstring("""
... <root xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
... <a attr1="foo" attr2="bar">1</a>
... <a>1.2</a>
... <b>1</b>
... <b>true</b>
... <c>what?</c>
... <d xsi:nil="true"/>
... </root>
... """)
>>> print(str(root))
root = None [ObjectifiedElement]
a = 1 [IntElement]
* attr1 = 'foo'
* attr2 = 'bar'
a = 1.2 [FloatElement]
b = 1 [IntElement]
b = True [BoolElement]
c = 'what?' [StringElement]
d = None [NoneElement]
* xsi:nil = 'true'
While the built-in libs for XML parsing are quite good I am partial to lxml.
But for parsing RSS feeds, I'd recommend Universal Feed Parser, which can also parse Atom.
Its main advantage is that it can digest even most malformed feeds.
Python 2.6 already includes a JSON parser, but a newer version with improved speed is available as simplejson.
With these tools building your app shouldn't be that difficult.
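A hedged sketch of that pipeline (the feed URL is a placeholder, and the field names follow feedparser's usual attributes):

import json
import feedparser

feed = feedparser.parse('http://example.com/weather-feed.rss')

widget_data = {
    'title': feed.feed.get('title', ''),
    'entries': [
        {'title': entry.get('title', ''), 'summary': entry.get('summary', '')}
        for entry in feed.entries
    ],
}

print(json.dumps(widget_data, indent=2))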
My answer addresses the specific (and somewhat common) case where you don't really need to convert the entire xml to json, but what you need is to traverse/access specific parts of the xml, and you need it to be fast, and simple (using json/dict-like operations).
Approach
For this, it is important to note that parsing an xml to etree using lxml is super fast. The slow part in most of the other answers is the second pass: traversing the etree structure (usually in python-land), converting it to json.
Which leads me to the approach I found best for this case: parsing the xml using lxml, and then wrapping the etree nodes (lazily), providing them with a dict-like interface.
Code
Here's the code:
from collections import Mapping
import lxml.etree


class ETreeDictWrapper(Mapping):
    def __init__(self, elem, attr_prefix='#', list_tags=()):
        self.elem = elem
        self.attr_prefix = attr_prefix
        self.list_tags = list_tags

    def _wrap(self, e):
        if isinstance(e, basestring):
            return e
        if len(e) == 0 and len(e.attrib) == 0:
            return e.text
        return type(self)(
            e,
            attr_prefix=self.attr_prefix,
            list_tags=self.list_tags,
        )

    def __getitem__(self, key):
        if key.startswith(self.attr_prefix):
            return self.elem.attrib[key[len(self.attr_prefix):]]
        else:
            subelems = [e for e in self.elem.iterchildren() if e.tag == key]
            if len(subelems) > 1 or key in self.list_tags:
                return [self._wrap(x) for x in subelems]
            elif len(subelems) == 1:
                return self._wrap(subelems[0])
            else:
                raise KeyError(key)

    def __iter__(self):
        return iter(set(k.tag for k in self.elem) |
                    set(self.attr_prefix + k for k in self.elem.attrib))

    def __len__(self):
        return len(self.elem) + len(self.elem.attrib)

    # defining __contains__ is not necessary, but improves speed
    def __contains__(self, key):
        if key.startswith(self.attr_prefix):
            return key[len(self.attr_prefix):] in self.elem.attrib
        else:
            return any(e.tag == key for e in self.elem.iterchildren())


def xml_to_dictlike(xmlstr, attr_prefix='#', list_tags=()):
    t = lxml.etree.fromstring(xmlstr)
    return ETreeDictWrapper(
        t,
        attr_prefix=attr_prefix,  # pass the caller's prefix through
        list_tags=set(list_tags),
    )
This implementation is not complete, e.g., it doesn't cleanly support cases where an element has both text and attributes, or both text and children (only because I didn't need it when I wrote it...) It should be easy to improve it, though.
Speed
In my specific use case, where I needed to process only specific elements of the xml, this approach gave a surprising and striking speedup by a factor of 70 (!) compared to using @Martin Blech's xmltodict and then traversing the dict directly.
Bonus
As a bonus, since our structure is already dict-like, we get another alternative implementation of xml2json for free. We just need to pass our dict-like structure to json.dumps. Something like:
import json

def xml_to_json(xmlstr, **kwargs):
    x = xml_to_dictlike(xmlstr, **kwargs)
    # default=dict lets json.dumps recurse into the Mapping wrappers,
    # which are not plain dicts
    return json.dumps(x, default=dict)
If your xml includes attributes, you'd need to use some alphanumeric attr_prefix (e.g. "ATTR_"), to ensure the keys are valid json keys.
I haven't benchmarked this part.
check out lxml2json (disclosure: I wrote it)
https://github.com/rparelius/lxml2json
it's very fast, lightweight (only requires lxml), and one advantage is that you have control over whether certain elements are converted to lists or dicts
You can use jsonpickle, or, if you're working with feeds via feedparser, you can try feed_parser_to_json.py.
You can use declxml. It has advanced features like multi attributes and complex nested support. You just need to write a simple processor for it. Also with the same code, you can convert back to JSON as well. It is fairly straightforward and the documentation is awesome.
Link: https://declxml.readthedocs.io/en/latest/index.html
If you don't want to use any external libraries or 3rd-party tools, try the code below.
Code
import re
import json

def getdict(content):
    res = re.findall("<(?P<var>\S*)(?P<attr>[^/>]*)(?:(?:>(?P<val>.*?)</(?P=var)>)|(?:/>))", content)
    if len(res) >= 1:
        attreg = "(?P<avr>\S+?)(?:(?:=(?P<quote>['\"])(?P<avl>.*?)(?P=quote))|(?:=(?P<avl1>.*?)(?:\s|$))|(?P<avl2>[\s]+)|$)"
        if len(res) > 1:
            return [{i[0]: [{"#attributes": [{j[0]: (j[2] or j[3] or j[4])} for j in re.findall(attreg, i[1].strip())]}, {"$values": getdict(i[2])}]} for i in res]
        else:
            return {res[0][0]: [{"#attributes": [{j[0]: (j[2] or j[3] or j[4])} for j in re.findall(attreg, res[0][1].strip())]}, {"$values": getdict(res[0][2])}]}
    else:
        return content

with open("test.xml", "r") as f:
    print(json.dumps(getdict(f.read().replace('\n', ''))))
Sample Input
<details class="4b" count=1 boy>
<name type="firstname">John</name>
<age>13</age>
<hobby>Coin collection</hobby>
<hobby>Stamp collection</hobby>
<address>
<country>USA</country>
<state>CA</state>
</address>
</details>
<details empty="True"/>
<details/>
<details class="4a" count=2 girl>
<name type="firstname">Samantha</name>
<age>13</age>
<hobby>Fishing</hobby>
<hobby>Chess</hobby>
<address current="no">
<country>Australia</country>
<state>NSW</state>
</address>
</details>
Output
[
{
"details": [
{
"#attributes": [
{
"class": "4b"
},
{
"count": "1"
},
{
"boy": ""
}
]
},
{
"$values": [
{
"name": [
{
"#attributes": [
{
"type": "firstname"
}
]
},
{
"$values": "John"
}
]
},
{
"age": [
{
"#attributes": []
},
{
"$values": "13"
}
]
},
{
"hobby": [
{
"#attributes": []
},
{
"$values": "Coin collection"
}
]
},
{
"hobby": [
{
"#attributes": []
},
{
"$values": "Stamp collection"
}
]
},
{
"address": [
{
"#attributes": []
},
{
"$values": [
{
"country": [
{
"#attributes": []
},
{
"$values": "USA"
}
]
},
{
"state": [
{
"#attributes": []
},
{
"$values": "CA"
}
]
}
]
}
]
}
]
}
]
},
{
"details": [
{
"#attributes": [
{
"empty": "True"
}
]
},
{
"$values": ""
}
]
},
{
"details": [
{
"#attributes": []
},
{
"$values": ""
}
]
},
{
"details": [
{
"#attributes": [
{
"class": "4a"
},
{
"count": "2"
},
{
"girl": ""
}
]
},
{
"$values": [
{
"name": [
{
"#attributes": [
{
"type": "firstname"
}
]
},
{
"$values": "Samantha"
}
]
},
{
"age": [
{
"#attributes": []
},
{
"$values": "13"
}
]
},
{
"hobby": [
{
"#attributes": []
},
{
"$values": "Fishing"
}
]
},
{
"hobby": [
{
"#attributes": []
},
{
"$values": "Chess"
}
]
},
{
"address": [
{
"#attributes": [
{
"current": "no"
}
]
},
{
"$values": [
{
"country": [
{
"#attributes": []
},
{
"$values": "Australia"
}
]
},
{
"state": [
{
"#attributes": []
},
{
"$values": "NSW"
}
]
}
]
}
]
}
]
}
]
}
]
This stuff here is actively maintained and so far is my favorite: xml2json in python
I published one on GitHub a while back:
https://github.com/davlee1972/xml_to_json
This converter is written in Python and will convert one or more XML files into JSON / JSONL files.
It requires an XSD schema file to figure out nested JSON structures (dictionaries vs. lists) and the JSON-equivalent data types.
python xml_to_json.py -x PurchaseOrder.xsd PurchaseOrder.xml
INFO - 2018-03-20 11:10:24 - Parsing XML Files..
INFO - 2018-03-20 11:10:24 - Processing 1 files
INFO - 2018-03-20 11:10:24 - Parsing files in the following order:
INFO - 2018-03-20 11:10:24 - ['PurchaseOrder.xml']
DEBUG - 2018-03-20 11:10:24 - Generating schema from PurchaseOrder.xsd
DEBUG - 2018-03-20 11:10:24 - Parsing PurchaseOrder.xml
DEBUG - 2018-03-20 11:10:24 - Writing to file PurchaseOrder.json
DEBUG - 2018-03-20 11:10:24 - Completed PurchaseOrder.xml
I also have a follow-up XML-to-Parquet converter that works in a similar fashion:
https://github.com/blackrock/xml_to_parquet