How to parse XML with an xmlns attribute using Python

<?xml version="1.0" ?>
<school xmlns="loyo:22:2.2">
    <profile>
        <student xmlns="loyo:5:542">
            <marks>
                <mark java="java:/lo">
                    <ca1>200</ca1>
                </mark>
            </marks>
        </student>
    </profile>
</school>
I am trying to access the ca1 text. I am using ElementTree but I cannot get to it. I'm using the code below.
import xml.etree.ElementTree as ET
tree = ET.parse('mca.xml')
root = tree.getroot()
def getElementsData(xpath):
    elements = list()
    if root.findall(xpath):
        for elem in root.findall(xpath):
            elements.append(elem.text)
        return elements
    else:
        raise SystemExit("Invalid xpath provided")

t = getElementsData('.//ca1')
for i in t:
    print(i)
I tried different ways to access it, but I don't know what the exact problem is. Is it a file type issue?

Your document declares namespaces on the school and student nodes, so you need to incorporate those namespaces in your search. Since you are looking for ca1, which is under student, you need to specify the namespace that the student node declares:
import xml.etree.ElementTree as ET
tree = ET.parse('mca.xml')
root = tree.getroot()
def getElementsData(xpath, namespaces):
    elements = root.findall(xpath, namespaces)
    if elements == []:
        raise SystemExit("Invalid xpath provided")
    return elements

namespaces = {'ns_school': 'loyo:22:2.2', 'ns_student': 'loyo:5:542'}
elements = getElementsData('.//ns_student:ca1', namespaces)
for element in elements:
    print(element)
Notes
Since your namespaces have no prefixes in the document, I gave them names such as ns_school and ns_student, but these names can be anything (e.g. ns1, mystudent, ...)
In a more complex system, I recommend raising some other kind of error and letting the caller decide whether or not to exit.
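As a sketch of that suggestion (the exception class name XPathNotFound is just an illustration, not part of the original code):
class XPathNotFound(Exception):
    """Raised when an XPath query matches nothing."""

def getElementsData(xpath, namespaces):
    elements = root.findall(xpath, namespaces)
    if not elements:
        # Let the caller decide how to handle the failure
        raise XPathNotFound(f"No elements matched {xpath!r}")
    return elements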

How about traversing like this:
import xml.etree.ElementTree
e = xml.etree.ElementTree.parse('test.xml').getroot()
data = e.getchildren()[0].getchildren()[0].getchildren()[0].getchildren()[0].getchildren()[0].text
print(data)
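Note that getchildren() is deprecated and was removed from the standard library's ElementTree in Python 3.9; plain indexing walks the same path. A minimal sketch of the same traversal (same test.xml as above):
import xml.etree.ElementTree as ET

root = ET.parse('test.xml').getroot()
# school -> profile -> student -> marks -> mark -> ca1
data = root[0][0][0][0][0].text
print(data)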

Try the following XPath. Note that this requires lxml (the standard-library ElementTree has no xpath() method), and because ca1 inherits the default namespace declared on student, a bare //ca1 will not match it; see the sketch below for a namespace-agnostic variant.
tree.xpath('//ca1//text()')[0].strip()
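If you do use lxml, a small sketch that works despite the default namespaces in the document above (local-name() matches the element regardless of the namespace it inherits):
from lxml import etree

tree = etree.parse('mca.xml')
# local-name() sidesteps the default namespaces declared on school/student
values = tree.xpath('//*[local-name()="ca1"]/text()')
print(values[0].strip() if values else None)  # -> '200'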

Related

How to access UBL 2.1 xml tag using python

I need to access the tags in UBL 2.1 and modify them depending on the user input in Python.
So I used the ElementTree library to access the tags and modify them.
Here is a sample of the xml code:
<ns0:Invoice xmlns:ns0="urn:oasis:names:specification:ubl:schema:xsd:Invoice-2" xmlns:ns1="urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2" xmlns:ns2="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2">
<ns1:ProfileID>reporting:1.0</ns1:ProfileID>
<ns1:ID>0</ns1:ID>
<ns1:UUID>dbdf65eb-5d66-47e6-bb0c-a84bbf7baa30</ns1:UUID>
<ns1:IssueDate>2022-11-05</ns1:IssueDate>
The issue:
I want to access the tags, but they are not modified and the loop is never entered.
I tried both ways:
mytree = ET.parse('test.xml')
myroot = mytree.getroot()
for x in myroot.find({xmlns:ns1=urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2}IssueDate}"):
    x.text = '1999'
mytree.write('test.xml')
mytree = ET.parse('test.xml')
myroot = mytree.getroot()
for x in myroot.iter('./Invoice/AllowanceCharge/ChargeIndicator'):
    x.text = str('true')
mytree.write('test.xml')
Neither of them worked or modified the tag.
So the question is: how can I reach the specific tag and modify it?
If you correct the namespace and the brackets in your for loop, it works for a valid XML like the following (the root tag must be closed!):
Input:
<?xml version="1.0" encoding="utf-8"?>
<ns0:Invoice xmlns:ns0="urn:oasis:names:specification:ubl:schema:xsd:Invoice-2" xmlns:ns1="urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2" xmlns:ns2="urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2">
<ns1:ProfileID>reporting:1.0</ns1:ProfileID>
<ns1:ID>0</ns1:ID>
<ns1:UUID>dbdf65eb-5d66-47e6-bb0c-a84bbf7baa30</ns1:UUID>
<ns1:IssueDate>2022-11-05</ns1:IssueDate>
</ns0:Invoice>
Your repaired code:
import xml.etree.ElementTree as ET
tree = ET.parse('test.xml')
root = tree.getroot()
for elem in root.findall("{urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2}IssueDate"):
    elem.text = '1999'
tree.write('test_changed.xml', encoding='utf-8', xml_declaration=True)
ET.dump(root)
Output:
<ns0:Invoice xmlns:ns0="urn:oasis:names:specification:ubl:schema:xsd:Invoice-2" xmlns:ns1="urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2">
<ns1:ProfileID>reporting:1.0</ns1:ProfileID>
<ns1:ID>0</ns1:ID>
<ns1:UUID>dbdf65eb-5d66-47e6-bb0c-a84bbf7baa30</ns1:UUID>
<ns1:IssueDate>1999</ns1:IssueDate>
</ns0:Invoice>
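If you prefer a prefix map over Clark notation, the same lookup can be written like this (the prefix cbc is an arbitrary name chosen here, not something the document defines):
import xml.etree.ElementTree as ET

ns = {"cbc": "urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2"}
tree = ET.parse('test.xml')
root = tree.getroot()
for elem in root.findall("cbc:IssueDate", ns):
    elem.text = '1999'
tree.write('test_changed.xml', encoding='utf-8', xml_declaration=True)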

How to get the xml element as a string with namespace using ElementTree in python?

I need to get elements from XML as a string. I am trying with the XML format below.
<xml>
    <prot:data xmlns:prot="prot">
        <product-id-template>
            <prot:ProductId>PRODUCT_ID</prot:ProductId>
        </product-id-template>
        <product-name-template>
            <prot:ProductName>PRODUCT_NAME</prot:ProductName>
        </product-name-template>
        <dealer-template>
            <xsi:Dealer xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">DEALER</xsi:Dealer>
        </dealer-template>
    </prot:data>
</xml>
And I tried with below code:
from xml.etree import ElementTree as ET
def get_template(xpath, namespaces):
    tree = ET.parse('cdata.xml')
    elements = tree.getroot()
    for element in elements.findall(xpath, namespaces=namespaces):
        return element
namespace = {"prot" : "prot"}
aa = get_template(".//prot:ProductId", namespace)
print(ET.tostring(aa).decode())
Actual output:
<ns0:ProductId xmlns:ns0="prot">PRODUCT_ID</ns0:ProductId>
Expected output:
<prot:ProductId>PRODUCT_ID</prot:ProductId>
I should not remove the xmlns where it is present in the document, and it has to be removed where it is not present. For example, product-id-template does not contain the xmlns, so it needs to be retrieved without xmlns, and dealer-template contains the xmlns, so it needs to be retrieved with xmlns.
How to achieve this?
You can remove xmlns with regex.
import re
# ...
with_ns = ET.tostring(aa).decode()
no_ns = re.sub(r' xmlns(:\w+)?="[^"]+"', '', with_ns)
print(no_ns)
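As written, ElementTree rewrites the prefix to ns0, so the regex alone yields <ns0:ProductId>PRODUCT_ID</ns0:ProductId>. If you want the original prot prefix back, one option (a sketch reusing the prefix/URI pair already in the document) is to register the prefix before serializing:
import re
import xml.etree.ElementTree as ET

ET.register_namespace("prot", "prot")
with_ns = ET.tostring(aa).decode()   # <prot:ProductId xmlns:prot="prot">PRODUCT_ID</prot:ProductId>
no_ns = re.sub(r' xmlns(:\w+)?="[^"]+"', '', with_ns)
print(no_ns)                         # <prot:ProductId>PRODUCT_ID</prot:ProductId>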
UPDATE: You can do a very wild thing. Although I can't recommend it, because I'm not a Python expert.
I just checked the source code and found that I can do this hack:
def my_serialize_xml(write, elem, qnames, namespaces,
                     short_empty_elements, **kwargs):
    ET._serialize_xml(write, elem, qnames,
                      None, short_empty_elements, **kwargs)

ET._serialize["xml"] = my_serialize_xml
I just defined my_serialize_xml, which calls ElementTree._serialize_xml with namespaces=None. And then, in the dictionary ElementTree._serialize, I changed the value for the key "xml" to my_serialize_xml. So when you call ElementTree.tostring, it will use my_serialize_xml.
If you want to try it, just place the code (above) after from xml.etree import ElementTree as ET (but before using ET).

Accesing values in xml file with namespaces in python 2.7 lxml

I'm following this link to try to get values of several tags:
Parsing XML with namespace in Python via 'ElementTree'
In that link there is no problem accessing the root tag, like this:
import sys
from lxml import etree as ET
doc = ET.parse('file.xml')
namespaces_rdf = {'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'} # add more as needed
namespaces_dcat = {'dcat': 'http://www.w3.org/ns/dcat#'} # add more as needed
namespaces_dct = {'dct': 'http://purl.org/dc/terms/'}
print doc.findall('rdf:RDF', namespaces_rdf)
print doc.findall('dcat:Dataset', namespaces_dcat)
print doc.findall('dct:identifier', namespaces_dct)
OUTPUT:
[]
[<Element {http://www.w3.org/ns/dcat#}Dataset at 0x2269b98>]
[]
I only get access to dcat:Dataset, and I can't see how to access the value of rdf:about, and later access dct:identifier. Of course, once I have accessed this info, I also need to access the dcat:distribution info.
This is my example file, generated with ckanext-dcat:
<?xml version="1.0" encoding="utf-8"?>
<rdf:RDF
xmlns:dct="http://purl.org/dc/terms/"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:dcat="http://www.w3.org/ns/dcat#"
>
<dcat:Dataset rdf:about="http://www.myweb.com/dataset/ec631628-2f46-4f17-a685-d62a37466c01">
<dct:identifier>ec631628-2f46-4f17-a685-d62a37466c01</dct:identifier>
<dct:description>FOO-Description</dct:description>
<dct:title>FOO-title</dct:title>
<dcat:keyword>keyword1</dcat:keyword>
<dcat:keyword>keyword2</dcat:keyword>
<dct:issued rdf:datatype="http://www.w3.org/2001/XMLSchema#dateTime">2014-10-08T08:55:04.566618</dct:issued>
<dct:modified rdf:datatype="http://www.w3.org/2001/XMLSchema#dateTime">2015-06-25T11:04:10.328902</dct:modified>
<dcat:distribution>
<dcat:Distribution rdf:about="http://www.myweb.com/dataset/ec631628-2f46-4f17-a685-d62a37466c01/resource/f5707551-6bf3-468f-9a96-b4184cc51d1f">
<dct:title>FOO-title-1</dct:title>
<dct:description>FOO-Description-1</dct:description>
<dcat:accessURL>http://www.myweb.com/dataset/ec631628-2f46-4f17-a685-d62a37466c01/resource/f5707551-6bf3-468f-9a96-b4184cc51d1f/download/myxls.xls</dcat:accessURL>
<dct:format>XLS</dct:format>
</dcat:Distribution>
</dcat:distribution>
<dcat:distribution>
<dcat:Distribution rdf:about="http://www.myweb.com/dataset/ec631628-2f46-4f17-a685-d62a37466c01/resource/74c1acc8-b2b5-441b-afb2-d072d0d00a7f">
<dct:format>XLS</dct:format>
<dct:title>FOO-title-2</dct:title>
<dct:description>FOO-Description-2</dct:description>
<dcat:accessURL>http://www.myweb.com/dataset/ec631628-2f46-4f17-a685-d62a37466c01/resource/74c1acc8-b2b5-441b-afb2-d072d0d00a7f/download/myxls.xls</dcat:accessURL>
</dcat:Distribution>
</dcat:distribution>
</dcat:Dataset>
</rdf:RDF>
Any idea on how to access this info??
Thanks
UPDATE:
Well, I need to access rdf:about in:
<dcat:Dataset rdf:about="http://www.myweb.com/dataset/ec631628-2f46-4f17-a685-d62a37466c01">
so with this code taken from:
Parse xml with lxml - extract element value
for node in doc.xpath('//dcat:Dataset', namespaces=namespaces):
    # Iterate over attributes
    for attrib in node.attrib:
        print '#' + attrib + '=' + node.attrib[attrib]
I get this output:
[<Element {http://www.w3.org/ns/dcat#}Dataset at 0x23d8ee0>]
#{http://www.w3.org/1999/02/22-rdf-syntax-ns#}about=http://www.myweb.com/dataset/ec631628-2f46-4f17-a685-d62a37466c01
So, the question is:
How can I check whether the attribute is about, in order to take its value? Because in other files I have several tags.
UPDATE 2: Fixed how I get the about value (Clark notation)
for node in doc.xpath('//dcat:Dataset', namespaces=namespaces):
    # Iterate over attributes
    for attrib in node.attrib:
        if attrib.endswith('about'):
            pass  # do my jobs
Well, almost finished, but I have one last question: when I access my <dct:title>, I need to know which element it belongs to. I have:
<dcat:Dataset rdf:about="http://www.myweb.com/dataset/ec631628-2f46-4f17-a685-d62a37466c01">
<dct:title>FOO-title</dct:title>
<dcat:Distribution rdf:about="http://www.myweb.com/dataset/ec631628-2f46-4f17-a685-d62a37466c01/resource/f5707551-6bf3-468f-9a96-b4184cc51d1f">
<dct:title>FOO-title-1</dct:title>
<dcat:Distribution rdf:about="http://www.myweb.com/dataset/ec631628-2f46-4f17-a685-d62a37466c01/resource/74c1acc8-b2b5-441b-afb2-d072d0d00a7f">
<dct:title>FOO-title-2</dct:title>
If I do something like this I get:
for node in doc.xpath('//dct:title', namespaces=namespaces):
    print node.tag, node.text
{http://purl.org/dc/terms/}title FOO-title
{http://purl.org/dc/terms/}title FOO-title-1
{http://purl.org/dc/terms/}title FOO-title-2
Thanks
Use the xpath() method with namespaces named argument:
namespaces = {
'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',
'dcat': 'http://www.w3.org/ns/dcat#',
'dct': 'http://purl.org/dc/terms/'
}
print(doc.xpath('//rdf:RDF', namespaces=namespaces))
print(doc.xpath('//dcat:Dataset', namespaces=namespaces))
print(doc.xpath('//dct:identifier', namespaces=namespaces))
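For the follow-up about knowing which element a given dct:title belongs to, one option (a sketch reusing the namespaces dict above) is to iterate the Dataset and Distribution nodes and use relative XPath expressions, so each title is read in the context of its parent:
for dataset in doc.xpath('//dcat:Dataset', namespaces=namespaces):
    # Relative paths ('./', './/') keep the search scoped to this node only
    print(dataset.xpath('./dct:title/text()', namespaces=namespaces))
    for dist in dataset.xpath('.//dcat:Distribution', namespaces=namespaces):
        about = dist.get('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}about')
        print(about)
        print(dist.xpath('./dct:title/text()', namespaces=namespaces))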

Remove namespace and prefix from xml in python using lxml

I have an xml file I need to open and make some changes to, one of those changes is to remove the namespace and prefix and then save to another file.
Here is the xml:
<?xml version='1.0' encoding='UTF-8'?>
<package xmlns="http://apple.com/itunes/importer">
<provider>some data</provider>
<language>en-GB</language>
</package>
I can make the other changes I need, but can't find out how to remove the namespace and prefix. This is the result XML I need:
<?xml version='1.0' encoding='UTF-8'?>
<package>
<provider>some data</provider>
<language>en-GB</language>
</package>
And here is my script which will open and parse the xml and save it:
metadata = '/Users/user1/Desktop/Python/metadata.xml'
from lxml import etree
parser = etree.XMLParser(remove_blank_text=True)
open(metadata)
tree = etree.parse(metadata, parser)
root = tree.getroot()
tree.write('/Users/user1/Desktop/Python/done.xml', pretty_print = True, xml_declaration = True, encoding = 'UTF-8')
So how would I add code in my script which will remove the namespace and prefix?
We can get the desired output document in two steps:
Remove namespace URIs from element names
Remove unused namespace declarations from the XML tree
Example code
from lxml import etree
input_xml = """
<package xmlns="http://apple.com/itunes/importer">
<provider>some data</provider>
<language>en-GB</language>
<!-- some comment -->
<?xml-some-processing-instruction ?>
</package>
"""
root = etree.fromstring(input_xml)
# Iterate through all XML elements
for elem in root.getiterator():
    # Skip comments and processing instructions,
    # because they do not have names
    if not (
        isinstance(elem, etree._Comment)
        or isinstance(elem, etree._ProcessingInstruction)
    ):
        # Remove a namespace URI in the element's name
        elem.tag = etree.QName(elem).localname

# Remove unused namespace declarations
etree.cleanup_namespaces(root)
print(etree.tostring(root).decode())
Output XML
<package>
<provider>some data</provider>
<language>en-GB</language>
<!-- some comment -->
<?xml-some-processing-instruction ?>
</package>
Details explaining the code
As described in the documentation, we use lxml.etree.QName.localname to get the local names of elements, that is, names without namespace URIs. Then we replace the fully qualified names of the elements with their local names.
Some XML elements, such as comments and processing instructions do not have names. So, we have to skip these elements while replacing element names, otherwise a ValueError will be raised.
Finally, we use lxml.etree.cleanup_namespaces() to remove unused namespace declarations from the XML tree.
Note on namespaced XML attributes
If the XML input contains attributes with explicitly specified namespace prefixes, the example code will not remove those prefixes. To accomplish the deletion of namespace prefixes in attributes, add the following for-loop after the line elem.tag = etree.QName(elem).localname, as suggested here
for attr_name in elem.attrib:
    local_attr_name = etree.QName(attr_name).localname
    if attr_name != local_attr_name:
        attr_value = elem.attrib[attr_name]
        del elem.attrib[attr_name]
        elem.attrib[local_attr_name] = attr_value
To learn more about namespaced XML attributes see this answer.
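Putting the two loops together, a minimal self-contained sketch (the namespace URI http://example.com/c is made up for illustration):
from lxml import etree

snippet = b'<a xmlns:c="http://example.com/c"><b c:name="cheese"/></a>'
root = etree.fromstring(snippet)
for elem in root.getiterator():
    elem.tag = etree.QName(elem).localname
    # copy the keys so entries can be replaced while iterating
    for attr_name in list(elem.attrib):
        local_attr_name = etree.QName(attr_name).localname
        if attr_name != local_attr_name:
            elem.attrib[local_attr_name] = elem.attrib.pop(attr_name)
etree.cleanup_namespaces(root)
print(etree.tostring(root).decode())  # <a><b name="cheese"/></a>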
Replace tag as Uku Loskit suggests. In addition to that, use lxml.objectify.deannotate.
from lxml import etree, objectify
metadata = '/Users/user1/Desktop/Python/metadata.xml'
parser = etree.XMLParser(remove_blank_text=True)
tree = etree.parse(metadata, parser)
root = tree.getroot()
####
for elem in root.getiterator():
    if not hasattr(elem.tag, 'find'):
        continue  # guard for Comment tags
    i = elem.tag.find('}')
    if i >= 0:
        elem.tag = elem.tag[i+1:]
objectify.deannotate(root, cleanup_namespaces=True)
####
tree.write('/Users/user1/Desktop/Python/done.xml',
           pretty_print=True, xml_declaration=True, encoding='UTF-8')
Note: some tags, like Comment, return a function when you access the tag attribute, so a guard was added for that.
import xml.etree.ElementTree as ET

def remove_namespace(doc, namespace):
    """Remove namespace in the passed document in place."""
    ns = u'{%s}' % namespace
    nsl = len(ns)
    for elem in doc.iter():
        if elem.tag.startswith(ns):
            elem.tag = elem.tag[nsl:]

metadata = '/Users/user1/Desktop/Python/metadata.xml'
tree = ET.parse(metadata)
root = tree.getroot()
remove_namespace(root, u'http://apple.com/itunes/importer')
# Note: the standard-library ElementTree write() has no pretty_print option
tree.write('/Users/user1/Desktop/Python/done.xml',
           xml_declaration=True, encoding='UTF-8')
Used a snippet of code from here
This method could be easily extended to delete any namespace attributes by searching for tags that begin with "xmlns"
You could also use XSLT to strip the namespaces...
XSLT 1.0 (test.xsl)
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
  <xsl:output indent="yes"/>
  <xsl:strip-space elements="*"/>

  <!-- Identity template: copy nodes and attributes as-is -->
  <xsl:template match="node()">
    <xsl:copy>
      <xsl:apply-templates select="@*|node()"/>
    </xsl:copy>
  </xsl:template>

  <!-- Re-create every element by local name, in no namespace -->
  <xsl:template match="*" priority="1">
    <xsl:element name="{local-name()}" namespace="">
      <xsl:apply-templates select="@*|node()"/>
    </xsl:element>
  </xsl:template>

  <!-- Re-create every attribute by local name, in no namespace -->
  <xsl:template match="@*">
    <xsl:attribute name="{local-name()}" namespace="">
      <xsl:value-of select="."/>
    </xsl:attribute>
  </xsl:template>
</xsl:stylesheet>
Python
from lxml import etree
tree = etree.parse("metadata.xml")
xslt = etree.parse("test.xsl")
new_tree = tree.xslt(xslt)
print(etree.tostring(new_tree, pretty_print=True, xml_declaration=True,
encoding="UTF-8").decode("UTF-8"))
Output
<?xml version='1.0' encoding='UTF-8'?>
<package>
<provider>some data</provider>
<language>en-GB</language>
</package>
you can try with lxml:
# Remove namespace prefixes
for elem in root.getiterator():
    namespace_removed = elem.xpath('local-name()')
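Note that elem.xpath('local-name()') only returns the local name as a string; by itself it does not modify the tree. A small sketch (reusing root and etree from the snippets above) of how you could use it to actually strip the prefixes:
for elem in root.getiterator():
    if isinstance(elem.tag, str):  # skip comments / processing instructions
        elem.tag = elem.xpath('local-name()')
etree.cleanup_namespaces(root)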
Define and call the following function, right after you parse the XML string:
from lxml import etree

def clean_xml_namespaces(root):
    for element in root.getiterator():
        if isinstance(element, etree._Comment):
            continue
        element.tag = etree.QName(element).localname
    etree.cleanup_namespaces(root)
💡 Note - comment elements in the XML are ignored, as they should be
Usage:
xml_content = b'''<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <dependencies>
        <dependency>
            <groupId>org.easytesting</groupId>
            <artifactId>fest-assert</artifactId>
            <version>1.4</version>
        </dependency>
        <!-- this dependency is critical -->
        <dependency>
            <groupId>org.apache.commons</groupId>
            <artifactId>commons-lang3</artifactId>
            <version>3.4</version>
        </dependency>
    </dependencies>
</project>
'''
root = etree.fromstring(xml_content)
clean_xml_namespaces(root)
elements = root.findall(".//dependency")
print(len(elements))
# outputs "2", as expected
So I realize this is an older question with a highly up-voted and accepted answer, but if you are reading LARGE FILES and find yourself in the same predicament I did, I hope this helps you out.
The issue with this approach is, in fact, the iteration. Regardless of how fast the parser is, doing anything say... a few 100k times is gonna eat your execution time. With that said, it came down to really thinking about the problem and understanding how namespaces work (or are "intended to work", because they are honestly not needed). Now if your xml truly uses namespaces, meaning you see tags that look like this: <xs:table>, then you'll need to tweak the approach here for your use-case. I'll include the full way of handling that as well.
DISCLAIMER: I cannot, with a good conscience, tell you to use regular expressions when parsing html/xml; go look at SergiyKolesnikov's answer, as it WORKS, but I had an edge case, so with that said... let's dive into some regex!
Problem: namespace stripping takes forever... and most of the time the namespaces only live inside of the very opening tag, or our "root". So in thinking about how python reads information in, and where our only problem-child is that root node, why not use that to our advantage.
Please NOTE: the file I'm using as my example comes as a raw, horrid, remarkably senseless structure of lulz with the promise of data in there somewhere.
my_file is the path to the file I'm using for our example; I cannot share it with you for professional reasons, and it has been cut way down in size just to get through this answer.
import os, sys, subprocess, re, io, json
from lxml import etree

# Your file would be '_biggest_file' if playing along at home
my_file = _biggest_file
meta_stuff = dict(
    exists = os.path.exists(_biggest_file),
    sizeof = os.path.getsize(_biggest_file),
    extension_is_a_real_thing = any(re.findall(r"\.(html|xml)$", my_file, re.I)),
    system_thinks_its_a = subprocess.check_output(
        ["file", "-i", _biggest_file]
    ).decode().split(":")[-1:][0].strip()
)
print(json.dumps(meta_stuff, indent = 2))
So for starters, decently sized, and the system thinks at best it's html; the file extension is neither xml nor html either...
{
  "exists": true,
  "sizeof": 24442371,
  "extension_is_a_real_thing": false,
  "system_thinks_its_a": "text/html; charset=us-ascii"
}
Approach:
In order to parse an xml file... it should at the very least be xml, so we'll need to check for and add a declaration tag if one doesn't exist
If I have namespaces.. that's bad because I can't use xpaths, which is what I want to do
If my file is huge, I should only operate on the smallest imaginable parts that I need to clean before I'm ready to parse it.
Function
def speed_read(file_path):
    # We're gonna be low-brow and add our own using this string. It's fine
    _xml_dec = '<?xml version="1.0" encoding="utf-8"?>'
    # Even worse.. rgx for xml here we go
    #
    # We'll need to extract the very first node that we find in our document,
    # because for our purposes that's the one we know has the namespace uri's
    # ie: "attributes"
    # FiRsT node : <actual_name xmlns:xsi="idontactuallydoanything.com">
    # We're going to pluck out that first node, get the tag's actual name
    # which means from:
    #     <actual_name xmlns:xsi="idontactuallydoanything.com">...</actual_name>
    # We pluck:
    #     actual_name
    # Then we're gonna replace the entire tag with one we make from that name
    # by simple string substitution
    #
    # -> 'starting from the beginning, capture everything between the < and the >'
    _first_node = re.compile(r'^(\<.*?\>)', re.I | re.M | re.U)
    # -> 'Starting from the beginning, but don't you get me the <, find anything that happens
    #    before the first white-space, which I don't want either man'
    _first_tagname = re.compile(r'(?<=^\<)(.*?)\S+', re.I | re.M | re.U)
    # open the file context
    with open(file_path, "r", encoding="utf-8") as f:
        # go ahead and strip leading and trailing, cause why not... plus adds
        # safety for our regex's
        _raw = f.read().strip()
    # Now, if the file somehow happens to magically have the xml declaration, we
    # wanna go ahead and remove it as we plan to add our own. But for efficiency,
    # only check the first couple of characters
    if _raw.startswith('<?xml', 0, 5):
        # _raw = re.sub(_xml_dec, '', _raw).strip()
        _raw = re.sub(r'\<\?xml.*?\?>\n?', '', _raw).strip()
    # Here we grab that first node that has those meaningless namespaces
    root_element = _first_node.search(_raw).group()
    # here we get its name
    first_tag = _first_tagname.search(root_element).group()
    # Here, we substitute the entire element with a new one
    # that only contains the element's name
    _raw = re.sub(root_element, '<{}>'.format(first_tag), _raw)
    # Now we add our declaration tag in the worst way you have ever
    # seen, but I miss sprintf, so this is how i'm rolling. Python is terrible btw
    _raw = "{}{}".format(_xml_dec, _raw)
    # The bytes part here might end up being overkill.. but this has worked
    # for me consistently so it stays.
    return etree.parse(io.BytesIO(bytes(bytearray(_raw, encoding="utf-8"))))
# a good answer from above:
def safe_read(file_path):
    root = etree.parse(file_path)
    for elem in root.getiterator():
        elem.tag = etree.QName(elem).localname
    # Remove unused namespace declarations
    etree.cleanup_namespaces(root)
    return root
Benchmarking - Yes I know there's better ways to do this.
import time
import pandas as pd

safe_times = []
for i in range(0, 5):
    s = time.time()
    safe_read(_biggest_file)
    safe_times.append(time.time() - s)

fast_times = []
for i in range(0, 5):
    s = time.time()
    speed_read(_biggest_file)
    fast_times.append(time.time() - s)

pd.DataFrame({"safe": safe_times, "fast": fast_times})
Results
safe   fast
2.36   0.61
2.15   0.58
2.47   0.49
2.94   0.60
2.83   0.53
The accepted solution removes namespaces in node names and not in attributes, i.e. <b:spam c:name="cheese"/> will be transformed to <spam c:name="cheese"/>.
An updated version, which will give you <spam name="cheese"/>:
from lxml import etree, objectify

def remove_namespaces(root):
    for elem in root.getiterator():
        if not (
            isinstance(elem, etree._Comment)
            or isinstance(elem, etree._ProcessingInstruction)
        ):
            localname = etree.QName(elem).localname
            if elem.tag != localname:
                elem.tag = localname
            # copy the keys so entries can be deleted while iterating
            for attr_name in list(elem.attrib):
                local_attr_name = etree.QName(attr_name).localname
                if attr_name != local_attr_name:
                    attr_value = elem.attrib[attr_name]
                    del elem.attrib[attr_name]
                    elem.attrib[local_attr_name] = attr_value
    objectify.deannotate(root, cleanup_namespaces=True)
Here are two other ways of removing namespaces. The first uses the lxml.etree.QName helper while the second uses regexes. Both functions allow an optional list of namespaces to match against. If no namespace list is supplied then all namespaces are removed. Attribute keys are also cleaned.
from lxml import etree
import re

def remove_namespaces_qname(doc, namespaces=None):
    for el in doc.getiterator():
        # clean tag
        q = etree.QName(el.tag)
        if q is not None:
            if namespaces is not None:
                if q.namespace in namespaces:
                    el.tag = q.localname
            else:
                el.tag = q.localname
        # clean attributes
        for a, v in el.items():
            q = etree.QName(a)
            if q is not None:
                if namespaces is not None:
                    if q.namespace in namespaces:
                        del el.attrib[a]
                        el.attrib[q.localname] = v
                else:
                    del el.attrib[a]
                    el.attrib[q.localname] = v
    return doc

def remove_namespace_re(doc, namespaces=None):
    if namespaces is not None:
        ns = list(map(lambda n: u'{%s}' % n, namespaces))
    for el in doc.getiterator():
        # clean tag
        m = re.match(r'({.+})(.+)', el.tag)
        if m is not None:
            if namespaces is not None:
                if m.group(1) in ns:
                    el.tag = m.group(2)
            else:
                el.tag = m.group(2)
        # clean attributes
        for a, v in el.items():
            m = re.match(r'({.+})(.+)', a)
            if m is not None:
                if namespaces is not None:
                    if m.group(1) in ns:
                        del el.attrib[a]
                        el.attrib[m.group(2)] = v
                else:
                    del el.attrib[a]
                    el.attrib[m.group(2)] = v
    return doc
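A quick usage sketch (the document and the namespace URI http://example.com/x are made up for illustration):
doc = etree.fromstring(
    '<r xmlns:x="http://example.com/x"><x:item x:id="1">v</x:item></r>'
)
remove_namespaces_qname(doc)            # strip everything
# or: remove_namespace_re(doc, namespaces=["http://example.com/x"])
etree.cleanup_namespaces(doc)
print(etree.tostring(doc).decode())     # <r><item id="1">v</item></r>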
all you need to do is:
objectify.deannotate(root, cleanup_namespaces=True)
after you have got the root, using root = tree.getroot()
