How to get metadata with MMPython for images and video - python

I'm trying to get the creation date for all the photos and videos in a folder, and having mixed success. I have .jpg, .mov, and .mp4 videos in this folder.
I spent a long time looking at other posts, and I saw quite a few references to the MMPython library here: http://sourceforge.net/projects/mmpython/
Looking through the MMPython source I think this will give me what I need, but the problem is that I don't know how to invoke it. In other words, I have my file, but I don't know how to interface with MMPython and I can't find any examples.
Here is my script:
import os
import sys
import exifread
import hashlib
import ExifTool

if len(sys.argv) > 1:
    var = sys.argv[1]
else:
    var = raw_input("Please enter the directory: ")

direct = '/Users/bbarr233/Documents/Personal/projects/photoOrg/photos'
print "direct: " + direct
print "var: " + var
var = var.rstrip()

for root, dirs, filenames in os.walk(var):
    print "root " + root
    for f in filenames:
        # make sure that we are dealing with images or videos
        if f.find(".jpg") > -1 or f.find(".jpeg") > -1 or f.find(".mov") > -1 or f.find(".mp4") > -1:
            print "file " + root + "/" + f
            f = open(root + "/" + f, 'rb')
            # Now I want to do something like this, but don't know which method to call:
            # tags = mmpython.process_file(f)
            # do something with the creation date
Can someone give me a hint on how I can use the MMPython library?
Thanks!!!
PS. I've looked at some other threads on this, such as:
Link to thread: This one didn't make sense to me.
Link to thread: This one worked great for .mov files but not for my .mp4s; it said the creation date was 1946.
Link to thread: This thread is one of the ones that suggested MMPython, but like I said I don't know how to use it.

Here is a well-commented code example I found which will show you how to use mmpython.
This module extracts metadata from new media files, using mmpython,
and provides utilities for converting metadata between formats.
# Copyright (C) 2005 Micah Dowty <micah@navi.cx>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import md5, os, cPickle
import mmpython
from mmpython.audio import mp3info
import sqlite
from RioKarma import Paths
class RidCalculator:
"""This object calculates the RID of a file- a sparse digest used by Rio Karma.
For files <= 64K, this is the file's md5sum. For larger files, this is the XOR
of three md5sums, from 64k blocks in the beginning, middle, and end.
"""
def fromSection(self, fileObj, start, end, blockSize=0x10000):
"""This needs a file-like object, as well as the offset and length of the portion
the RID is generated from. Beware that there is a special case for MP3 files.
"""
# It's a short file, compute only one digest
if end-start <= blockSize:
fileObj.seek(start)
return md5.md5(fileObj.read(end-start)).hexdigest()
# Three digests for longer files
fileObj.seek(start)
a = md5.md5(fileObj.read(blockSize)).digest()
fileObj.seek(end - blockSize)
b = md5.md5(fileObj.read(blockSize)).digest()
fileObj.seek((start + end - blockSize) / 2)
c = md5.md5(fileObj.read(blockSize)).digest()
# Combine the three digests
return ''.join(["%02x" % (ord(a[i]) ^ ord(b[i]) ^ ord(c[i])) for i in range(16)])
def fromFile(self, filename, length=None, mminfo=None):
"""Calculate the RID from a file, given its name. The file's length and
mmpython results may be provided if they're known, to avoid duplicating work.
"""
if mminfo is None:
mminfo = mmpython.parse(filename)
f = open(filename, "rb")
if length is None:
f.seek(0, 2)
length = f.tell()
f.seek(0)
# Is this an MP3 file? For some silliness we have to skip the header
# and the last 128 bytes of the file. mmpython can tell us where the
# header starts, but only in a somewhat ugly way.
if isinstance(mminfo, mmpython.audio.eyed3info.eyeD3Info):
try:
offset = mp3info.MPEG(f)._find_header(f)[0]
except ZeroDivisionError:
# This is a bit of a kludge, since mmpython seems to crash
# here on some MP3s for a currently-unknown reason.
print "WARNING, mmpython got a div0 error on %r" % filename
offset = 0
if offset < 0:
# Hmm, it couldn't find the header? Set this to zero
# so we still get a usable RID, but it probably
# won't strictly be a correct RID.
offset = 0
f.seek(0)
return self.fromSection(f, offset, length-128)
# Otherwise, use the whole file
else:
return self.fromSection(f, 0, length)
class BaseCache:
"""This is an abstract base class for objects that cache metadata
dictionaries on disk. The cache is implemented as a sqlite database,
with a 'dict' table holding administrative key-value data, and a
'files' table holding both a pickled representation of the metadata
and separate columns for all searchable keys.
"""
# This must be defined by subclasses as a small integer that changes
# when any part of the database schema or our storage format changes.
schemaVersion = None
# This is the template for our SQL schema. All searchable keys are
# filled in automatically, but other items may be added by subclasses.
schemaTemplate = """
CREATE TABLE dict
(
name VARCHAR(64) PRIMARY KEY,
value TEXT
);
CREATE TABLE files
(
%(keys)s,
_pickled TEXT NOT NULL
);
"""
# A list of searchable keys, used to build the schema and validate queries
searchableKeys = None
keyType = "VARCHAR(255)"
# The primary key is what ensures a file's uniqueness. Inserting a file
# with a primary key identical to an existing one will update that
# file rather than creating a new one.
primaryKey = None
def __init__(self, name):
self.name = name
self.connection = None
def open(self):
"""Open the cache, creating it if necessary"""
if self.connection is not None:
return
self.connection = sqlite.connect(Paths.getCache(self.name))
self.cursor = self.connection.cursor()
# See what version of the database we got. If it's empty
# or it's old, we need to reset it.
try:
version = self._dictGet('schemaVersion')
except sqlite.DatabaseError:
version = None
if version != str(self.schemaVersion):
self.empty()
def close(self):
if self.connection is not None:
self.sync()
self.connection.close()
self.connection = None
def _getSchema(self):
"""Create a complete schema from our schema template and searchableKeys"""
keys = []
for key in self.searchableKeys:
type = self.keyType
if key == self.primaryKey:
type += " PRIMARY KEY"
keys.append("%s %s" % (key, type))
return self.schemaTemplate % dict(keys=', '.join(keys))
def _encode(self, obj):
"""Encode an object that may not be a plain string"""
if type(obj) is unicode:
obj = obj.encode('utf-8')
elif type(obj) is not str:
obj = str(obj)
return "'%s'" % sqlite.encode(obj)
def _dictGet(self, key):
"""Return a value stored in the persistent dictionary. Returns None if
the key has no matching value.
"""
self.cursor.execute("SELECT value FROM dict WHERE name = '%s'" % key)
row = self.cursor.fetchone()
if row:
return sqlite.decode(row[0])
def _dictSet(self, key, value):
"""Create or update a value stored in the persistent dictionary"""
encodedValue = self._encode(value)
# First try inserting a new item
try:
self.cursor.execute("INSERT INTO dict (name, value) VALUES ('%s', %s)" %
(key, encodedValue))
except sqlite.IntegrityError:
# Violated the primary key constraint, update an existing item
self.cursor.execute("UPDATE dict SET value = %s WHERE name = '%s'" % (
encodedValue, key))
def sync(self):
"""Synchronize in-memory parts of the cache with disk"""
self.connection.commit()
def empty(self):
"""Reset the database to a default empty state"""
# Find and destroy every table in the database
self.cursor.execute("SELECT tbl_name FROM sqlite_master WHERE type='table'")
tables = [row.tbl_name for row in self.cursor.fetchall()]
for table in tables:
self.cursor.execute("DROP TABLE %s" % table)
# Apply the schema
self.cursor.execute(self._getSchema())
self._dictSet('schemaVersion', self.schemaVersion)
def _insertFile(self, d):
"""Insert a new file into the cache, given a dictionary of its metadata"""
# Make name/value lists for everything we want to update
dbItems = {'_pickled': self._encode(cPickle.dumps(d, -1))}
for column in self.searchableKeys:
if column in d:
dbItems[column] = self._encode(d[column])
# First try inserting a new row
try:
names = dbItems.keys()
self.cursor.execute("INSERT INTO files (%s) VALUES (%s)" %
(",".join(names), ",".join([dbItems[k] for k in names])))
except sqlite.IntegrityError:
# Violated the primary key constraint, update an existing item
self.cursor.execute("UPDATE files SET %s WHERE %s = %s" % (
", ".join(["%s = %s" % i for i in dbItems.iteritems()]),
self.primaryKey, self._encode(d[self.primaryKey])))
def _deleteFile(self, key):
"""Delete a File from the cache, given its primary key"""
self.cursor.execute("DELETE FROM files WHERE %s = %s" % (
self.primaryKey, self._encode(key)))
def _getFile(self, key):
"""Return a metadata dictionary given its primary key"""
self.cursor.execute("SELECT _pickled FROM files WHERE %s = %s" % (
self.primaryKey, self._encode(key)))
row = self.cursor.fetchone()
if row:
return cPickle.loads(sqlite.decode(row[0]))
def _findFiles(self, **kw):
"""Search for files. The provided keywords must be searchable.
Yields a list of details dictionaries, one for each match.
Any keyword can be None (matches anything) or it can be a
string to match. Keywords that aren't provided are assumed
to be None.
"""
constraints = []
for key, value in kw.iteritems():
if key not in self.searchableKeys:
raise ValueError("Key name %r is not searchable" % key)
constraints.append("%s = %s" % (key, self._encode(value)))
if not constraints:
constraints.append("1")
self.cursor.execute("SELECT _pickled FROM files WHERE %s" %
" AND ".join(constraints))
row = None
while 1:
row = self.cursor.fetchone()
if not row:
break
yield cPickle.loads(sqlite.decode(row[0]))
def countFiles(self):
"""Return the number of files cached"""
self.cursor.execute("SELECT COUNT(_pickled) FROM files")
return int(self.cursor.fetchone()[0])
def updateStamp(self, stamp):
"""The stamp for this cache is any arbitrary value that is expected to
change when the actual data on the device changes. It is used to
check the cache's validity. This function update's the stamp from
a value that is known to match the cache's current contents.
"""
self._dictSet('stamp', stamp)
def checkStamp(self, stamp):
"""Check whether a provided stamp matches the cache's stored stamp.
This should be used when you have a stamp that matches the actual
data on the device, and you want to see if the cache is still valid.
"""
return self._dictGet('stamp') == str(stamp)
class LocalCache(BaseCache):
"""This is a searchable metadata cache for files on the local disk.
It can be used to speed up repeated metadata lookups for local files,
but more interestingly it can be used to provide full metadata searching
on local music files.
"""
schemaVersion = 1
searchableKeys = ('type', 'rid', 'title', 'artist', 'source', 'filename')
primaryKey = 'filename'
def lookup(self, filename):
"""Return a details dictionary for the given filename, using the cache if possible"""
filename = os.path.realpath(filename)
# Use the mtime as a stamp to see if our cache is still valid
mtime = os.stat(filename).st_mtime
cached = self._getFile(filename)
if cached and int(cached.get('mtime')) == int(mtime):
# Yay, still valid
return cached['details']
# Nope, generate a new dict and cache it
details = {}
Converter().detailsFromDisk(filename, details)
generated = dict(
type = details.get('type'),
rid = details.get('rid'),
title = details.get('title'),
artist = details.get('artist'),
source = details.get('source'),
mtime = mtime,
filename = filename,
details = details,
)
self._insertFile(generated)
return details
def findFiles(self, **kw):
"""Search for files that match all given search keys. This returns an iterator
over filenames, skipping any files that aren't currently valid in the cache.
"""
for cached in self._findFiles(**kw):
try:
mtime = os.stat(cached['filename']).st_mtime
except OSError:
pass
else:
if cached.get('mtime') == mtime:
yield cached['filename']
def scan(self, path):
"""Recursively scan all files within the specified path, creating
or updating their cache entries.
"""
for root, dirs, files in os.walk(path):
for name in files:
filename = os.path.join(root, name)
self.lookup(filename)
# checkpoint this after every directory
self.sync()
_defaultLocalCache = None
def getLocalCache(create=True):
"""Get the default instance of LocalCache"""
global _defaultLocalCache
if (not _defaultLocalCache) and create:
_defaultLocalCache = LocalCache("local")
_defaultLocalCache.open()
return _defaultLocalCache
class Converter:
"""This object manages the connection between different kinds of
metadata- the data stored within a file on disk, mmpython attributes,
Rio attributes, and file extensions.
"""
# Maps mmpython classes to codec names for all formats the player
# hardware supports.
codecNames = {
mmpython.audio.eyed3info.eyeD3Info: 'mp3',
mmpython.audio.mp3info.MP3Info: 'mp3',
mmpython.audio.flacinfo.FlacInfo: 'flac',
mmpython.audio.pcminfo.PCMInfo: 'wave',
mmpython.video.asfinfo.AsfInfo: 'wma',
mmpython.audio.ogginfo.OggInfo: 'vorbis',
}
# Maps codec names to extensions. Identity mappings are the
# default, so they are omitted.
codecExtensions = {
'wave': 'wav',
'vorbis': 'ogg',
}
def filenameFromDetails(self, details,
unicodeEncoding = 'utf-8'):
"""Determine a good filename to use for a file with the given metadata
in the Rio 'details' format. If it's a data file, this will use the
original file as stored in 'title'.
Otherwise, it uses Navi's naming convention: Artist_Name/album_name/##_track_name.extension
"""
if details.get('type') == 'taxi':
return details['title']
# Start with just the artist...
name = details.get('artist', 'None').replace(os.sep, "").replace(" ", "_") + os.sep
album = details.get('source')
if album:
name += album.replace(os.sep, "").replace(" ", "_").lower() + os.sep
track = details.get('tracknr')
if track:
name += "%02d_" % track
name += details.get('title', 'None').replace(os.sep, "").replace(" ", "_").lower()
codec = details.get('codec')
extension = self.codecExtensions.get(codec, codec)
if extension:
name += '.' + extension
return unicode(name).encode(unicodeEncoding, 'replace')
def detailsFromDisk(self, filename, details):
"""Automagically load media metadata out of the provided filename,
adding entries to details. This works on any file type
mmpython recognizes, and other files should be tagged
appropriately for Rio Taxi.
"""
info = mmpython.parse(filename)
st = os.stat(filename)
# Generic details for any file. Note that we start out assuming
# all files are unreadable, and label everything for Rio Taxi.
# Later we'll mark supported formats as music.
details['length'] = st.st_size
details['type'] = 'taxi'
details['rid'] = RidCalculator().fromFile(filename, st.st_size, info)
# We get the bulk of our metadata via mmpython if possible
if info:
self.detailsFromMM(info, details)
if details['type'] == 'taxi':
# All taxi files get their filename as their title, regardless of what mmpython said
details['title'] = os.path.basename(filename)
# Taxi files also always get a codec of 'taxi'
details['codec'] = 'taxi'
# Music files that still don't get a title get their filename minus the extension
if not details.get('title'):
details['title'] = os.path.splitext(os.path.basename(filename))[0]
def detailsFromMM(self, info, details):
"""Update Rio-style 'details' metadata from MMPython info"""
# Mime types aren't implemented consistently in mmpython, but
# we can look at the type of the returned object to decide
# whether this is a format that the Rio probably supports.
# This dictionary maps mmpython clases to Rio codec names.
for cls, codec in self.codecNames.iteritems():
if isinstance(info, cls):
details['type'] = 'tune'
details['codec'] = codec
break
# Map simple keys that don't require and hackery
for fromKey, toKey in (
('artist', 'artist'),
('title', 'title'),
('album', 'source'),
('date', 'year'),
('samplerate', 'samplerate'),
):
v = info[fromKey]
if v is not None:
details[toKey] = v
# The rio uses a two-letter prefix on bit rates- the first letter
# is 'f' or 'v', presumably for fixed or variable. The second is
# 'm' for mono or 's' for stereo. There doesn't seem to be a good
# way to get VBR info out of mmpython, so currently this always
# reports a fixed bit rate. We also have to kludge a bit because
# some metdata sources give us bits/second while some give us
# kilobits/second. And of course, there are multiple ways of
# reporting stereo...
kbps = info['bitrate']
if type(kbps) in (int, float) and kbps > 0:
stereo = bool( (info['channels'] and info['channels'] >= 2) or
(info['mode'] and info['mode'].find('stereo') >= 0) )
if kbps > 8000:
kbps = kbps // 1000
details['bitrate'] = ('fm', 'fs')[stereo] + str(kbps)
# If mmpython gives us a length it seems to always be in seconds,
# whereas the Rio expects milliseconds.
length = info['length']
if length:
details['duration'] = int(length * 1000)
# mmpython often gives track numbers as a fraction- current/total.
# The Rio only wants the current track, and we might as well also
# strip off leading zeros and such.
trackNo = info['trackno']
if trackNo:
details['tracknr'] = int(trackNo.split("/", 1)[0])
Reference: http://svn.navi.cx/misc/trunk/rio-karma/python/RioKarma/Metadata.py
Further:
Including Python modules
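Based on the calls used in the module above, a minimal sketch for your situation might look like this (which keys, such as 'date' or 'length', are actually populated depends on which mmpython parser handles the file):
import mmpython

info = mmpython.parse("/path/to/clip.mp4")
if info:
    # Keys are read dict-style, as in the module above; availability varies by format.
    print info['date']      # creation date, if the parser exposes one
    print info['length']    # duration in seconds, if available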

You should look at the os.stat functions:
https://docs.python.org/2/library/os.html
os.stat returns the file's creation/change and modification times as st_ctime and st_mtime (on Windows st_ctime is the creation time; on Unix it is the last metadata change).
It should be something like this:
import os
st = os.stat(full_file_path)
file_ctime = st.st_ctime
print(file_ctime)
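Putting that together with the directory walk from the question, a minimal sketch might look like this (assuming var is the directory entered in the question's script; note that st_mtime is only a fallback, the real capture date lives in the EXIF/QuickTime metadata):
import os
import datetime

media_exts = ('.jpg', '.jpeg', '.mov', '.mp4')
for root, dirs, filenames in os.walk(var):
    for name in filenames:
        if name.lower().endswith(media_exts):
            path = os.path.join(root, name)
            st = os.stat(path)
            print "%s  modified: %s" % (path, datetime.datetime.fromtimestamp(st.st_mtime))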


Is this a lambda in Python? [duplicate]

This question already has answers here:
What are type hints in Python 3.5?
(5 answers)
What does -> mean in Python function definitions?
(11 answers)
Closed 2 years ago.
I am using Python 3.7 and I have just started my own open-source project. Some time ago a very skilled software developer decided to help, but then he didn't have enough time to continue, so I am taking his work over to develop new features for the project. He designed a script to manage the reading of text from PDF and DOCX files. He developed it very well, but there is something I don't understand:
@classmethod
def extract_document_data(cls, file_path : str) -> DocumentData:
    """
    Entry point of the module, it extracts the data from the document
    whose path is passed as input.
    The extraction strategy is automatically chosen based on the MIME type
    of the file.
    @type file_path: str
    @param file_path: The path of the document to be parsed.
    @rtype: DocumentData
    @returns: An object containing the data of the parsed document.
    """
    mime = magic.Magic(mime=True)
    mime_type = mime.from_file(file_path)
    document_type = DocumentType.get_instance(mime_type)
    strategy = cls.strategies[document_type]
    return strategy.extract_document_data(file_path)
This: -> DocumentData is very obscure to me; if it were a lambda it should be included in the method's arguments as a callback, shouldn't it? What meaning does it have in this position?
I can paste the whole class if you need a more verbose insight:
from enum import Enum
import json
import magic
import docx
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LAParams, LTContainer, LTTextContainer
from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
from pdfminer.pdfinterp import PDFPageInterpreter
from pdfminer.pdfinterp import PDFResourceManager
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfparser import PDFParser
class DocumentType(Enum):
"""
Defines the handled document types.
Each value is associated to a MIME type.
"""
def __init__(self, mime_type):
self.mime_type = mime_type
@classmethod
def get_instance(cls, mime_type : str):
values = [e for e in cls]
for value in values:
if value.mime_type == mime_type:
return value
raise MimeNotValidError(mime_type)
PDF = 'application/pdf'
DOCX = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'
class MimeNotValidError(Exception):
"""
Exception to be raised when a not valid MIME type is processed.
"""
pass
class DocumentData:
"""
Wrapper for the extracted document data (TOC and contents).
"""
def __init__(self, toc : list = [], pages : list = [], document_text : str = None):
self.toc = toc
self.pages = pages
if document_text is not None:
self.document_text = document_text
else:
self.document_text = ' '.join([page.replace('\n', ' ') for page in pages])
def toc_as_json(self) -> str:
return json.dumps(self.toc)
class ExtractionStrategy:
"""
Base class for the extraction strategies.
"""
@staticmethod
def extract_document_data(file_path : str) -> DocumentData:
pass
class DOCXExtractionStrategy(ExtractionStrategy):
"""
It implements the TOC and contents extraction from a DOCX document.
"""
@staticmethod
def extract_document_data(file_path : str) -> DocumentData:
document = docx.Document(file_path)
body_elements = document._body._body
# Selecting only the <w:t> elements from DOCX XML,
# as they're the only to contain some text.
text_elems = body_elements.xpath('.//w:t')
return DocumentData(document_text = ' '.join([elem.text for elem in text_elems]))
class PDFExtractionStrategy(ExtractionStrategy):
"""
It implements the TOC and contents extraction from a PDF document.
"""
@staticmethod
def parse_toc(doc : PDFDocument) -> list:
raw_toc = []
try:
outlines = doc.get_outlines()
for (level, title, dest, a, se) in outlines:
raw_toc.append((level, title))
except PDFNoOutlines:
pass
return PDFExtractionStrategy.build_toc_tree(raw_toc)
@staticmethod
def build_toc_tree(items : list) -> list:
"""
Builds the TOC tree from a list of TOC items.
@type items: list
@param items: The TOC items.
Each item must have the following format: (<item depth>, <item description>).
E.g: [(1, 'Contents'), (2, 'Chapter 1'), (2, 'Chapter 2')]
@rtype: list
@returns: The TOC tree. The tree hasn't a root element, therefore it
actually is a list.
"""
toc = []
if items is None or len(items) == 0:
return toc
current_toc_level = toc
# Using an explicit stack containing the lists corresponding to
# the various levels of the TOC, to simulate the recursive building
# of the TOC tree in a more efficient way
toc_levels_stack = []
toc_levels_stack.append(current_toc_level)
# Each TOC item can be inserted into the current TOC level as
# string (just the item description) or as dict, where the key is
# the item description and the value is a list containing the
# children TOC items.
# To correctly determine how to insert the current item into
# the current level, a kind of look-ahead is needed, that is
# the depth of the next item has to be considered.
# Initializing the variables related to the previous item.
prev_item_depth, prev_item_desc = items[0]
# Adding a fake final item in order to handle all the TOC items
# inside the cycle.
items.append((-1, ''))
for i in range(1, len(items)):
# In fact each iteration handles the item of the previous
# one, using the current item to determine how to insert
# the previous item into the current TOC level,
# as explained before.
curr_item = items[i]
curr_item_depth = curr_item[0]
if curr_item_depth == prev_item_depth:
# The depth of the current item is the same
# as the previous one.
# Inserting the previous item into the current TOC level
# as string.
current_toc_level.append(prev_item_desc)
elif curr_item_depth == prev_item_depth + 1:
# The depth of the current item is increased by 1 compared to
# the previous one.
# Inserting the previous item into the current TOC level
# as dict.
prev_item_dict = { prev_item_desc : [] }
current_toc_level.append(prev_item_dict)
# Updating the current TOC level with the newly created one
# which contains the children of the previous item.
current_toc_level = prev_item_dict[prev_item_desc]
toc_levels_stack.append(current_toc_level)
elif curr_item_depth < prev_item_depth:
# The depth of the current item is lesser than
# the previous one.
# Inserting the previous item into the current TOC level
# as string.
current_toc_level.append(prev_item_desc)
if i < len(items)-1:
# Executing these steps for all the items except the last one
depth_diff = prev_item_depth - curr_item_depth
# Removing from the stack as many TOC levels as the difference
# between the depth of the previous item and the depth of the
# current one.
for i in range(0, depth_diff):
toc_levels_stack.pop()
# Updating the current TOC level with the one contained in
# the head of the stack.
current_toc_level = toc_levels_stack[-1]
# Updating the previous item with the current one
prev_item_depth, prev_item_desc = curr_item
return toc
@staticmethod
def from_bytestring(s) -> str:
"""
If the input string is a byte-string, converts it to a string using
UTF-8 as encoding.
@param s: A string or a byte-string.
@rtype: str
@returns: The potentially converted string.
"""
if s:
if isinstance(s, str):
return s
else:
return s.encode('utf-8')
@staticmethod
def parse_layout_nodes(container : LTContainer) -> str:
"""
Recursively extracts the text from all the nodes contained in the
input PDF layout tree/sub-tree.
@type container: LTContainer
@param container: The PDF layout tree/sub-tree from which to extract the text.
@rtype: str
@returns: A string containing the extracted text.
"""
text_content = []
# The iterator returns the children nodes.
for node in container:
if isinstance(node, LTTextContainer):
# Only nodes of type LTTextContainer contain text.
text_content.append(PDFExtractionStrategy.from_bytestring(node.get_text()))
elif isinstance(node, LTContainer):
# Recursively calling the method on the current node, which is a container itself.
text_content.append(PDFExtractionStrategy.parse_layout_nodes(node))
else:
# Ignoring all the other node types.
pass
# Joining all the extracted text chunks with a new line character.
return "\n".join(text_content)
@staticmethod
def parse_pages(doc : PDFDocument) -> list:
rsrcmgr = PDFResourceManager()
laparams = LAParams()
device = PDFPageAggregator(rsrcmgr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
text_content = []
for i, page in enumerate(PDFPage.create_pages(doc)):
interpreter.process_page(page)
layout = device.get_result()
# Extracts the text from all the nodes of the PDF layout tree of each page
text_content.append(PDFExtractionStrategy.parse_layout_nodes(layout))
return text_content
@staticmethod
def parse_pdf(file_path : str) -> (list, list):
toc = []
pages = []
try:
fp = open(file_path, 'rb')
parser = PDFParser(fp)
doc = PDFDocument(parser)
parser.set_document(doc)
if doc.is_extractable:
toc = PDFExtractionStrategy.parse_toc(doc)
pages = PDFExtractionStrategy.parse_pages(doc)
fp.close()
except IOError:
pass
return (toc, pages)
@staticmethod
def extract_document_data(file_path : str) -> DocumentData:
toc, pages = PDFExtractionStrategy.parse_pdf(file_path)
return DocumentData(toc, pages = pages)
class DocumentDataExtractor:
"""
Main class of the module.
It's responsible for actually executing the text extraction.
The output is constituted by the following items:
-table of contents (TOC);
-pages contents.
"""
# Dictionary containing the extraction strategies for the different
# document types, indexed by the corresponding DocumentType enum values.
strategies = {
DocumentType.DOCX : DOCXExtractionStrategy(),
DocumentType.PDF : PDFExtractionStrategy()
}
@classmethod
def extract_document_data(cls, file_path : str) -> DocumentData:
"""
Entry point of the module, it extracts the data from the document
whose path is passed as input.
The extraction strategy is automatically chosen based on the MIME type
of the file.
@type file_path: str
@param file_path: The path of the document to be parsed.
@rtype: DocumentData
@returns: An object containing the data of the parsed document.
"""
mime = magic.Magic(mime=True)
mime_type = mime.from_file(file_path)
document_type = DocumentType.get_instance(mime_type)
strategy = cls.strategies[document_type]
return strategy.extract_document_data(file_path)

Clone Kubernetes objects programmatically using the Python API

The Python API is available to read objects from a cluster. By cloning we mean:
1. Get a copy of an existing Kubernetes object using kubectl get
2. Change the properties of the object
3. Apply the new object
Until recently this could be done with the --export option of the API, but it was deprecated in 1.14. How can we use the Python Kubernetes API to do steps 1-3 described above?
There are multiple questions about how to export Python API objects to YAML, but it's unclear how to transform the Kubernetes API object itself.
Just use to_dict(), which is now offered by Kubernetes Client objects. Note that it creates only a partly deep copy, so to be safe:
copied_obj = copy.deepcopy(obj.to_dict())
Dicts can be passed to create* and patch* methods.
For convenience, you can also wrap the dict in Prodict.
copied_obj = Prodict.from_dict(copy.deepcopy(obj.to_dict()))
The final issue is getting rid of superfluous fields. (Unfortunately, Kubernetes sprinkles them throughout the object.) I use kopf's internal facility for getting the "essence" of an object. (It takes care of the deep copy.)
copied_obj = kopf.AnnotationsDiffBaseStorage().build(body=kopf.Body(obj.to_dict()))
copied_obj = Prodict.from_dict(copied_obj)
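To make the three steps concrete, here is a minimal sketch using the official client (the deployment and namespace names are illustrative; the copied model object is passed back as the request body after resetting the server-managed metadata and status):
import copy
from kubernetes import client, config

config.load_kube_config()
apps = client.AppsV1Api()

# 1. Get a copy of an existing object
source = apps.read_namespaced_deployment("istio-ingressgateway", "istio-system")
clone = copy.deepcopy(source)

# 2. Change its properties; drop the fields the server manages
clone.metadata = client.V1ObjectMeta(
    name="http2-ingressgateway",
    namespace="istio-system",
    labels=source.metadata.labels,
)
clone.status = None

# 3. Apply (create) the new object
apps.create_namespaced_deployment(namespace="istio-system", body=clone)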
After looking at the requirement, I spent a couple of hours researching the Kubernetes Python API. Issue 340 and others ask about how to transform the Kubernetes API object into a dict, but the only workaround I found was to retrieve the raw data and then convert to JSON.
The following code uses the Kubernetes API to get a deployment and its related HPA from the namespaced objects, retrieving their raw values as JSON.
Then, after transforming the data into a dict, you can optionally clean it up by removing null references.
Once you are done, you can dump the dict as a YAML payload and save it to the file system.
Finally, you can apply it using either kubectl or the Kubernetes Python API.
Note:
Make sure to set KUBECONFIG=config so that you can point to a cluster
Make sure to adjust the values of origin_obj_name = "istio-ingressgateway" and origin_obj_namespace = "istio-system" with the name of the corresponding objects to be cloned in the given namespace.
import os
import logging
import yaml
import json
logging.basicConfig(level = logging.INFO)
import crayons
from kubernetes import client, config
from kubernetes.client.rest import ApiException
LOGGER = logging.getLogger(" IngressGatewayCreator ")
class IngressGatewayCreator:
@staticmethod
def clone_default_ingress(clone_context):
# Clone the deployment
IngressGatewayCreator.clone_deployment_object(clone_context)
# Clone the deployment's HPA
IngressGatewayCreator.clone_hpa_object(clone_context)
@staticmethod
def clone_deployment_object(clone_context):
kubeconfig = os.getenv('KUBECONFIG')
config.load_kube_config(kubeconfig)
v1apps = client.AppsV1beta1Api()
deployment_name = clone_context.origin_obj_name
namespace = clone_context.origin_obj_namespace
try:
# gets an instance of the api without deserialization to model
# https://github.com/kubernetes-client/python/issues/574#issuecomment-405400414
deployment = v1apps.read_namespaced_deployment(deployment_name, namespace, _preload_content=False)
except ApiException as error:
if error.status == 404:
LOGGER.info("Deployment %s not found in namespace %s", deployment_name, namespace)
return
raise
# Clone the object deployment as a dict
cloned_dict = IngressGatewayCreator.clone_k8s_object(deployment, clone_context)
# Change additional objects
cloned_dict["spec"]["selector"]["matchLabels"]["istio"] = clone_context.name
cloned_dict["spec"]["template"]["metadata"]["labels"]["istio"] = clone_context.name
# Save the deployment template in the output dir
context.save_clone_as_yaml(cloned_dict, "deployment")
@staticmethod
def clone_hpa_object(clone_context):
kubeconfig = os.getenv('KUBECONFIG')
config.load_kube_config(kubeconfig)
hpas = client.AutoscalingV1Api()
hpa_name = clone_context.origin_obj_name
namespace = clone_context.origin_obj_namespace
try:
# gets an instance of the api without deserialization to model
# https://github.com/kubernetes-client/python/issues/574#issuecomment-405400414
hpa = hpas.read_namespaced_horizontal_pod_autoscaler(hpa_name, namespace, _preload_content=False)
except ApiException as error:
if error.status == 404:
LOGGER.info("HPA %s not found in namespace %s", hpa_name, namespace)
return
raise
# Clone the object deployment as a dict
cloned_dict = IngressGatewayCreator.clone_k8s_object(hpa, clone_context)
# Change additional objects
cloned_dict["spec"]["scaleTargetRef"]["name"] = clone_context.name
# Save the deployment template in the output dir
context.save_clone_as_yaml(cloned_dict, "hpa")
@staticmethod
def clone_k8s_object(k8s_object, clone_context):
# Manipulate at the dict level, from the fetched raw object, not via the k8s api models
# https://github.com/kubernetes-client/python/issues/574#issuecomment-405400414
cloned_obj = json.loads(k8s_object.data)
labels = cloned_obj['metadata']['labels']
labels['istio'] = clone_context.name
cloned_obj['status'] = None
# Scrub by removing the "null" and "None" values
cloned_obj = IngressGatewayCreator.scrub_dict(cloned_obj)
# Patch the metadata with the name and labels adjusted
cloned_obj['metadata'] = {
"name": clone_context.name,
"namespace": clone_context.origin_obj_namespace,
"labels": labels
}
return cloned_obj
# https://stackoverflow.com/questions/12118695/efficient-way-to-remove-keys-with-empty-strings-from-a-dict/59959570#59959570
@staticmethod
def scrub_dict(d):
new_dict = {}
for k, v in d.items():
if isinstance(v, dict):
v = IngressGatewayCreator.scrub_dict(v)
if isinstance(v, list):
v = IngressGatewayCreator.scrub_list(v)
if not v in (u'', None, {}):
new_dict[k] = v
return new_dict
# https://stackoverflow.com/questions/12118695/efficient-way-to-remove-keys-with-empty-strings-from-a-dict/59959570#59959570
@staticmethod
def scrub_list(d):
scrubbed_list = []
for i in d:
if isinstance(i, dict):
i = IngressGatewayCreator.scrub_dict(i)
scrubbed_list.append(i)
return scrubbed_list
class IngressGatewayContext:
def __init__(self, manifest_dir, name, hostname, nats, type):
self.manifest_dir = manifest_dir
self.name = name
self.hostname = hostname
self.nats = nats
self.ingress_type = type
self.origin_obj_name = "istio-ingressgateway"
self.origin_obj_namespace = "istio-system"
def save_clone_as_yaml(self, k8s_object, kind):
try:
# Just try to create if it doesn't exist
os.makedirs(self.manifest_dir)
except FileExistsError:
LOGGER.debug("Dir already exists %s", self.manifest_dir)
full_file_path = os.path.join(self.manifest_dir, self.name + '-' + kind + '.yaml')
# Store in the file-system with the name provided
# https://stackoverflow.com/questions/12470665/how-can-i-write-data-in-yaml-format-in-a-file/18210750#18210750
with open(full_file_path, 'w') as yaml_file:
yaml.dump(k8s_object, yaml_file, default_flow_style=False)
LOGGER.info(crayons.yellow("Saved %s '%s' at %s: \n%s"), kind, self.name, full_file_path, k8s_object)
try:
k8s_clone_name = "http2-ingressgateway"
hostname = "my-nlb-awesome.a.company.com"
nats = ["123.345.678.11", "333.444.222.111", "33.221.444.23"]
manifest_dir = "out/clones"
context = IngressGatewayContext(manifest_dir, k8s_clone_name, hostname, nats, "nlb")
IngressGatewayCreator.clone_default_ingress(context)
except Exception as err:
print("ERROR: {}".format(err))
Not python, but I've used jq in the past to quickly clone something with the small customisations required for each use case (usually cloning secrets into a new namespace).
kc get pod whatever-85pmk -o json \
| jq 'del(.status, .metadata ) | .metadata.name="newname"' \
| kc apply -f - -o yaml --dry-run
This is really easy to do with Hikaru.
Here is an example from my own open source project:
def duplicate_without_fields(obj: HikaruBase, omitted_fields: List[str]):
    """
    Duplicate a hikaru object, omitting the specified fields

    This is useful when you want to compare two versions of an object and first "cleanup"
    fields that shouldn't be compared.

    :param HikaruBase obj: A kubernetes object
    :param List[str] omitted_fields: List of fields to be omitted. Field name format should be '.' separated
        For example: ["status", "metadata.generation"]
    """
    if obj is None:
        return None
    duplication = obj.dup()
    for field_name in omitted_fields:
        field_parts = field_name.split(".")
        try:
            if len(field_parts) > 1:
                parent_obj = duplication.object_at_path(field_parts[:-1])
            else:
                parent_obj = duplication
            setattr(parent_obj, field_parts[-1], None)
        except Exception:
            pass  # in case the field doesn't exist on this object
    return duplication
Dumping the object to YAML afterwards or re-applying it to the cluster is trivial with Hikaru.
We're using this to clean up objects so that we can show users a GitHub-style diff when objects change, without spammy fields that change often, like generation.
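For instance, a small usage sketch (assuming deployment is a Hikaru Deployment object already loaded from the cluster, and using hikaru's get_yaml to dump the result):
from hikaru import get_yaml

# Drop the noisy, server-managed fields before diffing or re-applying
clean = duplicate_without_fields(deployment, ["status", "metadata.generation"])
print(get_yaml(clean))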

pyral deleteAttachment to delete attachment from a Test Case definition not working

I'm trying to delete the attachments from a Test Case definition on Rally using pyral:
del_attachment = rally.deleteAttachment('TestCase',filename)
Any suggestions on what is going wrong?
If you look at the code of pyral, you get the following signature:
def deleteAttachment(self, artifact, filename):
    """
    Still unclear for WSAPI v2.0 if Attachment items can be deleted.
    Apparently AttachmentContent items can be deleted.
    """
    art_type, artifact = self._realizeArtifact(artifact)
    if not art_type:
        return False
    current_attachments = [att for att in artifact.Attachments]
    hits = [att for att in current_attachments if att.Name == filename]
    if not hits:
        return False
    ...
So the first argument is an artifact (i.e. the test case object), not a string.
The code should be like this:
import logging
logging.basicConfig(format="%(levelname)s:%(module)s:%(lineno)d:%(msg)s")
try:
    # Get number of existing steps
    testcase = rally.get("TestCase", query="FormattedID = %s" % tcid, instance=True)
    has_been_deleted = rally.deleteAttachment(testcase, filename)
    if not has_been_deleted:
        msg = "Attachment '{0}' of Test Case {1} not deleted successfully"
        logging.warning(msg.format(filename, testcase.FormattedID))
except RallyRESTAPIError as e:
    logging.error("Error while deleting attachment '{0}': {1}".format(filename, e))
Passing a string with the FormattedID of the artifact should also work, because pyral tries to identify the type of artifact and retrieve it for you in the call below:
art_type, artifact = self._realizeArtifact(artifact)
Have a look at the code for _realizeArtifact:
def _realizeArtifact(self, artifact):
    """
    Helper method to identify the artifact type and to retrieve it if the
    artifact value is a FormattedID. If the artifact is already an instance
    of a Rally entity, then all that needs to be done is deduce the art_type
    from the class name. If the artifact argument given is neither of those
    two conditions, return back a 2 tuple of (False, None).
    Once you have a Rally instance of the artifact, return back a
    2 tuple of (art_type, artifact)
    """
    art_type = False
    if 'pyral.entity.' in str(type(artifact)):
        # we've got the artifact already...
        art_type = artifact.__class__.__name__
    elif self.FORMATTED_ID_PATTERN.match(artifact):
        # artifact is a potential FormattedID value
        prefix = artifact[:2]
        if prefix[1] in string.digits:
            prefix = prefix[0]
        art_type = self.ARTIFACT_TYPE[prefix]
        response = self.get(art_type, fetch=True, query='FormattedID = %s' % artifact)
        if response.resultCount == 1:
            artifact = response.next()
        else:
            art_type = False
    else:  # the supplied artifact isn't anything we can deal with here...
        pass
    return art_type, artifact
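So, under that reading, passing the test case's FormattedID directly should also resolve it (the ID below is hypothetical):
del_attachment = rally.deleteAttachment("TC1234", filename)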

Using Python Classes and Lists to print reports from a csv

I have a homework assignment that I have been stuck on for several days.
Basic problem description:
The Incident class has properties: ID, time, type, location, narrative, and status,
and methods: init, brief, isMorning, resolve.
The script takes one argument: the full path of the crime report CSV.
First few lines of CSV:
ID Time Type Location Narrative
1271 11:54 AM Drug Violation Wolf Ridge Report of possible drug violation. Student was referred to the university.
My code so far:
import sys

class Incident:
    def __init__(self, ID, time, type, location, narrative, status):
        self.ID = id
        self.time = time
        self.type = type
        self.location = location
        self.narrative = narrative
        self.status = status

    def brief(self):
        print '''{0}: {1}, {2}
        {3}
        '''.format(self.ID, self.type, self.status, self.narrative)

    def isMorning(self):
        if 'AM' in self.time:
            return True
        else:
            return False

    def resolve(self):
        if self.status == 'Pending':
            self.status = 'Resolved'

try:
    dataset = sys.argv[1]
except IndexError:
    print 'Usage: Requires full path input file name.'
    sys.exit()

# Create an empty list to contain the Incident objects.
crimeList = []

# Read the crime report.
with open(dataset, 'r') as f:
    # Read the header.
    headers = f.readline().split(',')
    # Read each record and parse the attributes.
    for line in f:
        lineList = line.strip().split(',')
        reportNumber = lineList[0]
        timeReported = lineList[1]
        incidentType = lineList[2]
        location = lineList[3]
        narrative = lineList[4]
        status = lineList[5].strip()
        ### Create initialize an Incident object instance and store it in a variable
        crime = Incident(reportNumber, timeReported, incidentType, location, narrative, status)
        ### Append the new Incident object to the crimeList.
        crimeList.append(crime)
What I'm stuck on:
I need to access the "nth" Incident in the crimeList and run various methods on it. I can't seem to find a way to access the item in a form I can call methods on.
I've tried enumerating and slicing but just can't get anything to work.
Does anyone have any suggestions?
Look up the nth crime from your crimeList like so: x=crimeList[n], and then call the methods on that instance: x.brief(), x.resolve(), etc.
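For example (the index and the loop below are just illustrations of the same idea):
x = crimeList[2]          # third incident; indexing starts at 0
x.brief()
if x.isMorning():
    x.resolve()

# or run a method on every incident in the list
for incident in crimeList:
    incident.brief()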

Parsing a file with multiple xmls in it

Is there a way to parse a file which contains multiple XML documents in it?
E.g., if I have a file called stocks.xml and within stocks.xml I have more than one XML document, is there any way to parse this file?
-- stocks.xml
<?xml version="1.0" encoding="ASCII"?><PRODUCT><ID>A001</ID>..</PRODUCT><SHOP-1><QUANTITY>nn</QUANITY><SHOP-1><QUANTITY>nn</QUANITY>
<?xml version="1.0" encoding="ASCII"?><PRODUCT><ID>A002</ID>..</PRODUCT><SHOP-1><QUANTITY>nn</QUANITY><SHOP-1><QUANTITY>nn</QUANITY>
If you can assume that each xml document begins with <?xml version="1.0" ..., simply read the file line-by-line looking for lines that match that pattern (or, read all the data and then do a search through the data).
Once you find a line, keep it, and append subsequent lines until the next xml document is found or you hit EOF. lather, rinse, repeat.
You now have one xml document in a string. You can then parse the string using the normal XML parsing tools, or you write it to a file.
This will work fine in most cases, but of course it could fall down if one of your embedded xml documents contains data that exactly matches the same pattern as the beginning of a document. Most likely you don't have to worry about that, and if you do there are ways to avoid that with a little more cleverness.
The right solution really depends on your needs. If you're creating a general purpose must-work-at-all-times solution this might not be right for you. For real world, special purpose problems it's probably more than Good Enough, and often Good Enough is indeed Good Enough.
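A minimal sketch of that line-by-line approach (assuming each document starts with an <?xml ...?> declaration on its own line, as in the sample above, and that each extracted chunk is itself well-formed XML):
import xml.etree.ElementTree as ET

docs, current = [], []
with open('stocks.xml') as f:
    for line in f:
        if line.startswith('<?xml') and current:
            docs.append(''.join(current))
            current = []
        current.append(line)
    if current:
        docs.append(''.join(current))

for doc in docs:
    root = ET.fromstring(doc)
    print root.tag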
You should see this Python program by Michiel de Hoon.
And if you want to parse multiple documents, then a rule to detect that we are in another XML document must be developed. For example, at first you read <stocks> ... and at the end you must read </stocks>; when you find that, if there is something else, continue reading and run the same parser again until you reach EOF.
# Copyright 2008 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Parser for XML results returned by NCBI's Entrez Utilities. This
parser is used by the read() function in Bio.Entrez, and is not intended
be used directly.
"""
# The question is how to represent an XML file as Python objects. Some
# XML files returned by NCBI look like lists, others look like dictionaries,
# and others look like a mix of lists and dictionaries.
#
# My approach is to classify each possible element in the XML as a plain
# string, an integer, a list, a dictionary, or a structure. The latter is a
# dictionary where the same key can occur multiple times; in Python, it is
# represented as a dictionary where that key occurs once, pointing to a list
# of values found in the XML file.
#
# The parser then goes through the XML and creates the appropriate Python
# object for each element. The different levels encountered in the XML are
# preserved on the Python side. So a subelement of a subelement of an element
# is a value in a dictionary that is stored in a list which is a value in
# some other dictionary (or a value in a list which itself belongs to a list
# which is a value in a dictionary, and so on). Attributes encountered in
# the XML are stored as a dictionary in a member .attributes of each element,
# and the tag name is saved in a member .tag.
#
# To decide which kind of Python object corresponds to each element in the
# XML, the parser analyzes the DTD referred at the top of (almost) every
# XML file returned by the Entrez Utilities. This is preferred over a hand-
# written solution, since the number of DTDs is rather large and their
# contents may change over time. About half the code in this parser deals
# wih parsing the DTD, and the other half with the XML itself.
import os.path
import urlparse
import urllib
import warnings
from xml.parsers import expat
# The following four classes are used to add a member .attributes to integers,
# strings, lists, and dictionaries, respectively.
class IntegerElement(int):
def __repr__(self):
text = int.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "IntegerElement(%s, attributes=%s)" % (text, repr(attributes))
class StringElement(str):
def __repr__(self):
text = str.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "StringElement(%s, attributes=%s)" % (text, repr(attributes))
class UnicodeElement(unicode):
def __repr__(self):
text = unicode.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "UnicodeElement(%s, attributes=%s)" % (text, repr(attributes))
class ListElement(list):
def __repr__(self):
text = list.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "ListElement(%s, attributes=%s)" % (text, repr(attributes))
class DictionaryElement(dict):
def __repr__(self):
text = dict.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "DictElement(%s, attributes=%s)" % (text, repr(attributes))
# A StructureElement is like a dictionary, but some of its keys can have
# multiple values associated with it. These values are stored in a list
# under each key.
class StructureElement(dict):
def __init__(self, keys):
dict.__init__(self)
for key in keys:
dict.__setitem__(self, key, [])
self.listkeys = keys
def __setitem__(self, key, value):
if key in self.listkeys:
self[key].append(value)
else:
dict.__setitem__(self, key, value)
def __repr__(self):
text = dict.__repr__(self)
try:
attributes = self.attributes
except AttributeError:
return text
return "DictElement(%s, attributes=%s)" % (text, repr(attributes))
class NotXMLError(ValueError):
def __init__(self, message):
self.msg = message
def __str__(self):
return "Failed to parse the XML data (%s). Please make sure that the input data are in XML format." % self.msg
class CorruptedXMLError(ValueError):
def __init__(self, message):
self.msg = message
def __str__(self):
return "Failed to parse the XML data (%s). Please make sure that the input data are not corrupted." % self.msg
class ValidationError(ValueError):
"""Validating parsers raise this error if the parser finds a tag in the XML that is not defined in the DTD. Non-validating parsers do not raise this error. The Bio.Entrez.read and Bio.Entrez.parse functions use validating parsers by default (see those functions for more information)"""
def __init__(self, name):
self.name = name
def __str__(self):
return "Failed to find tag '%s' in the DTD. To skip all tags that are not represented in the DTD, please call Bio.Entrez.read or Bio.Entrez.parse with validate=False." % self.name
class DataHandler:
home = os.path.expanduser('~')
local_dtd_dir = os.path.join(home, '.biopython', 'Bio', 'Entrez', 'DTDs')
del home
from Bio import Entrez
global_dtd_dir = os.path.join(str(Entrez.__path__[0]), "DTDs")
del Entrez
def __init__(self, validate):
self.stack = []
self.errors = []
self.integers = []
self.strings = []
self.lists = []
self.dictionaries = []
self.structures = {}
self.items = []
self.dtd_urls = []
self.validating = validate
self.parser = expat.ParserCreate(namespace_separator=" ")
self.parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
self.parser.XmlDeclHandler = self.xmlDeclHandler
def read(self, handle):
"""Set up the parser and let it parse the XML results"""
try:
self.parser.ParseFile(handle)
except expat.ExpatError, e:
if self.parser.StartElementHandler:
# We saw the initial <!xml declaration, so we can be sure that
# we are parsing XML data. Most likely, the XML file is
# corrupted.
raise CorruptedXMLError(e)
else:
# We have not seen the initial <!xml declaration, so probably
# the input data is not in XML format.
raise NotXMLError(e)
try:
return self.object
except AttributeError:
if self.parser.StartElementHandler:
# We saw the initial <!xml declaration, and expat didn't notice
# any errors, so self.object should be defined. If not, this is
# a bug.
raise RuntimeError("Failed to parse the XML file correctly, possibly due to a bug in Bio.Entrez. Please contact the Biopython developers at biopython-dev@biopython.org for assistance.")
else:
# We did not see the initial <!xml declaration, so probably
# the input data is not in XML format.
raise NotXMLError("XML declaration not found")
def parse(self, handle):
BLOCK = 1024
while True:
#Read in another block of the file...
text = handle.read(BLOCK)
if not text:
# We have reached the end of the XML file
if self.stack:
# No more XML data, but there is still some unfinished
# business
raise CorruptedXMLError
try:
for record in self.object:
yield record
except AttributeError:
if self.parser.StartElementHandler:
# We saw the initial <!xml declaration, and expat
# didn't notice any errors, so self.object should be
# defined. If not, this is a bug.
raise RuntimeError("Failed to parse the XML file correctly, possibly due to a bug in Bio.Entrez. Please contact the Biopython developers at biopython-dev@biopython.org for assistance.")
else:
# We did not see the initial <!xml declaration, so
# probably the input data is not in XML format.
raise NotXMLError("XML declaration not found")
self.parser.Parse("", True)
self.parser = None
return
try:
self.parser.Parse(text, False)
except expat.ExpatError, e:
if self.parser.StartElementHandler:
# We saw the initial <!xml declaration, so we can be sure
# that we are parsing XML data. Most likely, the XML file
# is corrupted.
raise CorruptedXMLError(e)
else:
# We have not seen the initial <!xml declaration, so
# probably the input data is not in XML format.
raise NotXMLError(e)
if not self.stack:
# Haven't read enough from the XML file yet
continue
records = self.stack[0]
if not isinstance(records, list):
raise ValueError("The XML file does not represent a list. Please use Entrez.read instead of Entrez.parse")
while len(records) > 1: # Then the top record is finished
record = records.pop(0)
yield record
def xmlDeclHandler(self, version, encoding, standalone):
# XML declaration found; set the handlers
self.parser.StartElementHandler = self.startElementHandler
self.parser.EndElementHandler = self.endElementHandler
self.parser.CharacterDataHandler = self.characterDataHandler
self.parser.ExternalEntityRefHandler = self.externalEntityRefHandler
self.parser.StartNamespaceDeclHandler = self.startNamespaceDeclHandler
def startNamespaceDeclHandler(self, prefix, un):
raise NotImplementedError("The Bio.Entrez parser cannot handle XML data that make use of XML namespaces")
def startElementHandler(self, name, attrs):
self.content = ""
if name in self.lists:
object = ListElement()
elif name in self.dictionaries:
object = DictionaryElement()
elif name in self.structures:
object = StructureElement(self.structures[name])
elif name in self.items: # Only appears in ESummary
name = str(attrs["Name"]) # convert from Unicode
del attrs["Name"]
itemtype = str(attrs["Type"]) # convert from Unicode
del attrs["Type"]
if itemtype=="Structure":
object = DictionaryElement()
elif name in ("ArticleIds", "History"):
object = StructureElement(["pubmed", "medline"])
elif itemtype=="List":
object = ListElement()
else:
object = StringElement()
object.itemname = name
object.itemtype = itemtype
elif name in self.strings + self.errors + self.integers:
self.attributes = attrs
return
else:
# Element not found in DTD
if self.validating:
raise ValidationError(name)
else:
# this will not be stored in the record
object = ""
if object!="":
object.tag = name
if attrs:
object.attributes = dict(attrs)
if len(self.stack)!=0:
current = self.stack[-1]
try:
current.append(object)
except AttributeError:
current[name] = object
self.stack.append(object)
def endElementHandler(self, name):
value = self.content
if name in self.errors:
if value=="":
return
else:
raise RuntimeError(value)
elif name in self.integers:
value = IntegerElement(value)
elif name in self.strings:
# Convert Unicode strings to plain strings if possible
try:
value = StringElement(value)
except UnicodeEncodeError:
value = UnicodeElement(value)
elif name in self.items:
self.object = self.stack.pop()
if self.object.itemtype in ("List", "Structure"):
return
elif self.object.itemtype=="Integer" and value:
value = IntegerElement(value)
else:
# Convert Unicode strings to plain strings if possible
try:
value = StringElement(value)
except UnicodeEncodeError:
value = UnicodeElement(value)
name = self.object.itemname
else:
self.object = self.stack.pop()
return
value.tag = name
if self.attributes:
value.attributes = dict(self.attributes)
del self.attributes
current = self.stack[-1]
if current!="":
try:
current.append(value)
except AttributeError:
current[name] = value
def characterDataHandler(self, content):
self.content += content
def elementDecl(self, name, model):
"""This callback function is called for each element declaration:
<!ELEMENT name (...)>
encountered in a DTD. The purpose of this function is to determine
whether this element should be regarded as a string, integer, list
dictionary, structure, or error."""
if name.upper()=="ERROR":
self.errors.append(name)
return
if name=='Item' and model==(expat.model.XML_CTYPE_MIXED,
expat.model.XML_CQUANT_REP,
None, ((expat.model.XML_CTYPE_NAME,
expat.model.XML_CQUANT_NONE,
'Item',
()
),
)
):
# Special case. As far as I can tell, this only occurs in the
# eSummary DTD.
self.items.append(name)
return
# First, remove ignorable parentheses around declarations
while (model[0] in (expat.model.XML_CTYPE_SEQ,
expat.model.XML_CTYPE_CHOICE)
and model[1] in (expat.model.XML_CQUANT_NONE,
expat.model.XML_CQUANT_OPT)
and len(model[3])==1):
model = model[3][0]
# PCDATA declarations correspond to strings
if model[0] in (expat.model.XML_CTYPE_MIXED,
expat.model.XML_CTYPE_EMPTY):
self.strings.append(name)
return
# List-type elements
if (model[0] in (expat.model.XML_CTYPE_CHOICE,
expat.model.XML_CTYPE_SEQ) and
model[1] in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP)):
self.lists.append(name)
return
# This is the tricky case. Check which keys can occur multiple
# times. If only one key is possible, and it can occur multiple
# times, then this is a list. If more than one key is possible,
# but none of them can occur multiple times, then this is a
# dictionary. Otherwise, this is a structure.
# In 'single' and 'multiple', we keep track which keys can occur
# only once, and which can occur multiple times.
single = []
multiple = []
# The 'count' function is called recursively to make sure all the
# children in this model are counted. Error keys are ignored;
# they raise an exception in Python.
def count(model):
quantifier, name, children = model[1:]
if name==None:
if quantifier in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP):
for child in children:
multiple.append(child[2])
else:
for child in children:
count(child)
elif name.upper()!="ERROR":
if quantifier in (expat.model.XML_CQUANT_NONE,
expat.model.XML_CQUANT_OPT):
single.append(name)
elif quantifier in (expat.model.XML_CQUANT_PLUS,
expat.model.XML_CQUANT_REP):
multiple.append(name)
count(model)
if len(single)==0 and len(multiple)==1:
self.lists.append(name)
elif len(multiple)==0:
self.dictionaries.append(name)
else:
self.structures.update({name: multiple})
def open_dtd_file(self, filename):
path = os.path.join(DataHandler.local_dtd_dir, filename)
try:
handle = open(path, "rb")
except IOError:
pass
else:
return handle
path = os.path.join(DataHandler.global_dtd_dir, filename)
try:
handle = open(path, "rb")
except IOError:
pass
else:
return handle
return None
def externalEntityRefHandler(self, context, base, systemId, publicId):
"""The purpose of this function is to load the DTD locally, instead
of downloading it from the URL specified in the XML. Using the local
DTD results in much faster parsing. If the DTD is not found locally,
we try to download it. If new DTDs become available from NCBI,
putting them in Bio/Entrez/DTDs will allow the parser to see them."""
urlinfo = urlparse.urlparse(systemId)
#Following attribute requires Python 2.5+
#if urlinfo.scheme=='http':
if urlinfo[0]=='http':
# Then this is an absolute path to the DTD.
url = systemId
elif urlinfo[0]=='':
# Then this is a relative path to the DTD.
# Look at the parent URL to find the full path.
url = self.dtd_urls[-1]
source = os.path.dirname(url)
url = os.path.join(source, systemId)
self.dtd_urls.append(url)
# First, try to load the local version of the DTD file
location, filename = os.path.split(systemId)
handle = self.open_dtd_file(filename)
if not handle:
# DTD is not available as a local file. Try accessing it through
# the internet instead.
message = """\
Unable to load DTD file %s.
Bio.Entrez uses NCBI's DTD files to parse XML files returned by NCBI Entrez.
Though most of NCBI's DTD files are included in the Biopython distribution,
sometimes you may find that a particular DTD file is missing. While we can
access the DTD file through the internet, the parser is much faster if the
required DTD files are available locally.
For this purpose, please download %s from
%s
and save it either in directory
%s
or in directory
%s
in order for Bio.Entrez to find it.
Alternatively, you can save %s in the directory
Bio/Entrez/DTDs in the Biopython distribution, and reinstall Biopython.
Please also inform the Biopython developers about this missing DTD, by
reporting a bug on http://bugzilla.open-bio.org/ or sign up to our mailing
list and emailing us, so that we can include it with the next release of
Biopython.
Proceeding to access the DTD file through the internet...
""" % (filename, filename, url, self.global_dtd_dir, self.local_dtd_dir, filename)
warnings.warn(message)
try:
handle = urllib.urlopen(url)
except IOError:
raise RuntimeException("Failed to access %s at %s" % (filename, url))
parser = self.parser.ExternalEntityParserCreate(context)
parser.ElementDeclHandler = self.elementDecl
parser.ParseFile(handle)
handle.close()
self.dtd_urls.pop()
return 1
So you have a file containing multiple XML documents one after the other? Here is an example which strips out the <?xml ?> PIs and wraps the data in a root tag to parse the whole thing as a single XML document:
import re
import lxml.etree
re_strip_pi = re.compile('<\?xml [^?>]+\?>', re.M)
data = '<root>' + open('stocks.xml', 'rb').read() + '</root>'
match = re_strip_pi.search(data)
data = re_strip_pi.sub('', data)
tree = lxml.etree.fromstring(match.group() + data)
for prod in tree.xpath('//PRODUCT'):
    print prod
You can't have multiple XML documents in one XML file. Split the documents - composed in whatever way - into single XML files and parse them one-by-one.
