from typing import List
from PyPDF2 import PdfFileReader
from PyPDF2.generic import Destination
def get_outlines(pdf_filepath: str) -> List[Destination]:
    """Get the bookmarks of a PDF file."""
    with open(pdf_filepath, "rb") as fp:
        pdf_file_reader = PdfFileReader(fp)
        outlines = pdf_file_reader.getOutlines()
        return outlines

print(get_outlines("PDF-export-example.pdf"))
pyPdf.pdf.Destination has many properties, but I can't find one that gives the page number the bookmark refers to. How can I get the page number of each bookmark?
For example, outlines[1].page.idnum returns a number roughly three times larger than the page number referenced in the PDF document, so I assume it references some object smaller than a page. Running .page.idnum over the whole outline returns an array of numbers that is not even linearly correlated with the "real" destination page numbers in the document; it is only roughly a factor of ~3 off.
Update: This question is the same as "split a pdf based on outline", although I don't understand what the author did in his self-answer there. It seems too complicated to be usable.
As @theta pointed out, "split a pdf based on outline" has the code required to extract page numbers. If that feels complicated, I copied the part of the code that maps page ids to page numbers and made it a function. Here is a working example that prints the page number of bookmark o[0]:
from PyPDF2 import PdfFileReader
def _setup_page_id_to_num(pdf, pages=None, _result=None, _num_pages=None):
    if _result is None:
        _result = {}
    if pages is None:
        _num_pages = []
        pages = pdf.trailer["/Root"].getObject()["/Pages"].getObject()
    t = pages["/Type"]
    if t == "/Pages":
        for page in pages["/Kids"]:
            _result[page.idnum] = len(_num_pages)
            _setup_page_id_to_num(pdf, page.getObject(), _result, _num_pages)
    elif t == "/Page":
        _num_pages.append(1)
    return _result
# main
f = open('document.pdf','rb')
p = PdfFileReader(f)
# map page ids to page numbers
pg_id_num_map = _setup_page_id_to_num(p)
o = p.getOutlines()
pg_num = pg_id_num_map[o[0].page.idnum] + 1
print(pg_num)
Probably too late for @theta, but it might help others :) By the way, this is my first post on Stack Overflow, so excuse me if I did not follow the usual format.
To extend this further:
If you are looking to get the exact location on the page for a bookmark, this will make your job easier:
from PyPDF2 import PdfFileReader
import PyPDF2 as pyPdf
def _setup_page_id_to_num(pdf, pages=None, _result=None, _num_pages=None):
    if _result is None:
        _result = {}
    if pages is None:
        _num_pages = []
        pages = pdf.trailer["/Root"].getObject()["/Pages"].getObject()
    t = pages["/Type"]
    if t == "/Pages":
        for page in pages["/Kids"]:
            _result[page.idnum] = len(_num_pages)
            _setup_page_id_to_num(pdf, page.getObject(), _result, _num_pages)
    elif t == "/Page":
        _num_pages.append(1)
    return _result
def outlines_pg_zoom_info(outlines, pg_id_num_map, result=None):
    if result is None:
        result = dict()
    if type(outlines) == list:
        for outline in outlines:
            result = outlines_pg_zoom_info(outline, pg_id_num_map, result)
    elif type(outlines) == pyPdf.pdf.Destination:
        title = outlines['/Title']
        result[title.split()[0]] = dict(title=outlines['/Title'], top=outlines['/Top'],
                                        left=outlines['/Left'], page=(pg_id_num_map[outlines.page.idnum] + 1))
    return result
# main
pdf_name = 'document.pdf'
f = open(pdf_name,'rb')
pdf = PdfFileReader(f)
# map page ids to page numbers
pg_id_num_map = _setup_page_id_to_num(pdf)
outlines = pdf.getOutlines()
bookmarks_info = outlines_pg_zoom_info(outlines, pg_id_num_map)
print(bookmarks_info)
Note: My bookmarks are section numbers (e.g. 1.1 Introduction), and I am mapping the bookmark info to the section number. If your bookmarks are different, modify this part of the code:
elif type(outlines) == pyPdf.pdf.Destination:
    title = outlines['/Title']
    result[title.split()[0]] = dict(title=outlines['/Title'], top=outlines['/Top'],
                                    left=outlines['/Left'], page=(pg_id_num_map[outlines.page.idnum] + 1))
Manage bookmarks recursively, following vjayky's and Giulio D's suggestions.
PyPDF2 >= v1.25
from PyPDF2 import PdfFileReader
def printBookmarksPageNumbers(pdf):
    def review_and_print_bookmarks(bookmarks, lvl=0):
        for b in bookmarks:
            if type(b) == list:
                review_and_print_bookmarks(b, lvl + 4)
                continue
            pg_num = pdf.getDestinationPageNumber(b) + 1  # page count starts from 0
            print("%s%s: Page %s" % (" " * lvl, b.title, pg_num))
    review_and_print_bookmarks(pdf.getOutlines())

with open('document.pdf', "rb") as f:
    pdf = PdfFileReader(f)
    printBookmarksPageNumbers(pdf)
PyPDF2 < v1.25
from PyPDF2 import PdfFileReader
def printBookmarksPageNumbers(pdf):
    # Map page ids to page numbers
    pg_id_to_num = {}
    for pg_num in range(0, pdf.getNumPages()):
        pg_id_to_num[pdf.getPage(pg_num).indirectRef.idnum] = pg_num

    def review_and_print_bookmarks(bookmarks, lvl=0):
        for b in bookmarks:
            if type(b) == list:
                review_and_print_bookmarks(b, lvl + 4)
                continue
            pg_num = pg_id_to_num[b.page.idnum] + 1  # page count starts from 0
            print("%s%s: Page %s" % (" " * lvl, b.title, pg_num))
    review_and_print_bookmarks(pdf.getOutlines())

with open('document.pdf', "rb") as f:
    pdf = PdfFileReader(f)
    printBookmarksPageNumbers(pdf)
In 2019, for those who are interested in a faster way, it's possible to use:
from PyPDF2 import PdfFileReader
def printPageNumberFrom(filename):
    with open(filename, "rb") as f:
        pdf = PdfFileReader(f)
        bookmarks = pdf.getOutlines()
        for b in bookmarks:
            print(pdf.getDestinationPageNumber(b) + 1)  # page count starts from 0
I'm not sure, but according to the docs for pypdf.Destination, the page number for the bookmark is just Destination.page.
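For reference, a minimal sketch using the current pypdf API (assuming pypdf 3.x or newer and a file named document.pdf); reader.get_destination_page_number() resolves each outline item to a zero-based page index:
from pypdf import PdfReader

reader = PdfReader("document.pdf")

def walk(outline, depth=0):
    # outline entries are Destination objects or nested lists of them
    for item in outline:
        if isinstance(item, list):
            walk(item, depth + 1)
        else:
            page_num = reader.get_destination_page_number(item) + 1  # 1-based page number
            print("  " * depth + "%s: page %d" % (item.title, page_num))

walk(reader.outline)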
Related
I have a number of Word files, transcripts from MS Teams. I have a script that parses them into a pandas df and breaks each one down by speaker, text, and time. I would like to use dcc.Upload to upload, parse, store, and extract some insight with an NLP pipeline from these transcripts. The example provided by Plotly, naturally, deals with the csv and xlsx formats. Any ideas on how I would approach this? Any help is much appreciated!
def get_data_from_word(path_to_file):
    from docx import Document
    # Creating a word file object
    doc_object = open(path_to_file, "rb")
    # Creating a word reader object
    doc_reader = Document(doc_object)
    data = ""
    for p in doc_reader.paragraphs:
        data += p.text + "\n"
    return data
def get_csv(paragraphs):
    combined_paragraphs = []
    speaker_text = []
    for x in range(len(paragraphs)):
        try:
            speaker = paragraphs[x][1]
            next_speaker = paragraphs[x + 1][1]
            if speaker == next_speaker:
                speaker_text.append(paragraphs[x][2])
                # extract sentences
            else:
                speaker_text.append(paragraphs[x][2])
                text = ''.join(speaker_text)
                combined_paragraphs.append([speaker, text])
                speaker_text = []
        except:
            pass
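A minimal sketch of how a dcc.Upload callback could hand the uploaded .docx to python-docx (assuming Dash 2.x; the component ids and layout here are placeholders, not part of the original script):
import base64
import io

from dash import Dash, dcc, html, Input, Output
from docx import Document

app = Dash(__name__)
app.layout = html.Div([
    dcc.Upload(id="upload-transcript", children=html.Button("Upload .docx")),
    html.Pre(id="transcript-text"),
])

@app.callback(Output("transcript-text", "children"),
              Input("upload-transcript", "contents"),
              prevent_initial_call=True)
def parse_upload(contents):
    # dcc.Upload delivers the file as "data:<mime>;base64,<payload>"
    _, content_string = contents.split(",", 1)
    decoded = base64.b64decode(content_string)
    doc = Document(io.BytesIO(decoded))  # python-docx accepts file-like objects
    return "\n".join(p.text for p in doc.paragraphs)

if __name__ == "__main__":
    app.run(debug=True)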
I am trying to split a PDF file by finding a keyword in the text, grabbing the page the keyword is on plus the following 4 pages (5 pages total), and splitting those pages out of the original PDF into their own PDF, so the new PDF contains only those 5 pages. The loop should then continue, find the keyword again (it is repeated further down the original PDF X amount of times), and again put that page plus the 4 after it into its own PDF.
Example: the keyword is found on page 7 on the first loop, so I need page 7 plus pages 8-11, and those 5 pages (7-11) go into one PDF file.
On the next loop the keyword is found on page 12, so I need page 12 plus pages 13-16; pages 12-16 are split into their own PDF. At this point 2 separate PDFs have been created.
The code below finds the keyword and puts its page into its own PDF file, but only that one page; I am not sure how to include the range.
import os
from PyPDF2 import PdfFileReader, PdfFileWriter
path = "example.pdf"
fname = os.path.basename(path)
reader = PdfFileReader(path)
for page_number in range(reader.getNumPages()):
    writer = PdfFileWriter()
    writer.addPage(reader.getPage(page_number))
    text = reader.getPage(page_number).extractText()
    text_stripped = text.replace("\n", "")
    print(text_stripped)
    if text_stripped.find("Disregarded Branch") != (-1):
        output_filename = f"{fname}_page_{page_number + 1}.pdf"
        with open(output_filename, "wb") as out:
            writer.write(out)
        print(f"Created: {output_filename}")
disclaimer: I am the author of borb, the library used in this answer.
I think your question comes down to 2 common functionalities:
find the location of a given piece of text
merge/split/extract pages from a PDF
For the first part, there is a good tutorial in the examples repo.
You can find it here. I'll repeat one of the examples here for completeness.
import typing
from borb.pdf.document.document import Document
from borb.pdf.pdf import PDF
from borb.toolkit.text.simple_text_extraction import SimpleTextExtraction
def main():
    # read the Document
    doc: typing.Optional[Document] = None
    l: SimpleTextExtraction = SimpleTextExtraction()
    with open("output.pdf", "rb") as in_file_handle:
        doc = PDF.loads(in_file_handle, [l])

    # check whether we have read a Document
    assert doc is not None

    # print the text on the first Page
    print(l.get_text_for_page(0))

if __name__ == "__main__":
    main()
This example extracts all the text from page 0 of the PDF. Of course, you could simply iterate over all pages and check whether a given page contains the keyword you're looking for.
For the second part, you can find a good example in the examples repository. This is the link. This example (and subsequent example) takes you through the basics of frankensteining a PDF from various sources.
The example I copy/paste here shows how to build a PDF by alternately picking a page from input document 1 and input document 2.
import typing

from borb.pdf.document.document import Document
from borb.pdf.page.page import Page
from borb.pdf.pdf import PDF

def main():
    # open doc_001
    doc_001: typing.Optional[Document] = Document()
    with open("output_001.pdf", "rb") as pdf_file_handle:
        doc_001 = PDF.loads(pdf_file_handle)

    # open doc_002
    doc_002: typing.Optional[Document] = Document()
    with open("output_002.pdf", "rb") as pdf_file_handle:
        doc_002 = PDF.loads(pdf_file_handle)

    # create new document
    d: Document = Document()
    for i in range(0, 10):
        p: typing.Optional[Page] = None
        if i % 2 == 0:
            p = doc_001.get_page(i)
        else:
            p = doc_002.get_page(i)
        d.append_page(p)

    # write
    with open("output_003.pdf", "wb") as pdf_file_handle:
        PDF.dumps(pdf_file_handle, d)

if __name__ == "__main__":
    main()
You've almost got it!
import os
from PyPDF2 import PdfFileReader, PdfFileWriter
def create_4page_pdf(base_pdf_path, start):
    reader = PdfFileReader(base_pdf_path)
    writer = PdfFileWriter()
    # copy the keyword page plus the pages after it; widen the range
    # (e.g. range(5)) if you want the keyword page and the 4 pages following it
    for i in range(4):
        index = start + i
        if index < len(reader.pages):
            page = reader.pages[index]
            writer.addPage(page)

    fname = os.path.basename(base_pdf_path)
    output_filename = f"{fname}_page_{start + 1}.pdf"
    with open(output_filename, "wb") as out:
        writer.write(out)
    print(f"Created: {output_filename}")

def main(base_pdf_path="example.pdf"):
    reader = PdfFileReader(base_pdf_path)
    for page_number, page in enumerate(reader.pages):
        text = page.extractText()
        text_stripped = text.replace("\n", "")
        print(text_stripped)
        if text_stripped.find("Disregarded Branch") != (-1):
            create_4page_pdf(base_pdf_path, page_number)
I am trying to copy elements of a doc from one doc file to other. The text part is easy, the images is where it gets tricky.
Attaching an image to explain the structure of the doc: Just some text and 1 image.
from docx import Document
import io
doc = Document('/Users/neha/Desktop/testing.docx')
new_doc = Document()
for elem in doc.element.body:
    new_doc.element.body.append(elem)
new_doc.save('/Users/neha/Desktop/out.docx')
This gets me the whole structure of the doc in the new_doc, but the image is still blank.
The good thing is that I have the blank image in the right place, so I thought of getting the byte-level data from the original image and inserting it into the new doc. Here is how I extended the above code:
from docx import Document
import io
doc = Document('/Users/neha/Desktop/testing.docx')
new_doc = Document()
for elem in doc.element.body:
    new_doc.element.body.append(elem)
im = doc.inline_shapes[0]
blip = im._inline.graphic.graphicData.pic.blipFill.blip
rId = blip.embed
doc_part = doc.part
image_part = doc_part.related_parts[rId]
bytes = image_part._blob #Here I get the byte level data for the image
im2 = new_doc.inline_shapes[0]
blip2 = im2._inline.graphic.graphicData.pic.blipFill.blip
rId2 = blip2.embed
document_part2 = new_doc.part
document_part2.related_parts[rId2]._blob = bytes
new_doc.save('/Users/neha/Desktop/out.docx')
But the image still shows empty in the new_doc. What should I do from here?
I figured out a solution a couple of days back. The text loses its formatting this way, but the images are correctly placed.
The idea is: for each paragraph in the source doc, if there is text, I write it to the dest doc, and if there is an inline image present, I add a unique identifier at that place in the dest doc (refer here to see how these identifiers and contexts work in docxtpl). These identifiers and docxtpl proved to be particularly useful here. Using those unique identifiers I then create a 'context' (as shown below), which is basically a map from each unique identifier to its particular InlineImage, and finally I render this context.
Below is my code:
from docxtpl import DocxTemplate, InlineImage
from docx import Document
import io
import xml.etree.ElementTree as ET

dest = DocxTemplate()
source = Document(source_path)
context = {}
img_path = ""  # directory prefix for the extracted images

ims = [im for im in source.inline_shapes]
im_addresses = []
im_streams = []
count = 0

for im in ims:
    blip = im._inline.graphic.graphicData.pic.blipFill.blip
    rId = blip.embed
    doc_part = source.part
    image_part = doc_part.related_parts[rId]
    byte_data = image_part._blob
    image_stream = io.BytesIO(byte_data)
    im_streams.append(image_stream)
    image_name = img_path + "img_" + "_" + str(count) + ".jpeg"
    with open(image_name, "wb") as fh:
        fh.write(byte_data)
    im_addresses.append(image_name)
    count += 1

paras = source.paragraphs
im_idx = 0
for para in paras:
    p = dest.add_paragraph()
    r = p.add_run()
    if(para.text):
        r.add_text(para.text)
    root = ET.fromstring(para._p.xml)
    namespace = {'wp': "http://schemas.openxmlformats.org/drawingml/2006/wordprocessingDrawing"}
    inlines = root.findall('.//wp:inline', namespace)
    if(len(inlines) > 0):
        uid = "img_" + str(im_idx)
        r.add_text("{{ " + uid + " }}")
        context[uid] = InlineImage(dest, im_addresses[im_idx])
        im_idx += 1

try:
    dest.render(context)
except Exception as e:
    print(e)

dest.save(dest_path)
PS: If a paragraph has two images, this code will prove to be sub-optimal. One will have to make a change in the following part:
if(len(inlines) > 0):
    uid = "img_" + str(im_idx)
    r.add_text("{{ " + uid + " }}")
    context[uid] = InlineImage(dest, im_addresses[im_idx])
    im_idx += 1
You will have to add a for loop inside the if statement as well. I didn't need it because my images were usually big enough that they always came in different paragraphs. Just a side note for anyone who may need it.
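A minimal sketch of that change (just looping over the inline images found in a single paragraph):
if(len(inlines) > 0):
    for _ in inlines:  # one placeholder tag per inline image in this paragraph
        uid = "img_" + str(im_idx)
        r.add_text("{{ " + uid + " }}")
        context[uid] = InlineImage(dest, im_addresses[im_idx])
        im_idx += 1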
Cheers!
You could try:
Extract the images from the first document by unzipping the .docx file (per "How can I search a word in a Word 2007 .docx file?")
Save those images to the file system (as foo.png, for instance)
Generate the new .docx file with Python and add the .png file using document.add_picture('foo.png'); a rough sketch of the whole approach follows below.
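A minimal sketch of this approach, assuming the source file is testing.docx and that its images live under word/media/ inside the archive (the usual location in a .docx):
import zipfile
from docx import Document

# 1. pull the embedded images out of the .docx (it is just a zip archive)
extracted = []
with zipfile.ZipFile("testing.docx") as z:
    for name in z.namelist():
        if name.startswith("word/media/"):
            out_name = name.split("/")[-1]  # e.g. image1.png
            with open(out_name, "wb") as fh:
                fh.write(z.read(name))
            extracted.append(out_name)

# 2. build the new document and re-insert the extracted images
new_doc = Document()
new_doc.add_paragraph("Copied text goes here")
for image_file in extracted:
    new_doc.add_picture(image_file)
new_doc.save("out.docx")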
This problem is solved by the docxtpl package: https://docxtpl.readthedocs.io/en/latest/
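A minimal sketch of the basic docxtpl workflow, assuming a template file template.docx containing a {{ figure }} placeholder and an image foo.png (both names are just examples):
from docxtpl import DocxTemplate, InlineImage
from docx.shared import Mm

tpl = DocxTemplate("template.docx")  # .docx template containing a {{ figure }} tag
context = {
    "figure": InlineImage(tpl, "foo.png", width=Mm(80)),  # image rendered at the tag
}
tpl.render(context)
tpl.save("rendered.docx")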
This may just be due to PyPDF2's text extraction, but when I run the code below to rename the files, many of the most common words come out as "Nthe", "Nfrom" and "Ncommunications". I'm not sure what I can do to stop this from happening, or alternatively how to work around it.
What causes a problem like this?
Where are the N's coming from?
Other PDFs do exactly what I want, so I'm not sure where to go from here.
import PyPDF2
import re
from collections import Counter
import os.path
files = [f for f in os.listdir('.') if os.path.isfile(f)]
files = filter(lambda f: f.endswith(('.pdf','.PDF')), files)
for file in files:
    pdfFileObj = open('{0}'.format(file), 'rb')  # Open the file
    pdfReader = PyPDF2.PdfFileReader(pdfFileObj)  # Read the file
    frequency = {}  # Create dict
    ignore = {'the','a','if','in','it','of','or','and','for','can','that','are','this','your','you','will','not','have','its','with','need','has','from','more'}  # Ignore these common words
    print "Number of Pages %s " % pdfReader.numPages  # Print number of pages
    word_list = []
    for i in range(pdfReader.numPages):
        pageObj = pdfReader.getPage(i)  # Get page i
        word_list.append(pageObj.extractText())  # Add the pages together
    match_pattern = re.findall(r'\b[a-z]{3,15}\b', str(word_list))  # Find the words
    cnt = Counter()
    for words in match_pattern:  # Start counting the frequency
        words = words.lower()  # Lower-case the word before comparing
        if words not in ignore:  # Ignore common words
            count = frequency.get(words, 0)  # Start the count
            frequency[words] = count + 1  # Add one
    fl = sorted(frequency, key=frequency.__getitem__, reverse=True)[:3]  # Sort according to frequency
    pdfFileObj.close()  # Close the PDF
    newtitle = ' '.join(map(str, fl)).title()  # Join the title list together
    try:
        print newtitle  # Print the title
        os.rename('{0}'.format(file), '{0}.pdf'.format(newtitle))  # Rename the file
    except:
        print "Unable to Rename File"
I have created a web scraper to extract information about research papers that are published in a digital library (sample document).
Basically I'm extracting the title, abstract and list of references for each paper and storing them in text files. This process is repeated for all the referenced papers also.
I have used a queue to store the document IDs.
I need to extract such information from at least 5000 papers, but the program is too slow and takes about 3 hours to go through 250-300 papers.
What are the possible ways of improving the speed of this scraper?
Here is the code:
# _*_ coding:utf-8 _*_
import urllib2
import json
import Queue

crawled = []
fo = open("paper.txt", "w")

class Paper(object):
    def __init__(self, paper_id):
        self.paper_id = paper_id
        self.title, self.abstract = self.fetch_data()

    def fetch_data(self):
        base_url = "http://ieeexplore.ieee.org/rest/document/{0}/{1}"
        data_url = base_url.format(self.paper_id, "abstract")
        response = urllib2.urlopen(data_url)
        html = response.readlines()
        data = json.loads("\n".join(html))
        title = data["title"]
        abstract = data["abstract"]
        return title, abstract

    def fetch_ieee_references(self):
        base_url = "http://ieeexplore.ieee.org/rest/document/{0}/{1}"
        data_url = base_url.format(self.paper_id, "references")
        response = urllib2.urlopen(data_url)
        html = response.readlines()
        data = json.loads("\n".join(html))
        references = []
        try:
            for ref in data["references"]:
                try:
                    ref_link = ref["links"]["documentLink"]
                    ref_paper_id = ref_link.split("/")[-1]
                    references.append(Paper(ref_paper_id))
                except:
                    pass
        except:
            pass
        return references

    def extract_paper(self):
        try:
            print "Paper ID"
            print self.paper_id
            fname = str(self.paper_id)
            fname = fname + ".txt"
            fcon = open(fname, "w")
            print
            print "Title"
            print self.title
            print >>fcon, self.title
            print "Abstract"
            print self.abstract
            print >>fcon, self.abstract
            print "References"
            for ref in self.fetch_ieee_references():
                print ref.paper_id, ref.title
                print >>fo, self.paper_id, ref.paper_id
        except:
            pass

def new_func():
    n_id = 6639344
    q = Queue.Queue()
    q.put_nowait(n_id)
    crawled.append(n_id)
    while not q.empty():
        p_id = q.get_nowait()
        paper = Paper(p_id)
        paper.extract_paper()
        for ref in paper.fetch_ieee_references():
            if ref.paper_id not in crawled:
                crawled.append(ref.paper_id)
                q.put_nowait(ref.paper_id)
new_func()
As already mentioned by other users, it mostly depends on the speed of the HTTP requests, so you are dependent on the server of the site. To speed things up you can divide the papers between multiple processes.
Also, I don't get why you read the html and then use json.loads; you can just use json.load on the response, which will speed things up a little bit.
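A minimal sketch of both suggestions, matching the question's Python 2 urllib2/json style (the paper IDs and pool size below are placeholders): json.load parses straight from the response object, and a thread pool fetches several papers concurrently since the work is I/O-bound:
import json
import urllib2
from multiprocessing.dummy import Pool  # thread pool, fine for I/O-bound requests

BASE_URL = "http://ieeexplore.ieee.org/rest/document/{0}/abstract"

def fetch_abstract(paper_id):
    response = urllib2.urlopen(BASE_URL.format(paper_id))
    data = json.load(response)  # parse directly from the file-like response
    return paper_id, data.get("title"), data.get("abstract")

paper_ids = [6639344, 6639345]  # placeholder IDs
pool = Pool(8)  # 8 concurrent workers
for paper_id, title, abstract in pool.map(fetch_abstract, paper_ids):
    print paper_id, title
pool.close()
pool.join()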