How to create a multi-page PDF with pytesseract? - python

I'm trying to mark only a few words in a PDF and, with the results, make a new PDF using only pytesseract.
Here is the code:
images = convert_from_path(name, poppler_path=r'C:\Program Files\poppler-0.68.0\bin')
for i in images:
    img = cv.cvtColor(np.array(i), cv.COLOR_RGB2BGR)
    d = pytesseract.image_to_data(img, output_type=Output.DICT, lang='eng+equ', config="--psm 6")
    boxes = len(d['level'])
    for i in range(boxes):
        for e in functionEvent:  # functionEvent is a list of strings
            if e in d['text'][i]:
                (x, y, w, h) = (d['left'][i], d['top'][i], d['width'][i], d['height'][i])
                cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    pdf = pytesseract.image_to_pdf_or_hocr(img, extension='pdf')
    with open('results.pdf', 'w+b') as f:
        f.write(pdf)
What I have tried:
with open('results.pdf', 'a+b') as f:
    f.write(pdf)
If you know how I can fix this, please let me know.
I also don't mind at all if you recommend another module or share your opinion on how I should write the code.
Thanks in advance!

Try using PyPDF2 to stitch your PDFs together.
First, run each image through Tesseract OCR and store the resulting single-page PDFs in a list, like this:
pdf_pages = []
for filename in tqdm(os.listdir(in_dir)):
    img = Image.open(os.path.join(in_dir, filename))
    pdf = pytesseract.image_to_pdf_or_hocr(img, lang='slk', extension='pdf')
    pdf_pages.append(pdf)
Then iterate through each processed image or file, read the bytes, and add the pages using PdfFileReader like this (do not forget to import io):
import io
from PyPDF2 import PdfFileReader, PdfFileWriter

pdf_writer = PdfFileWriter()
for page in pdf_pages:
    pdf = PdfFileReader(io.BytesIO(page))
    pdf_writer.addPage(pdf.getPage(0))
Finally, create the output file and write the data to it:
file = open(out_dir, "w+b")
pdf_writer.write(file)
file.close()

Related

Extract all the pages of all the PDFs present in a folder in JPG format and store the images in a separate folder

I want to get the PDF pages as JPG (suppose the PDF contains 3 pages, then the output should be 3 images with a .jpg extension).
I tried 2-3 different ways but am not getting the results.
Below is the script I wrote, but it only gives the picture of the heading on the first page, and the image created is not stored in the specified folder.
import os
from PyPDF2 import PdfReader
from wand.image import Image as WImage

pdf_file = r"C:\Users\saura\Aidetic\image_processing_data\sample_file.pdf"

def pdf_to_img(pdf_file):
    # Open the PDF file
    # pdf = open("pdf_file", "rb")
    pdf = PdfReader(pdf_file)
    # Create a folder to store the images
    folder_name = pdf_file[:-4]
    if not os.path.exists(folder_name):
        os.makedirs(folder_name)
    page = pdf.pages[0]
    count = 0
    # Iterate through each page of the PDF
    # for i in pdf.pages:
    # Get the current page
    for image in page.images:
        # page = pdf.pages[i]
        # Convert the page to an image
        with open(str(count) + image.name, "wb") as fp:
            fp.write(image.data)
        count += 1
        # Save the image to the folder
        # img.save(filename=os.path.join(folder_name, str(i) + ".jpg"))

# Get the list of PDF files in the current directory
pdf_files = [f for f in os.listdir() if f.endswith(".pdf")]
# Iterate through each PDF file
for pdf_file in pdf_files:
    pdf_to_img(pdf_file)
As far as I know, PyPDF2 is not capable of rendering a page of a document as an image.
In contrast, PyMuPDF can do this. Snippet here:
import fitz  # import PyMuPDF

doc = fitz.open("your.pdf")
# NOTE: apart from PDF, you can do the exact same thing for EPUB,
# MOBI, XPS, FB2, CBZ documents - no code change required.
for page in doc:  # iterate over document pages
    pix = page.get_pixmap(dpi=150)  # render full page with desired DPI
    pix.save("page-%04i.png" % page.number)  # PNG directly supported
    # if JPEG desired, use a variant that employs Pillow:
    # pix.pil_save("page-%04i.jpg" % page.number)
In the next version, JPEG will be directly supported.
Method get_pixmap() has several parameters to choose the colorspace (e.g. gray), include a transparency channel, or restrict the page area to be rendered.
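For example, a minimal sketch of those options (the clip rectangle here is only an illustrative value):

import fitz

doc = fitz.open("your.pdf")
page = doc[0]
# render only the top half of the page, in grayscale, with an alpha channel
clip = fitz.Rect(0, 0, page.rect.width, page.rect.height / 2)
pix = page.get_pixmap(dpi=150, colorspace=fitz.csGRAY, alpha=True, clip=clip)
pix.save("page-0-top.png")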
Here is a version that iterates over a list of files (PDF or other document types) and saves all their pages in a folder "images". This time we are using a Python context manager.
import os
import fitz

filelist = []  # list of filenames (PDFs, XPS, EPUB, MOBI, ...)
os.makedirs("images", exist_ok=True)  # make sure the output folder exists
for fname in filelist:
    basename = os.path.basename(fname)
    first = os.path.splitext(basename)[0]
    with fitz.open(fname) as doc:
        for page in doc:
            pix = page.get_pixmap(dpi=150)
            pix.save(os.path.join("images", "%s_%04i.png" % (first, page.number)))

Is it possible to write an image to a CSV file?

Hi everyone, this is my first post here. I wanted to know how I can write image files that I scraped from a website to a CSV file, or, if that is not possible with CSV, how I can write the header, description, time info and image to a Word file instead. Here is the code.
Everything works perfectly; I just want to know how I can write the images that I downloaded to disk into a CSV or Word file.
Thanks for your help.
import csv
import requests
from bs4 import BeautifulSoup

site_link = requests.get("websitenamehere").text
soup = BeautifulSoup(site_link, "lxml")

read_file = open("blogger.csv", "w", encoding="UTF-8")
csv_writer = csv.writer(read_file)
csv_writer.writerow(["Header", "links", "Publish Time"])

counter = 0
for article in soup.find_all("article"):
    ### Counting lines
    counter += 1
    print(counter)
    # Article Headers
    headers = article.find("a")["title"]
    print(headers)
    #### Links
    links = article.find("a")["href"]
    print(links)
    #### Publish time
    publish_time = article.find("div", class_="mkdf-post-info-date entry-date published updated")
    publish_time = publish_time.a.text.strip()
    print(publish_time)
    ### image links
    images = article.find("img", class_="attachment-full size-full wp-post-image nitro-lazy")["nitro-lazy-src"]
    print(images)
    ### Download Article Pictures to disk
    pic_name = f"{counter}.jpg"
    with open(pic_name, 'wb') as handle:
        response = requests.get(images, stream=True)
        for block in response.iter_content(1024):
            handle.write(block)
    ### CSV Rows
    csv_writer.writerow([headers, links, publish_time])
    print()

read_file.close()
You could basically convert the image to base64 and write it to a file as you need it:
import base64

with open("image.png", "rb") as image_file:
    encoded_string = base64.b64encode(image_file.read())
    print(encoded_string.decode('utf-8'))
A CSV file is supposed to contain only text fields. Even though the csv module does its best to quote fields so that almost any character is allowed in them, including the separator or a newline, it cannot handle the NULL characters that may exist in an image file.
That means you will have to encode the image bytes if you want to store them in a CSV file. Base64 is a well-known format natively supported by the Python standard library, so you could change your code to:
import base64
...
    ### Download Article Pictures
    response = requests.get(images, stream=True)
    image = b''.join(block for block in response.iter_content(1024))  # raw image bytes
    image = base64.b64encode(image).decode('ascii')  # base64-encoded text string
    ### CSV Rows
    csv_writer.writerow([headers, links, publish_time, image])
The image will simply have to be decoded before being used...
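For instance, a minimal sketch of reading a row back and decoding the image column (the file names are just placeholders):

import base64
import csv

with open("blogger.csv", encoding="UTF-8") as f:
    for row in csv.reader(f):
        # skip the header row and any row without an image column
        if not row or row[0] == "Header" or len(row) < 4:
            continue
        image_bytes = base64.b64decode(row[3])  # back to the raw image bytes
        with open("restored.jpg", "wb") as out:
            out.write(image_bytes)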

How to read TEXT in an image inside a PDF file using Python?

I have tried reading a PDF file with tabular data and text, and succeeded. But I have an image which is in PDF format and contains some text that needs to be fetched for record purposes. All the PDFs are in a specific folder. I only know the basics of Python.
Could anyone help me with this?
You can extract both images (inline & XObject) and text (plain and containing PDF operators) from a PDF document using pdfreader.
Here is sample code extracting all of the above from all document pages.
from pdfreader import SimplePDFViewer, PageDoesNotExist

fd = open(you_pdf_file_name, "rb")
viewer = SimplePDFViewer(fd)

plain_text = ""
pdf_markdown = ""
images = []
try:
    while True:
        viewer.render()
        pdf_markdown += viewer.canvas.text_content
        plain_text += "".join(viewer.canvas.strings)
        images.extend(viewer.canvas.inline_images)
        images.extend(viewer.canvas.images.values())
        viewer.next()
except PageDoesNotExist:
    pass
You can also convert the images to PIL/Pillow objects and save them:
for i, img in enumerate(images):
    img.to_Pillow().save("{}.png".format(i))

How to convert a whole PDF to text in Python

I have to convert a whole PDF to text. I have seen in many places how to convert a PDF to text, but only for a particular page.
from PyPDF2 import PdfFileReader
import os

def text_extractor(path):
    with open(os.path.join(path, file), 'rb') as f:
        pdf = PdfFileReader(f)
        ### Here I can specify a page, but I need to convert the whole pdf without specifying pages ###
        page = pdf.getPage(0)
        text = page.extractText()
        print(text)

if __name__ == '__main__':
    path = "C:\\Users\\AAAA\\Desktop\\BB"
    for file in os.listdir(path):
        if not file.endswith(".pdf"):
            continue
        text_extractor(path)
How can I convert the whole PDF file to text without using getPage()?
You may want to use textract, as this answer recommends, to get the full document if all you want is the text.
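A minimal sketch of that approach (the file name is just a placeholder):

import textract

# textract picks a PDF backend for you and returns the extracted text as bytes
text = textract.process("mypdf.pdf")
print(text.decode("utf-8"))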
If you want to use PyPDF2, you can first get the number of pages and then iterate over each page, such as:
from PyPDF2 import PdfFileReader
import os

def text_extractor(path):
    with open(os.path.join(path, file), 'rb') as f:
        pdf = PdfFileReader(f)
        ### Iterate over every page instead of specifying a single one ###
        text = ""
        for page_num in range(pdf.getNumPages()):
            page = pdf.getPage(page_num)
            text += page.extractText()
        print(text)

if __name__ == '__main__':
    path = "C:\\Users\\AAAA\\Desktop\\BB"
    for file in os.listdir(path):
        if not file.endswith(".pdf"):
            continue
        text_extractor(path)
Though you may want to remember which page the text came from, in which case you could use a list:
page_text = []
for page_num in range(pdf.getNumPages()):   # For each page
    page = pdf.getPage(page_num)             # Get that page's reference
    page_text.append(page.extractText())     # Add that page to our list

for page in page_text:
    print(page)  # print each page
You could use tika to accomplish this task, but the output needs a little cleaning.
from tika import parser
parse_entire_pdf = parser.from_file('mypdf.pdf', xmlContent=True)
parse_entire_pdf = parse_entire_pdf['content']
print (parse_entire_pdf)
This answer uses PyPDF2 and encode('utf-8') to keep the output per page together.
from PyPDF2 import PdfFileReader

def pdf_text_extractor(path):
    with open(path, 'rb') as f:
        pdf = PdfFileReader(f)
        # Get the total pdf page number.
        totalPageNumber = pdf.numPages
        currentPageNumber = 0
        while currentPageNumber < totalPageNumber:
            page = pdf.getPage(currentPageNumber)
            text = page.extractText()
            # The encoding puts each page on a single line.
            # type is <class 'bytes'>
            print(text.encode('utf-8'))
            #################################
            # This outputs the text to a list,
            # but it doesn't keep paragraphs
            # together
            #################################
            # output = text.encode('utf-8')
            # split = str(output, 'utf-8').split('\n')
            # print (split)
            #################################
            # Process next page.
            currentPageNumber += 1

path = 'mypdf.pdf'
pdf_text_extractor(path)
Try pdfreader. You can extract either plain text or decoded text containing "pdf markdown":
from pdfreader import SimplePDFViewer, PageDoesNotExist

fd = open(you_pdf_file_name, "rb")
viewer = SimplePDFViewer(fd)

plain_text = ""
pdf_markdown = ""
try:
    while True:
        viewer.render()
        pdf_markdown += viewer.canvas.text_content
        plain_text += "".join(viewer.canvas.strings)
        viewer.next()
except PageDoesNotExist:
    pass
PDF is a page-oriented format & therefore you'll need to deal with the concept of pages.
What makes it perhaps even more difficult is that you're not guaranteed that the text excerpts you extract come out in the same order as they are presented on the page: PDF allows one to say "put this text within a 4x3 box situated 1" from the top, with a 1" left margin", and then the next set of text can be placed somewhere else on the same page.
Your extractText() function simply gets the extracted text blocks in document order, not presentation order.
Tables are notoriously difficult to extract in a common, meaningful way... You see them as tables, PDF sees them as text blocks placed on the page with little or no relationship.
Still, getPage() and extractText() are good starting points & if you have simply formatted pages, they may work fine.
I found a very simple way to do this.
You have to follow these steps:
Install PyPDF2: if you use Anaconda, search for the Anaconda Prompt and type the following command (you need administrator permission for this):
pip install PyPDF2
If you're not using Anaconda, you have to install pip and add its path to your cmd or terminal.
Python code: the following code shows how to convert a pdf file very easily:
import PyPDF2

with open("pdf file path here", 'rb') as file_obj:
    pdf_reader = PyPDF2.PdfFileReader(file_obj)
    raw = pdf_reader.getPage(0).extractText()
    print(raw)
I just used the pdftotext module to get this done easily.
import pdftotext

# Load your PDF
with open("test.pdf", "rb") as f:
    pdf = pdftotext.PDF(f)

# create a text file after iterating through all pages in the pdf
file = open("test.txt", "w")
for page in pdf:
    file.write(page)
file.close()
Link: https://github.com/manojitballav/pdf-text

Extract images from PDF using python PyPDF2

Is there any way to extract images as a stream from a PDF document (using the PyPDF2 library)?
Also, is it possible to replace some images with others (generated with PIL, for example, or loaded from a file)?
I'm able to get an EncodedStreamObject from the pdf object tree and get the encoded stream (by calling the getData() method), but it looks like it is just raw content without any image headers or other meta information.
>>> import PyPDF2
>>> # sample.pdf contains png images
>>> reader = PyPDF2.PdfFileReader(open('sample.pdf', 'rb'))
>>> reader.resolvedObjects[0][9]
{'/BitsPerComponent': 8,
'/ColorSpace': ['/ICCBased', IndirectObject(20, 0)],
'/Filter': '/FlateDecode',
'/Height': 30,
'/Subtype': '/Image',
'/Type': '/XObject',
'/Width': 100}
>>>
>>> reader.resolvedObjects[0][9].__class__
PyPDF2.generic.EncodedStreamObject
>>>
>>> s = reader.resolvedObjects[0][9].getData()
>>> len(s), s[:10]
(9000, '\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc')
I've looked across PyPDF2, ReportLab and PDFMiner solutions quite a bit, but haven't found anything like what I'm looking for.
Any code samples and links will be very helpful.
import fitz

doc = fitz.open(filePath)
for i in range(len(doc)):
    for img in doc.getPageImageList(i):
        xref = img[0]
        pix = fitz.Pixmap(doc, xref)
        if pix.n < 5:  # this is GRAY or RGB
            pix.writePNG("p%s-%s.png" % (i, xref))
        else:          # CMYK: convert to RGB first
            pix1 = fitz.Pixmap(fitz.csRGB, pix)
            pix1.writePNG("p%s-%s.png" % (i, xref))
            pix1 = None
        pix = None
Image metadata is not stored within the encoded images of a PDF. If metadata is stored at all, it is stored in the PDF itself, but stripped from the underlying image. The metadata you see in your example is likely all that you'll be able to get. It's possible that PDF encoders may store image metadata elsewhere in the PDF, but I haven't seen this. (Note this metadata question was also asked for Java.)
It's definitely possible to extract the stream, however; as you mentioned, you use the getData operation.
As for replacing it, you'll need to create a new image object in the PDF, add it to the end, and update the indirect object pointers accordingly. It will be difficult to do this with PyPDF2.
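On the extraction side, here is a minimal sketch that decodes a page's image XObjects with the old PyPDF2 API and rebuilds them with Pillow; it assumes FlateDecode-compressed RGB data, which is not true of every PDF:

import PyPDF2
from PIL import Image

reader = PyPDF2.PdfFileReader(open('sample.pdf', 'rb'))
page = reader.getPage(0)
xObject = page['/Resources']['/XObject'].getObject()

for name in xObject:
    obj = xObject[name]
    if obj['/Subtype'] != '/Image':
        continue
    if obj.get('/Filter') == '/FlateDecode':
        size = (obj['/Width'], obj['/Height'])
        data = obj.getData()  # de-flated raw pixel data
        # assumes an RGB colorspace; use mode "L" or "CMYK" for other colorspaces
        Image.frombytes("RGB", size, data).save(name[1:] + ".png")
    # /DCTDecode streams are already JPEG data and can be written out as-is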
Extracting Images from PDF
This code helps fetch any images in a scanned, machine-generated, or normal PDF:
it determines how many images occur on each page, and
it fetches the images with their original resolution and extension.
pip install PyMuPDF
import fitz
import io
from PIL import Image

# file path you want to extract images from
file = r"File_path"
# open the file
pdf_file = fitz.open(file)

# iterate over PDF pages
for page_index in range(pdf_file.page_count):
    # get the page itself
    page = pdf_file[page_index]
    image_li = page.get_images()
    # printing number of images found on this page
    # page index starts from 0, hence adding 1 to its content
    if image_li:
        print(f"[+] Found a total of {len(image_li)} images in page {page_index+1}")
    else:
        print(f"[!] No images found on page {page_index+1}")
    for image_index, img in enumerate(page.get_images(), start=1):
        # get the XREF of the image
        xref = img[0]
        # extract the image bytes
        base_image = pdf_file.extract_image(xref)
        image_bytes = base_image["image"]
        # get the image extension
        image_ext = base_image["ext"]
        # load it into PIL
        image = Image.open(io.BytesIO(image_bytes))
        # save it to local disk
        image.save(open(f"image{page_index+1}_{image_index}.{image_ext}", "wb"))
