Export PDF to csv using python (tabula)

When exporting a PDF file to csv, it returns an error: writeheader() takes 1 positional argument but 2 were given
from tabula import read_pdf
from tabulate import tabulate
import csv
df = read_pdf("asd.pdf")
print(df)
with open('ddd.csv', "w", newline="") as file:
    columns = ['specialty ', "name", 'number_of_seats', 'Total_seats', "document_type", "concent"]
    writer = csv.DictWriter(file, fieldnames=columns)
    writer.writeheader(df)

Code copied from http://theautomatic.net/2019/05/24/3-ways-to-scrape-tables-from-pdfs-with-python/, where there are also more details ...
import tabula
file = "http://lab.fs.uni-lj.si/lasin/wp/IMIT_files/neural/doc/seminar8.pdf"
#tables = tabula.read_pdf(file, pages = "all", multiple_tables = True)
# output just the first table in the PDF to a CSV
tabula.convert_into(file, "output.csv", output_format="csv")
# output all the tables in the PDF to a CSV
tabula.convert_into(file, "output.csv", output_format="csv", pages='all')
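As for the original error: csv.DictWriter.writeheader() takes no arguments (it writes the fieldnames passed to DictWriter), which is why passing df raises the TypeError. A minimal sketch of writing the read_pdf result to csv without DictWriter, assuming the PDF contains a single table (recent tabula-py versions return a list of DataFrames):
from tabula import read_pdf

tables = read_pdf("asd.pdf", pages="all")
df = tables[0] if isinstance(tables, list) else tables  # recent tabula-py returns a list
df.to_csv("ddd.csv", index=False)  # pandas writes the header and rows directly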

Related

How to convert Word Doc with tables to csv and output as JSON

I have the following code to extract tables from a word doc and create a list of csv files from the tables:
from docx import Document
import pandas as pd
import csv
import json
import time
document = Document('pathtoFile')
tables = []
for table in document.tables:
    df = [['' for i in range(len(table.columns))] for j in range(len(table.rows))]
    for i, row in enumerate(table.rows):
        for j, cell in enumerate(row.cells):
            if cell.text:
                df[i][j] = cell.text
    tables.append(pd.DataFrame(df))
for nr, i in enumerate(tables):
    i.to_csv("table_" + str(nr) + ".csv")
I also have the following script to take a csv file and extract it to JSON:
import csv
import json
import time
def csv_to_json(csvFilePath, jsonFilePath):
    jsonArray = []
    # read csv file
    with open(csvFilePath, encoding='utf-8', errors='ignore') as csvf:
        # load csv file data using csv library's dictionary reader
        csvReader = csv.DictReader(csvf)
        # convert each csv row into python dict
        for row in csvReader:
            # add this python dict to json array
            jsonArray.append(row)
    # convert python jsonArray to JSON String and write to file
    with open(jsonFilePath, 'w', encoding='utf-8') as jsonf:
        jsonString = json.dumps(jsonArray, indent=4)
        jsonf.write(jsonString)

csvFilePath = r'pathtoFile'
jsonFilePath = r'pathtoFile'
start = time.perf_counter()
csv_to_json(csvFilePath, jsonFilePath)
finish = time.perf_counter()
print(f"Conversion completed successfully in {finish - start:0.4f} seconds")
The main issue is combining the two and figuring out how to take the Word document with the tables, extract them to CSVs, then take the CSVs and convert them to JSON. I may be overcomplicating this, but I'm open to suggestions.
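One way to combine the two is to skip the intermediate csv files and write JSON straight from the DataFrames. A minimal sketch, assuming the first row of each table holds the column headers (adjust if your tables have no header row):
import json
from docx import Document
import pandas as pd

document = Document('pathtoFile')
for nr, table in enumerate(document.tables):
    rows = [[cell.text for cell in row.cells] for row in table.rows]
    df = pd.DataFrame(rows[1:], columns=rows[0])
    # orient='records' produces the same list-of-dicts shape as csv_to_json
    with open(f"table_{nr}.json", "w", encoding="utf-8") as jsonf:
        json.dump(df.to_dict(orient='records'), jsonf, indent=4)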

How to download a csv file in Python

I am trying to download a csv file from the url
https://qubeshub.org/publications/1220/supportingdocs/1#supportingdocs .
The file is Elephant Morphometrics and Tusk Size-originaldata-3861.csv
I have tried using pd.read_csv()
and
import pandas as pd
import io
import requests
url="https://qubeshub.org/publications/1220/supportingdocs/1#supportingdocs/Elephant Morphometrics and Tusk Size-originaldata-3861.csv"
s=requests.get(url).content
c=pd.read_csv(io.StringIO(s.decode('utf-8')))
Try:
import requests
url = "https://qubeshub.org/publications/1220/serve/1/3861?el=1&download=1"
r = requests.get(url)
filename = r.headers["Content-Disposition"].split('"')[1]
with open(filename, "wb") as f_out:
    print(f"Downloading {filename}")
    f_out.write(r.content)
Prints:
Downloading Elephant Morphometrics and Tusk Size-originaldata-3861.csv
and saves the file.
This should download the file and parse the rows and columns into a csv file
import requests
import csv
url = "https://qubeshub.org/publications/1220/serve/1/3861?el=1&download=1"
req=requests.get(url)
rows = req.content.decode('utf-8').split("\r\n")
rows.pop()
csv_local_filename = "test.csv"
with open(csv_local_filename, 'w') as fs:
    writer = csv.writer(fs, delimiter=',')
    for row in rows:
        entries = row.split(',')
        b = writer.writerow(entries)
You'll likely want to convert those columns into the desired types before you start working with them. The example code above leaves everything as a string; a sketch of that conversion follows the sample output below.
After I run the above code I see:
>tail test.csv
2005-13,88,m,32.5,290,162.3,40
2005-13,51,m,37.5,270,113.2,40
2005-13,86,m,37.5,310,175.3,38
and
>head test.csv
Years of sample collection,Elephant ID,Sex,Estimated Age (years),shoulder Height in cm,Tusk Length in cm,Tusk Circumference in cm
1966-68,12,f,0.08,102,,
1966-68,34,f,0.08,89,,
1966-68,162,f,0.083,89,,
1966-68,292,f,0.083,92,,
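A minimal sketch of that type conversion with pandas (column names taken from the header shown above; errors='coerce' turns the blank tusk measurements into NaN):
import pandas as pd

df = pd.read_csv("test.csv")
# convert the measurement columns from strings to numbers
for col in ["Estimated Age (years)", "shoulder Height in cm",
            "Tusk Length in cm", "Tusk Circumference in cm"]:
    df[col] = pd.to_numeric(df[col], errors="coerce")
print(df.dtypes)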
In Firefox, after downloading the file in the browser, you can check the link to this file; it shows
https://qubeshub.org/publications/1220/serve/1/3861?el=1&download=1
and this is the link you should use in the code
import pandas as pd
df = pd.read_csv('https://qubeshub.org/publications/1220/serve/1/3861?el=1&download=1')
print(df)

How to download an excel file in python and streamlit?

I have a Python script that reads files and converts them to a dataframe using Python and streamlit. Then I want to create a function that allows the user to download this dataframe as an Excel file with extension .xls.
So I tried to read the dataframe and convert it to an Excel file using these two functions:
pd.ExcelWriter
df.to_excel
But when I try to download the file using a link, the file doesn't download and this error is displayed:
Failed-Network error
Code:
import pandas as pd
import streamlit as st
writer = pd.ExcelWriter('update2.xlsx')
df.to_excel(writer, index = False, header=True,encoding='utf-8')
with open(writer, 'rb') as f:
    b64 = base64.b64encode(f.read())
href = f'Download {extension}'
st.write(href, unsafe_allow_html=True)
With the latest Streamlit release (above 1.0.0):
Use
st.download_button
Displays a download button widget.
This is useful when you would like to provide a way for your users to download a file directly from your app.
Note that the data to be downloaded is stored in memory while the user is connected, so it's a good idea to keep file sizes under a couple of hundred megabytes to conserve memory.
Here is sample code from the discussion that can be helpful for downloading excel files...
import pandas as pd
from io import BytesIO
from pyxlsb import open_workbook as open_xlsb
import streamlit as st
def to_excel(df):
    output = BytesIO()
    writer = pd.ExcelWriter(output, engine='xlsxwriter')
    df.to_excel(writer, index=False, sheet_name='Sheet1')
    workbook = writer.book
    worksheet = writer.sheets['Sheet1']
    format1 = workbook.add_format({'num_format': '0.00'})
    worksheet.set_column('A:A', None, format1)
    writer.save()  # renamed to writer.close() in newer pandas versions
    processed_data = output.getvalue()
    return processed_data

df_xlsx = to_excel(df)
st.download_button(label='📥 Download Current Result',
                   data=df_xlsx,
                   file_name='df_test.xlsx')
This worked for me:
import pandas as pd
from io import BytesIO
import streamlit as st
def to_excel(df: pd.DataFrame):
    in_memory_fp = BytesIO()
    df.to_excel(in_memory_fp)
    # Rewind the in-memory buffer before reading its contents back.
    in_memory_fp.seek(0, 0)
    return in_memory_fp.read()

cols = ["col1", "col2"]
df = pd.DataFrame.from_records([{k: 0.0 for k in cols} for _ in range(25)])
excel_data = to_excel(df)
file_name = "excel.xlsx"
st.download_button(
    f"Click to download {file_name}",
    excel_data,
    file_name,
    # note: the standard MIME type for .xlsx is
    # "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
    f"text/{file_name}",
    key=file_name
)
Line 5 can't be executed since you haven't assigned any excel data to the DataFrame df.
Try something like this in your code:
df = pd.read_excel('update2.xlsx')
I hope this helped.
Take care
import base64
import os

def get_binary_file_downloader_html(bin_file, file_label='File'):
    with open(bin_file, 'rb') as f:
        data = f.read()
    bin_str = base64.b64encode(data).decode()
    # a data-URL link lets the browser download the file when the markdown is rendered with unsafe_allow_html
    href = (f'<a href="data:application/octet-stream;base64,{bin_str}" '
            f'download="{os.path.basename(bin_file)}">Download {file_label}</a>')
    return href

st.markdown(get_binary_file_downloader_html('Wip_QRY.xlsx', 'Excel'), unsafe_allow_html=True)

I have converted a pdf file to csv using anaconda python3, but the converted csv file is not in a readable form. How do I make it readable?

# importing required modules
import PyPDF2
import pandas as pd
# creating a pdf file object
pdfFileObj = open(path, 'rb')
# creating a pdf reader object
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
# printing number of pages in pdf file
print(pdfReader.numPages)
# creating a page object
pageObj = pdfReader.getPage(0)
# extracting text from page
print(pageObj.extractText())
df = pd.DataFrame(pdfFileObj)
print (df)
df.to_csv('output.csv')
I have converted a pdf file to csv using anaconda python 3, but the converted csv file is not in a readable form. How do I make that csv readable?
I tested your method and I couldn't find a way to correct the csv output. I usually do it this way:
import csv
import os
from miner_text_generator import extract_text_by_page
def export_as_csv(pdf_path, csv_path):
    filename = os.path.splitext(os.path.basename(pdf_path))[0]
    counter = 1
    with open(csv_path, 'w') as csv_file:
        writer = csv.writer(csv_file)
        for page in extract_text_by_page(pdf_path):
            text = page[0:100]
            words = text.split()
            writer.writerow(words)

if __name__ == '__main__':
    pdf_path = '<your path to the file>.pdf'
    csv_path = '<path to the output>.csv'
    export_as_csv(pdf_path, csv_path)
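Note that extract_text_by_page is a helper from a pdfminer tutorial, not a standard module. A self-contained sketch of the same idea, assuming pdfminer.six is installed (its high-level extract_text separates pages with form-feed characters):
import csv
from pdfminer.high_level import extract_text

def export_as_csv(pdf_path, csv_path):
    pages = extract_text(pdf_path).split('\f')  # one entry per page
    with open(csv_path, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        for page in pages:
            words = page[:100].split()  # first 100 characters of the page, as above
            if words:
                writer.writerow(words)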

Converting all worksheets in an Excel workbook to csv format

My Excel document my.xlsx has two Sheets named Sheet1 and Sheet2. I want to convert all worksheets to csv format using xlsx2csv. I used the following commands:
from xlsx2csv import *
xlsx2csv my.xlsx convert.csv
File "<stdin>", line 1
xlsx2csv my.xlsx convert.csv
^
SyntaxError: invalid syntax
x2c -a my.xlsx my1.csv
File "<stdin>", line 1
x2c -a my.xlsx my1.csv
^
SyntaxError: invalid syntax
Any help, please.
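Note that xlsx2csv my.xlsx convert.csv and x2c -a my.xlsx my1.csv are shell commands, so typing them at the Python >>> prompt produces the SyntaxError shown above. From Python itself the package exposes an Xlsx2csv class; a minimal sketch for the two sheets named in the question (assuming xlsx2csv is installed):
from xlsx2csv import Xlsx2csv

# convert each sheet by its 1-based id (Sheet1 -> 1, Sheet2 -> 2)
for sheet_id, name in [(1, 'Sheet1'), (2, 'Sheet2')]:
    Xlsx2csv('my.xlsx', outputencoding='utf-8').convert(name + '.csv', sheetid=sheet_id)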
I have not used xlsx2csv before but why don't we try pandas.
Your requirement can be solved like this:
import pandas as pd
for sheet in ['Sheet1', 'Sheet2']:
    # older pandas used sheetname=; current versions use sheet_name=
    df = pd.read_excel('my.xlsx', sheet_name=sheet)
    df.to_csv(sheet + '_output.csv', index=False)
You can do something like the following:
import pandas as pd

xls_file = pd.ExcelFile('<path_to_your_excel_file>')
sheet_names = xls_file.sheet_names
for sheet in sheet_names:
    df = xls_file.parse(sheet)
    df.to_csv(sheet + '.csv', index=False)  # write each parsed sheet out as its own csv
Xlsx2csv python implementation:
I could only execute Xlsx2csv with the sheetid parameter. In order to get sheet names and ids, get_sheet_details was used.
csvfrmxlsx creates csv files for each sheet in a csv folder under the parent directory.
import pandas as pd
from pathlib import Path

def get_sheet_details(filename):
    import os
    import xmltodict
    import shutil
    import zipfile
    sheets = []
    # Make a temporary directory with the file name
    directory_to_extract_to = (filename.with_suffix(''))
    os.mkdir(directory_to_extract_to)
    # Extract the xlsx file as it is just a zip file
    zip_ref = zipfile.ZipFile(filename, 'r')
    zip_ref.extractall(directory_to_extract_to)
    zip_ref.close()
    # Open the workbook.xml which is very light and only has meta data, get sheets from it
    path_to_workbook = directory_to_extract_to / 'xl' / 'workbook.xml'
    with open(path_to_workbook, 'r') as f:
        xml = f.read()
        dictionary = xmltodict.parse(xml)
        for sheet in dictionary['workbook']['sheets']['sheet']:
            sheet_details = {
                'id': sheet['@sheetId'],  # can be sheetId for some versions
                'name': sheet['@name']  # can be name
            }
            sheets.append(sheet_details)
    # Delete the extracted files directory
    shutil.rmtree(directory_to_extract_to)
    return sheets

def csvfrmxlsx(xlsxfl, df):  # create csv files in csv folder on parent directory
    from xlsx2csv import Xlsx2csv
    for index, row in df.iterrows():
        shnum = row['id']
        shnph = xlsxfl.parent / 'csv' / Path(row['name'] + '.csv')  # path for converted csv file
        Xlsx2csv(str(xlsxfl), outputencoding="utf-8").convert(str(shnph), sheetid=int(shnum))
    return

pthfnc = 'c:/xlsx/'
wrkfl = 'my.xlsx'
xls_file = Path(pthfnc + wrkfl)
sheetsdic = get_sheet_details(xls_file)  # dictionary with sheet names and ids without opening xlsx file
df = pd.DataFrame.from_dict(sheetsdic)
csvfrmxlsx(xls_file, df)  # df with sheets to be converted
