batch tsv to csv script - python

I'm pretty new to Python. I wrote this script that batch converts tsv files to csv, but I keep getting an error message and have spent hours trying to see what I did wrong. Any help on this will be truly appreciated. The error is "UnicodeDecodeError: 'utf-8' codec can't decode byte 0xff in position 0: invalid start byte"
import os
import sys
import shutil
import pandas as pd
import argparse

def main():
    if len(sys.argv) == 1:
        files = [x for x in os.listdir('.') if x.endswith('.tsv')]
    else:
        files = [sys.argv[1]]
    for file in files:
        df = pd.read_csv(file, header=0, sep='\t', encoding='utf-8', quoting=3)
        new_filename = f'{file.replace(".tsv", "")}.csv'
        df.to_csv(new_filename, encoding='utf-8', index=False)
        print(f'Converted file: {new_filename}')
    print('Done!')

if __name__ == '__main__':
    main()

When pandas reads the file, it assumes UTF-8 encoding, but the file itself may have been written with a different encoding. A 0xff byte at position 0 is usually the start of a UTF-16 byte-order mark, so utf-16 is a good first guess.
In this line of code:
df = pd.read_csv(file, header=0, sep='\t', encoding='utf-8', quoting=3)
try setting encoding to a different format. There are many encodings you can try; the Python codecs documentation lists the standard ones. You could also open the file with Notepad, or another text editor, and re-save it with UTF-8 encoding.
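As a rough sketch, you could also fall back through a few likely encodings until one parses (the candidate list here is an assumption; adjust it to your data):
import pandas as pd

def read_tsv(path):
    # 0xff at position 0 suggests a UTF-16 BOM, so try utf-16 right after utf-8;
    # latin-1 decodes any byte sequence, so it acts as a last resort
    for encoding in ('utf-8', 'utf-16', 'cp1252', 'latin-1'):
        try:
            return pd.read_csv(path, header=0, sep='\t',
                               encoding=encoding, quoting=3)
        except (UnicodeDecodeError, UnicodeError):
            continue
    raise ValueError(f'could not decode {path}')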

Related

Convert multiple CSV files into UTF-8 encoding

I need to convert multiple CSV files (with different encodings) into UTF-8.
Here is my code:
# find encoding and, if not UTF-8, convert it
import os
import sys
import glob
import chardet
import codecs

myFiles = glob.glob('/mypath/*.csv')
csv_encoding = []
for file in myFiles:
    with open(file, 'rb') as opened_file:
        bytes_file = opened_file.read()
        result = chardet.detect(bytes_file)
        my_encoding = result['encoding']
        csv_encoding.append(my_encoding)
print(csv_encoding)

for file in myFiles:
    if csv_encoding in ['utf-8', 'ascii']:
        print(file + ' in utf-8 encoding')
    else:
        with codecs.open(file, 'r') as file_for_conversion:
            read_file_for_conversion = file_for_conversion.read()
        with codecs.open(file, 'w', 'utf-8') as converted_file:
            converted_file.write(read_file_for_conversion)
        print(file + ' converted to utf-8')
When I try to run this code I get the following error:
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xf3 in position 5057: invalid continuation byte
Can someone help me? Thanks!!!
You need to zip the lists myFiles and csv_encoding to get their values aligned:
for file, encoding in zip(myFiles, csv_encoding):
...
And you need to specify that value in the open() call:
...
with codecs.open(file, 'r', encoding=encoding) as file_for_conversion:
Note: in Python 3 there's no need to use the codecs module for opening files.
Just use the built-in open function and specify the encoding with the encoding parameter.
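Putting both fixes together with the built-in open, a minimal sketch of the corrected loop (assuming the chardet detection above has already filled csv_encoding):
for file, encoding in zip(myFiles, csv_encoding):
    if encoding in ('utf-8', 'ascii'):
        print(file + ' already in utf-8 encoding')
        continue
    # read with the detected encoding, then rewrite the same file as UTF-8
    with open(file, 'r', encoding=encoding) as file_for_conversion:
        read_file_for_conversion = file_for_conversion.read()
    with open(file, 'w', encoding='utf-8') as converted_file:
        converted_file.write(read_file_for_conversion)
    print(file + ' converted to utf-8')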

text contents of pdf to csv file conversion- How to?

I want to take a PDF file as input, and as output I want a csv file containing all the textual data from the PDF. But I don't understand how to make this happen; I've tried but couldn't do it.
What I've done is use a library called tabula-py, which converts PDF to csv. It does create a csv file, but no contents are copied into it from the PDF.
Here's the code:
from tabula import convert_into,read_pdf
import tabula
df = tabula.read_pdf("crimestory.pdf", spreadsheet=True,
                     pages='all', output_format="csv")
df.to_csv('crimestoryy.csv', index=False)
The output should be a csv file where the data is present; what I am getting is a blank csv file.
I have found the answer to this question on my own.
To tackle this issue I converted the pdf file into a text file, then converted that text file to a csv file. Here's my code.
conversion.py
import os.path
import csv
import pdftotext

# Load your PDF
with open("crimestory.pdf", "rb") as f:
    pdf = pdftotext.PDF(f)

# Save all text to a txt file.
with open('crimestory.txt', 'w') as f:
    f.write("\n\n".join(pdf))

save_path = "/home/mayureshk/PycharmProjects/NLP/"
completeName_in = os.path.join(save_path, 'crimestory' + '.txt')
completeName_out = os.path.join(save_path, 'crimestoryycsv' + '.csv')

file1 = open(completeName_in)
In_text = csv.reader(file1, delimiter=',')
file2 = open(completeName_out, 'w')
out_csv = csv.writer(file2)
out_csv.writerows(In_text)
file1.close()
file2.close()
Try this; hope it works:
import tabula
# convert PDF into CSV
tabula.convert_into("crimestory.pdf", "crimestory.csv", output_format="csv", pages='all')
or
df = tabula.read_pdf("crimestory.pdf", encoding='utf-8', spreadsheet=True, pages='all')
df.to_csv('crimestory.csv', encoding='utf-8')
or
from tabula import read_pdf
df = read_pdf("crimestory.pdf")
df
#make sure df displays your pdf contents in the output
from tabula import convert_into
convert_into("crimestory.pdf", "crimestory.csv", output_format="csv")
!cat crimestory.csv
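With recent versions of tabula-py the spreadsheet= argument was deprecated (lattice= is its closest replacement, an assumption based on newer releases), and read_pdf returns a list of DataFrames by default, so a sketch of the same conversion would be:
import tabula

# one DataFrame per table detected in the PDF
dfs = tabula.read_pdf("crimestory.pdf", lattice=True, pages="all")
for i, df in enumerate(dfs):
    df.to_csv("crimestory_%d.csv" % i, index=False)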

Apply GZIP compression to a CSV in Python Pandas

I am trying to write a dataframe to a gzipped csv in python pandas, using the following:
import pandas as pd
import datetime
import csv
import gzip

# Get data (with previous connection and script variables)
df = pd.read_sql_query(script, conn)

# Create today's date, to append to file
todaysdatestring = str(datetime.datetime.today().strftime('%Y%m%d'))
print todaysdatestring

# Create csv with gzip compression
df.to_csv('foo-%s.csv.gz' % todaysdatestring,
          sep='|',
          header=True,
          index=False,
          quoting=csv.QUOTE_ALL,
          compression='gzip',
          quotechar='"',
          doublequote=True,
          line_terminator='\n')
This just creates a csv called 'foo-YYYYMMDD.csv.gz', not an actual gzip archive.
I've also tried adding this:
# Turn the to_csv statement into a variable
d = df.to_csv('foo-%s.csv.gz' % todaysdatestring,
              sep='|',
              header=True,
              index=False,
              quoting=csv.QUOTE_ALL,
              compression='gzip',
              quotechar='"',
              doublequote=True,
              line_terminator='\n')

# Write above variable to gzip
with gzip.open('foo-%s.csv.gz' % todaysdatestring, 'wb') as output:
    output.write(d)
Which fails as well. Any ideas?
Using df.to_csv() with the keyword argument compression='gzip' should produce a gzip archive. I tested it using the same keyword arguments as you, and it worked.
You may need to upgrade pandas, as gzip compression was not implemented until version 0.17.1; trying to use it on earlier versions will not raise an error, it will just produce a regular csv. You can determine your current version of pandas from the output of pd.__version__.
It is done very easily with pandas
import pandas as pd
Write a pandas dataframe to disk as a gzip-compressed csv:
df.to_csv('dfsavename.csv.gz', compression='gzip')
Read it back from disk:
df = pd.read_csv('dfsavename.csv.gz', compression='gzip')
From the gzip documentation:
import gzip

content = "Lots of content here"
with gzip.open('file.txt.gz', 'wb') as f:
    f.write(content)
With pandas:
import gzip

content = df.to_csv(
    sep='|',
    header=True,
    index=False,
    quoting=csv.QUOTE_ALL,
    quotechar='"',
    doublequote=True,
    line_terminator='\n')
with gzip.open('foo-%s.csv.gz' % todaysdatestring, 'wb') as f:
    f.write(content)
The trick here being that to_csv outputs text if you don't pass it a filename. Then you just redirect that text to gzip's write method.
with gzip.open('foo-%s.csv.gz' % todaysdatestring, 'wb') as f:
    f.write(df.to_csv(sep='|', index=False, quoting=csv.QUOTE_ALL))
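Note these snippets are Python 2; in Python 3, to_csv returns str while gzip.open in 'wb' mode expects bytes. A sketch of the same trick for Python 3 ('foo.csv.gz' is a placeholder filename):
import csv
import gzip

# text mode ('wt') lets gzip handle the str -> bytes encoding itself
with gzip.open('foo.csv.gz', 'wt', encoding='utf-8') as f:
    f.write(df.to_csv(sep='|', index=False, quoting=csv.QUOTE_ALL))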

DBF - encoding cp1250

I have a dbf database encoded in cp1250, and I am reading it using the following code:
import csv
from dbfpy import dbf
import os
import sys

filename = sys.argv[1]
if filename.endswith('.dbf'):
    print "Converting %s to csv" % filename
    csv_fn = filename[:-4] + ".csv"
    with open(csv_fn, 'wb') as csvfile:
        in_db = dbf.Dbf(filename)
        out_csv = csv.writer(csvfile)
        names = []
        for field in in_db.header.fields:
            names.append(field.name)
        #out_csv.writerow(names)
        for rec in in_db:
            out_csv.writerow(rec.fieldData)
        in_db.close()
    print "Done..."
else:
    print "Filename does not end with .dbf"
The problem is that the final csv file is wrong: its encoding is ANSI and some characters are corrupted. Could you help me figure out how to read the dbf file correctly?
EDIT 1
I tried different code from https://pypi.python.org/pypi/simpledbf/0.2.4, but there is some error.
Source 2:
from simpledbf import Dbf5
import os
import sys
dbf = Dbf5('test.dbf', codec='cp1250');
dbf.to_csv('junk.csv');
Output:
python program2.py
Traceback (most recent call last):
  File "program2.py", line 5, in <module>
    dbf = Dbf5('test.dbf', codec='cp1250');
  File "D:\ProgramFiles\Anaconda\lib\site-packages\simpledbf\simpledbf.py", line 557, in __init__
    assert terminator == b'\r'
AssertionError
I really don't know how to solve this problem.
Try using my dbf library:
import dbf

with dbf.Table('test.dbf') as table:
    dbf.export(table, 'junk.csv')
I wrote simpledbf. The line that is causing you problems was from some testing I was doing when developing the module. First of all, you might want to update your installation, as 0.2.6 is the most recent. Then you can try removing that particular line (#557) from the file "D:\ProgramFiles\Anaconda\lib\site-packages\simpledbf\simpledbf.py". If that doesn't work, you can ping me at the GitHub repo for simpledbf, or you could try Ethan's suggestion for the dbf module.
You can decode and encode as necessary. dbfpy assumes strings are utf-8 encoded, so you can decode them from utf-8 and then re-encode with the right encoding.
import csv
from dbfpy import dbf
import os
import sys

filename = sys.argv[1]
if filename.endswith('.dbf'):
    print "Converting %s to csv" % filename
    csv_fn = filename[:-4] + ".csv"
    with open(csv_fn, 'wb') as csvfile:
        in_db = dbf.Dbf(filename)
        out_csv = csv.writer(csvfile)
        names = []
        for field in in_db.header.fields:
            names.append(field.name)
        #out_csv.writerow(names)
        for rec in in_db:
            # re-encode string fields from utf-8 to cp1250 before writing
            row = [i.decode('utf8').encode('cp1250') if isinstance(i, str) else i for i in rec.fieldData]
            out_csv.writerow(row)
        in_db.close()
    print "Done..."
else:
    print "Filename does not end with .dbf"

xls to csv converter

I am using win32.client in python for converting my .xlsx and .xls file into a .csv. When I execute this code it's giving an error. My code is:
def convertXLS2CSV(aFile):
    '''converts a MS Excel file to csv w/ the same name in the same directory'''
    print "------ beginning to convert XLS to CSV ------"
    try:
        import win32com.client, os
        from win32com.client import constants as c
        excel = win32com.client.Dispatch('Excel.Application')
        fileDir, fileName = os.path.split(aFile)
        nameOnly = os.path.splitext(fileName)
        newName = nameOnly[0] + ".csv"
        outCSV = os.path.join(fileDir, newName)
        workbook = excel.Workbooks.Open(aFile)
        workbook.SaveAs(outCSV, c.xlCSVMSDOS)  # 24 represents xlCSVMSDOS
        workbook.Close(False)
        excel.Quit()
        del excel
        print "...Converted " + nameOnly + " to CSV"
    except:
        print ">>>>>>> FAILED to convert " + aFile + " to CSV!"

convertXLS2CSV("G:\\hello.xlsx")
I am not able to find the error in this code. Please help.
I would use xlrd - it's faster, cross platform and works directly with the file.
As of version 0.8.0, xlrd reads both XLS and XLSX files.
But as of version 2.0.0, support was reduced back to only XLS.
import xlrd
import csv

def csv_from_excel():
    wb = xlrd.open_workbook('your_workbook.xls')
    sh = wb.sheet_by_name('Sheet1')
    your_csv_file = open('your_csv_file.csv', 'wb')
    wr = csv.writer(your_csv_file, quoting=csv.QUOTE_ALL)
    for rownum in xrange(sh.nrows):
        wr.writerow(sh.row_values(rownum))
    your_csv_file.close()
I would use pandas. The computationally heavy parts are written in cython or C extensions to speed up the process, and the syntax is very clean. For example, if you want to turn "Sheet1" from the file "your_workbook.xls" into the file "your_csv.csv", you just use the top-level function read_excel and the method to_csv from the DataFrame class as follows:
import pandas as pd
data_xls = pd.read_excel('your_workbook.xls', 'Sheet1', index_col=None)
data_xls.to_csv('your_csv.csv', encoding='utf-8')
Setting encoding='utf-8' alleviates the UnicodeEncodeError mentioned in other answers.
Maybe someone will find this ready-to-use piece of code useful. It creates CSVs from all the sheets in an Excel workbook.
Python 2:
# -*- coding: utf-8 -*-
import xlrd
import csv
from os import sys

def csv_from_excel(excel_file):
    workbook = xlrd.open_workbook(excel_file)
    all_worksheets = workbook.sheet_names()
    for worksheet_name in all_worksheets:
        worksheet = workbook.sheet_by_name(worksheet_name)
        with open(u'{}.csv'.format(worksheet_name), 'wb') as your_csv_file:
            wr = csv.writer(your_csv_file, quoting=csv.QUOTE_ALL)
            for rownum in xrange(worksheet.nrows):
                wr.writerow([unicode(entry).encode("utf-8") for entry in worksheet.row_values(rownum)])

if __name__ == "__main__":
    csv_from_excel(sys.argv[1])
Python 3:
import xlrd
import csv
from os import sys

def csv_from_excel(excel_file):
    workbook = xlrd.open_workbook(excel_file)
    all_worksheets = workbook.sheet_names()
    for worksheet_name in all_worksheets:
        worksheet = workbook.sheet_by_name(worksheet_name)
        # newline='' stops the csv module from writing extra blank lines on Windows
        with open(u'{}.csv'.format(worksheet_name), 'w', encoding="utf-8", newline='') as your_csv_file:
            wr = csv.writer(your_csv_file, quoting=csv.QUOTE_ALL)
            for rownum in range(worksheet.nrows):
                wr.writerow(worksheet.row_values(rownum))

if __name__ == "__main__":
    csv_from_excel(sys.argv[1])
I'd use csvkit, which uses xlrd (for xls) and openpyxl (for xlsx) to convert just about any tabular data to csv.
Once installed, with its dependencies, it's a matter of:
in2csv myfile > myoutput.csv
It takes care of all the format detection issues, so you can pass it just about any tabular data source. It's cross-platform too (no win32 dependency).
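If a workbook has several sheets, in2csv can select one by name (assuming a csvkit version that supports the --sheet flag):
in2csv --sheet "Sheet1" myfile.xlsx > myoutput.csv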
First read your Excel spreadsheet into pandas. The code below imports your Excel workbook into pandas as an OrderedDict, which contains all of your worksheets as DataFrames. Then simply use the worksheet name as a key to access a specific worksheet as a DataFrame, and save only the required worksheet as a csv file by using df.to_csv(). Hope this works out in your case.
import pandas as pd
df = pd.read_excel('YourExcel.xlsx', sheet_name=None)
df['worksheet_name'].to_csv('YourCsv.csv')
If your Excel file contain only one worksheet then simply use below code:
import pandas as pd
df = pd.read_excel('YourExcel.xlsx')
df.to_csv('YourCsv.csv')
If you want to convert all the worksheets of a single Excel workbook into separate csv files, try the code below:
import pandas as pd

def excelTOcsv(filename):
    df = pd.read_excel(filename, sheet_name=None)
    for key, value in df.items():
        # write each sheet to its own csv file, named after the sheet
        value.to_csv('%s.csv' % key)
This function converts the multiple sheets of one Excel workbook into multiple csv files, where key is the sheet name and value is the content of the sheet as a DataFrame.
@andi I tested your code, it works great, BUT
In my sheets there's a column like this:
2013-03-06T04:00:00
(date and time in the same cell)
It gets garbled during export; in the exported file it looks like this:
41275.0416667
Other columns are OK.
csvkit, on the other hand, does fine with that column but exports only ONE sheet, and my files have many.
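For the xlrd-based scripts above, those serial numbers can be converted back into real dates before writing the csv. A minimal sketch, assuming the cells really are Excel dates (xlrd marks them with the XL_CELL_DATE cell type):
import xlrd
from xlrd import xldate_as_datetime

wb = xlrd.open_workbook('your_workbook.xls')
sh = wb.sheet_by_index(0)
for rownum in range(sh.nrows):
    row = []
    for cell in sh.row(rownum):
        if cell.ctype == xlrd.XL_CELL_DATE:
            # convert the Excel serial number using the workbook's datemode
            row.append(xldate_as_datetime(cell.value, wb.datemode).isoformat())
        else:
            row.append(cell.value)
    print(row)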
xlsx2csv is faster than pandas and xlrd.
xlsx2csv -s 0 crunchbase_monthly_.xlsx cruchbase
An Excel file usually comes with n sheets; -s is the sheet index (0 means all sheets).
A cruchbase folder will then be created, and each sheet belonging to the xlsx will be converted to a single csv.
p.s. csvkit is awesome too.
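xlsx2csv also has a Python API if you'd rather skip the shell (a sketch; sheetid=0 should mirror -s 0 above and convert all sheets):
from xlsx2csv import Xlsx2csv

# with sheetid=0 the target is treated as a directory, one csv per sheet
Xlsx2csv("crunchbase_monthly_.xlsx", outputencoding="utf-8").convert("cruchbase", sheetid=0)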
Quoting an answer from Scott Ming, which works with workbook containing multiple sheets:
Here is a python script getsheets.py (mirror), you should install pandas and xlrd before you use it.
Run this:
pip3 install pandas xlrd # or `pip install pandas xlrd`
How does it work?
$ python3 getsheets.py -h
Usage: getsheets.py [OPTIONS] INPUTFILE

  Convert a Excel file with multiple sheets to several file with one sheet.

  Examples:

      getsheets filename

      getsheets filename -f csv

Options:
  -f, --format [xlsx|csv]  Default xlsx.
  -h, --help               Show this message and exit.

Convert to several xlsx:
$ python3 getsheets.py goods_temp.xlsx
Sheet.xlsx Done!
Sheet1.xlsx Done!

All Done!

Convert to several csv:
$ python3 getsheets.py goods_temp.xlsx -f csv
Sheet.csv Done!
Sheet1.csv Done!

All Done!
getsheets.py:
# -*- coding: utf-8 -*-
import click
import os
import pandas as pd

def file_split(file):
    s = file.split('.')
    name = '.'.join(s[:-1])  # get directory name
    return name

def getsheets(inputfile, fileformat):
    name = file_split(inputfile)
    try:
        os.makedirs(name)
    except:
        pass
    df1 = pd.ExcelFile(inputfile)
    for x in df1.sheet_names:
        print(x + '.' + fileformat, 'Done!')
        df2 = pd.read_excel(inputfile, sheetname=x)
        filename = os.path.join(name, x + '.' + fileformat)
        if fileformat == 'csv':
            df2.to_csv(filename, index=False)
        else:
            df2.to_excel(filename, index=False)
    print('\nAll Done!')

CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])

@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('inputfile')
@click.option('-f', '--format', type=click.Choice(['xlsx', 'csv']), default='xlsx', help='Default xlsx.')
def cli(inputfile, format):
    '''Convert a Excel file with multiple sheets to several file with one sheet.

    Examples:

    \b
        getsheets filename

    \b
        getsheets filename -f csv
    '''
    if format == 'csv':
        getsheets(inputfile, 'csv')
    else:
        getsheets(inputfile, 'xlsx')

cli()
We can use the pandas library in Python to convert an xls file to a csv file.
The code below converts an xls file to a csv file.
import pandas as pd
Read the Excel file from a local path:
df = pd.read_excel("C:/Users/IBM_ADMIN/BU GPA Scorecard.xlsx",sheetname=1)
Trim spaces from the column names:
df.columns = df.columns.str.strip()
Write the DataFrame to a csv file that is pipe-delimited and without the index:
df.to_csv("C:/Users/IBM_ADMIN/BU GPA Scorecard csv.csv",sep="|",index=False)
Python is not the best tool for this task. I tried several approaches in Python but none of them worked 100% (e.g. 10% converts to 0.1, or column types are messed up, etc.). The right tool here is PowerShell, because it is an MS product (as is Excel) and has the best integration.
Simply download this PowerShell script, edit line 47 to enter the path of the folder containing the Excel files, and run the script using PowerShell.
Using xlrd is a flawed way to do this, because you lose the Date Formats in Excel.
My use case is the following:
take an Excel file with more than one sheet and convert each one into a file of its own.
I have done this using the xlsx2csv library, calling it via a subprocess.
import sys
import subprocess

def csv_from_excel(fname):
    # subprocess.run blocks until xlsx2csv has finished writing the file,
    # so no sleep is needed before reading it back
    subprocess.run("xlsx2csv " + fname + " --all -d '|' -i -p "
                   "'<New Sheet>' > test.csv", shell=True)

csv_from_excel(sys.argv[1])

with open('[YOUR PATH]/test.csv') as f:
    lines = f.readlines()

firstSheet = True
for line in lines:
    if line.startswith('<New Sheet>'):
        if firstSheet:
            sh_2_fname = line.replace('<New Sheet>', '').strip().replace(' - ', '_').replace(' ', '_')
            print(sh_2_fname)
            sh2f = open(sh_2_fname + ".csv", "w")
            firstSheet = False
        else:
            sh2f.close()
            sh_2_fname = line.replace('<New Sheet>', '').strip().replace(' - ', '_').replace(' ', '_')
            print(sh_2_fname)
            sh2f = open(sh_2_fname + ".csv", "w")
    else:
        sh2f.write(line)
sh2f.close()
I've tested all the answers, but they were all too slow for me. If you have Excel installed you can use COM.
I initially thought it would be slower, since it loads everything for the actual Excel application, but it isn't for huge files. Maybe because the algorithm for opening and saving files runs heavily optimized compiled code; the Microsoft guys make a lot of money from it, after all.
import sys
import os
import glob
from win32com.client import Dispatch

def main(path):
    excel = Dispatch("Excel.Application")
    if is_full_path(path):
        process_file(excel, path)
    else:
        files = glob.glob(path)
        for file_path in files:
            process_file(excel, file_path)
    excel.Quit()

def process_file(excel, path):
    fullpath = os.path.abspath(path)
    full_csv_path = os.path.splitext(fullpath)[0] + '.csv'
    workbook = excel.Workbooks.Open(fullpath)
    workbook.Worksheets(1).SaveAs(full_csv_path, 6)
    workbook.Saved = 1
    workbook.Close()

def is_full_path(path):
    return path.find(":") > -1

if __name__ == '__main__':
    main(sys.argv[1])
This is very raw code and won't check for errors, print help or anything; it will just create a csv file for each file that matches the pattern you entered in the function, so you can batch process a lot of files while launching the Excel application only once.
As much as I hate to rely on Windows Excel proprietary software, which is not cross-platform, my testing of csvkit for .xls, which uses xlrd under the hood, failed to correctly parse dates (even when using the command-line parameters to specify a strptime format).
For example, this xls file, when parsed with csvkit, will convert cell G1 of 12/31/2002 to 37621, whereas when converted to csv via Excel -> Save As (using the code below) cell G1 will be "December 31, 2002".
import re
import os
from win32com.client import Dispatch

xlCSVMSDOS = 24

class CsvConverter(object):
    def __init__(self, *, input_dir, output_dir):
        self._excel = None
        self.input_dir = input_dir
        self.output_dir = output_dir
        if not os.path.isdir(self.output_dir):
            os.makedirs(self.output_dir)

    def isSheetEmpty(self, sheet):
        # https://archive.is/RuxR7
        # WorksheetFunction.CountA(ActiveSheet.UsedRange) = 0 And ActiveSheet.Shapes.Count = 0
        return (not self._excel.WorksheetFunction.CountA(sheet.UsedRange)) \
            and (not sheet.Shapes.Count)

    def getNonEmptySheets(self, wb, as_name=False):
        return [
            (sheet.Name if as_name else sheet)
            for sheet in wb.Sheets
            if not self.isSheetEmpty(sheet)
        ]

    def saveWorkbookAsCsv(self, wb, csv_path):
        non_empty_sheet_names = self.getNonEmptySheets(wb, as_name=True)
        assert (len(non_empty_sheet_names) == 1), \
            "Expected exactly 1 sheet but found %i non-empty sheets: '%s'" \
            % (
                len(non_empty_sheet_names),
                "', '".join(name.replace("'", r"\'") for name in non_empty_sheet_names)
            )
        wb.Worksheets(non_empty_sheet_names[0]).SaveAs(csv_path, xlCSVMSDOS)
        wb.Saved = 1

    def isXlsFilename(self, filename):
        return bool(re.search(r'(?i)\.xls$', filename))

    def batchConvertXlsToCsv(self):
        xls_names = tuple(filename for filename in next(os.walk(self.input_dir))[2]
                          if self.isXlsFilename(filename))
        self._excel = Dispatch('Excel.Application')
        try:
            for xls_name in xls_names:
                csv_path = os.path.join(self.output_dir, '%s.csv' % os.path.splitext(xls_name)[0])
                if not os.path.isfile(csv_path):
                    workbook = self._excel.Workbooks.Open(os.path.join(self.input_dir, xls_name))
                    try:
                        self.saveWorkbookAsCsv(workbook, csv_path)
                    finally:
                        workbook.Close()
        finally:
            if not len(self._excel.Workbooks):
                self._excel.Quit()
            self._excel = None

if __name__ == '__main__':
    self = CsvConverter(
        input_dir='C:\\data\\xls\\',
        output_dir='C:\\data\\csv\\'
    )
    self.batchConvertXlsToCsv()
The above will take an input_dir containing .xls and output them to output_dir as .csv -- it will assert that there is exactly 1 non-empty sheet in the .xls; if you need to handle multiple sheets into multiple csv then you'll need to edit saveWorkbookAsCsv.
I was trying to use the xlrd library to convert the xlsx format to csv, but I was getting the error: xlrd.biffh.XLRDError: Excel xlsx file; not supported. That happened because this package no longer reads any format other than xls, according to the xlrd documentation.
Following the answer from Chris Withers, I changed the engine for pandas' read_excel() function, and then I was able to create a function that successfully converts any sheets you want from your Excel spreadsheet.
For the function below to work, don't forget to install the openpyxl library.
Function:
import os
import pathlib
import pandas as pd

# Function to convert excel spreadsheet into csv format
def Excel_to_csv():
    # Excel file full path
    excel_file = os.path.join(os.path.sep, pathlib.Path(__file__).parent.resolve(), "Excel_Spreadsheet.xlsx")
    # Excel sheets
    excel_sheets = ['Sheet1', 'Sheet2', 'Sheet3']
    for sheet in excel_sheets:
        # Create dataframe for each sheet
        df = pd.DataFrame(pd.read_excel(excel_file, sheet, index_col=None, engine='openpyxl'))
        # Export to csv. i.e: sheet_name.csv
        df.to_csv(os.path.join(os.path.sep, pathlib.Path(__file__).parent.resolve(), sheet + '.csv'), sep=",", encoding='utf-8', index=False, header=True)

# Run the Excel_to_csv function:
Excel_to_csv()
