script to merge multiple csv's into single xslx not working - python

Have read all of the threads on this but I'm still hitting a dead end. Just trying to take all the csv's in a directory and add them as new sheets to a new xlsx workbook. Here's what I've got:
import xlwt, csv, os, glob
def make_excel_workbook(path):
wb = xlwt.Workbook()
for filename in os.listdir(folder_path):
if filename.endswith('.csv'):
ws = wb.add_sheet(os.path.splitext(filename)[0])
with open('{}\\{}'.format(folder_path, filename), 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for rowx, row in enumerate(reader):
for colx, value in enumerate(row):
ws.write(rowx, colx, value)
return wb
csvDir = "C:\\Temp\\Data\\outfiles"
outDir = "C:\\Temp\\Data\\output"
os.chdir(csvDir)
csvFileList = []
searchTerm = "character string"
for file in glob.glob('*.csv'):
csvFileList.append(file)
for i in csvFileList: # search a set of extant csv files for a string and make new csv files filtered on the search term
csv_file = csv.reader(open(i, 'rb'), delimiter=',')
rowList = []
for row in csv_file:
for field in row:
if searchTerm in field:
rowList.append(row)
outputCsvFile = os.path.join(rootDir, i)
with open(outputCsvFile, 'wb') as newCsvFile:
wr = csv.writer(newCsvFile, quoting=csv.QUOTE_ALL)
wr.writerows(rowList)
So far, it works, and creates the new csv files from the original, much larger ones. Here's where it breaks:
if __name__ == '__main__':
xls = make_excel_workbook(outDir)
xls_name = "My_Team_Tasks"
xls.save('{}\\{}{}.'format(outDir, xls_name, '.xls'))
print('{}\\{}{} saved successfully'.format(outDir, xls_name, '.xls'))
when it gets to xls.save, it gives me the following error:
Update: here's the entire traceback:
Traceback (most recent call last):
File"M:/Testing/scripts/csv_parse.py", line 44, in <module>
xls.save('{}\\{}{}'.format(rootDir, xls_name, '.xls'))
File "C:\Python27\ArcGIS10.4\lib\site-packages\xlwt\Workbook.py", line 696, in save
doc.save(filename_or_stream, self.get_biff_data())
File "C:\Python27\ArcGIS10.4\lib\site-packages\xlwt\Workbook.py", line 660, in get_biff_data
shared_str_table = self.__sst_rec()
File "C:\Python27\ArcGIS10.4\lib\site-packages\xlwt\Workbook.py", line 662, in __sst_rec
return self.__sst.get_biff_record()
File "C:\Python27\ArcGIS10.4\lib\site-packages\xlwt\BIFFRecords.py", line 77, in get_biff_record
self._add_to_sst(s)
File "C:\Python27\ArcGIS10.4\lib\site-packages\xlwt\BIFFRecords.py", line 92, in _add_to_sst
u_str = upack2(s, self.encoding)
File "C:\Python27\ArcGIS10.4\lib\site-packages\xlwt\UnicodeUtils.py", line 50, in upack2
us = unicode(s, encoding)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc2 in position 69: ordinal not in range (128)

Do you know how the input CSV files are encoded? It appears from the error message to be unicode?
You can try:
wb = xlwt.Workbook(encoding='utf-8')
Failing that, as per this answer (xlwt module - saving xls unicode error) it seems another possible way to get around this issue is to encode your text into unicode before writing out.
ws.write(rowx, colx, value.decode('utf-8'))
Again, it depends on how your inputs are encoded.

Related

Keep getting Unicode Error with Streamlit

I am trying to use OpenAi and Streamlit together to merge them into a little dummy website. Here is the coding:
save_folder = "files"
if not os.path.exists(save_folder):
os.makedirs(save_folder)
uploaded_files = st.file_uploader("Choose a file", accept_multiple_files=True)
for uploaded_file in uploaded_files:
bytes_data = uploaded_file.read()
s = bytes_data.decode("UTF-8")
with open(f"{save_folder}/{uploaded_file.name}", "w") as f:
f.write(s)
SimpleDirectoryReader = download_loader("SimpleDirectoryReader")
loader = SimpleDirectoryReader(save_folder)
documents = loader.load_data()
index = GPTSimpleVectorIndex(documents)
index.save_to_disk('index.json')
question = st.text_input("What do you want me to do with the file uploaded?")
response = index.query(question)
st.write(response)
I keep getting the same error:
File "/home/appuser/venv/lib/python3.9/site-packages/streamlit/runtime/scriptrunner/script_runner.py", line 565, in _run_script
exec(code, module.__dict__)
File "/app/indextest/streamlit_app.py", line 17, in <module>
s = bytes_data.decode("UTF-8")
UnicodeDecodeError: 'utf-8' codec can't decode bytes in position 14-15: invalid continuation byte
Anyone know why?

UnicodeDecodeError when I want to convert dbf files to csv

I wrote a little program to convert dbf files to csv and it works on 80 % files.
When I try to convert whole folder I get error on last 2 dbf files.
UnicodeDecodeError: 'ascii' codec can't decode byte 0x88 in position 14: ordinal not in range(128)
This is my program:
import csv
from dbfread import DBF
from tkinter import filedialog
from tkinter import *
import os, sys
import glob
def dbf_to_csv(dbf_table_pth):#Input a dbf, output a csv, same name, same path, except extension
csv_fn = dbf_table_pth[:-4]+ ".csv" #Set the csv file name
table = DBF(dbf_table_pth)# table variable is a DBF object
with open(csv_fn, 'w',encoding = 'utf-8', newline = '') as f:# create a csv file, fill it with dbf content
writer = csv.writer(f)
writer.writerow(table.field_names)# write the column name
for record in table:# write the rows
writer.writerow(list(record.values()))
return csv_fn# return the csv name
listOfFiles = []
def get_filenames():
Tk().withdraw()
print("Initializing Dialogue... \\nPlease select a file.")
tk_filenames = filedialog.askdirectory()
tempDir = tk_filenames
return tempDir
choosen_dir = get_filenames()
os.chdir(choosen_dir)
for file in glob.glob("*.dbf"):
listOfFiles.append(file)
for file in listOfFiles:
dbf_to_csv(file)
Here I paste Trackback:
Traceback (most recent call last):
File "C:\Pliki po Dawidzie\Converter\main.py", line 35, in <module>
dbf_to_csv(file)
File "C:\Pliki po Dawidzie\Converter\main.py", line 15, in dbf_to_csv
for record in table:# write the rows
File "C:\Users\maciej.olech\PycharmProjects\pythonProject\venv\lib\site-packages\dbfread\dbf.py", line 314, in _iter_records
items = [(field.name,
File "C:\Users\maciej.olech\PycharmProjects\pythonProject\venv\lib\site-packages\dbfread\dbf.py", line 315, in <listcomp>
parse(field, read(field.length))) \
File "C:\Users\maciej.olech\PycharmProjects\pythonProject\venv\lib\site-packages\dbfread\field_parser.py", line 79, in parse
return func(field, data)
File "C:\Users\maciej.olech\PycharmProjects\pythonProject\venv\lib\site-packages\dbfread\field_parser.py", line 87, in parseC
return self.decode_text(data.rstrip(b'\0 '))
File "C:\Users\maciej.olech\PycharmProjects\pythonProject\venv\lib\site-packages\dbfread\field_parser.py", line 45, in decode_text
return decode_text(text, self.encoding, errors=self.char_decode_errors)
UnicodeDecodeError: 'ascii' codec can't decode byte 0x88 in position 14: ordinal not in range(128)
Thanks for any help help.

Python Unicode decode error- Not able to run script even after suggested correction

I am using python 2.7.9 to create excel sheet using tab delimited text files; however I am getting problem while running this python script
#!/usr/bin/env python
# encoding=utf8
import xlwt
import os
import sys
reload(sys)
sys.setdefaultencoding('utf8')
wb = xlwt.Workbook()
path = "/home/Final_analysis/"
#print(os.listdir())
lis = os.listdir(path)
sheetnumber = 1
for x in lis:
if os.path.isfile(x)==True:
extension = os.path.splitext(x)
print(extension[1])
if extension[1] == '.txt':
#print("Yes")
ws = wb.add_sheet(extension[0])
row = 0
column = 0
a = open(x)
while True:
a1 = a.readline()
if len(a1)==0:
break
data = a1.split("\t")
for z in data:
ws.write(row,column,z)
column += 1
column = 0
row += 1
sheetnumber+=1
else:
pass
wb.save("Ronic.xls")
I am getting following error
Traceback (most recent call last):
File "home/Final_analysis/combine_excel_v2.py", line 39, in <module>
wb.save("Ronic.xls")
File "/usr/local/lib/python2.7/site-packages/xlwt/Workbook.py", line 710, in save
doc.save(filename_or_stream, self.get_biff_data())
File "/usr/local/lib/python2.7/site-packages/xlwt/Workbook.py", line 674, in get_biff_data
shared_str_table = self.__sst_rec()
File "/usr/local/lib/python2.7/site-packages/xlwt/Workbook.py", line 636, in __sst_rec
return self.__sst.get_biff_record()
File "/usr/local/lib/python2.7/site-packages/xlwt/BIFFRecords.py", line 77, in get_biff_record
self._add_to_sst(s)
File "/usr/local/lib/python2.7/site-packages/xlwt/BIFFRecords.py", line 92, in _add_to_sst
u_str = upack2(s, self.encoding)
File "/usr/local/lib/python2.7/site-packages/xlwt/UnicodeUtils.py", line 50, in upack2
us = unicode(s, encoding)
UnicodeDecodeError: 'ascii' codec can't decode byte 0xc3 in position 83: ordinal not in range(128)
I have used answer given in thread How to fix: "UnicodeDecodeError: 'ascii' codec can't decode byte"
But it didn't work.
problem is at wb.save() command
Setting the encoding at the top of your program is to handle non-ascii characters in your code, not your data. sys.setdefaultencoding('utf8') is not intended to be used in ordinary programs and does more harm than good.
To fix the problem, tell xlwt about the encoding to use.
Change this line:
wb = xlwt.Workbook()
to this:
wb = xlwt.Workbook(encoding="UTF-8")

Unicode error ascii can't encode character

I am trying to import a csv file in order to train my classifier but I keep receiving this error
traceback (most recent call last):
File "updateClassif.py", line 17, in <module>
myClassif = NaiveBayesClassifier(fp, format="csv")
File "C:\Python27\lib\site-packages\textblob\classifiers.py", line 191, in __init__
super(NLTKClassifier, self).__init__(train_set, feature_extractor, format, **kwargs)
File "C:\Python27\lib\site-packages\textblob\classifiers.py", line 123, in __init__
self.train_set = self._read_data(train_set, format)
File "C:\Python27\lib\site-packages\textblob\classifiers.py", line 143, in _read_data
return format_class(dataset, **self.format_kwargs).to_iterable()
File "C:\Python27\lib\site-packages\textblob\formats.py", line 68, in __init__
self.data = [row for row in reader]
File "C:\Python27\lib\site-packages\textblob\unicodecsv\__init__.py", line 106, in next
row = self.reader.next()
UnicodeEncodeError: 'ascii' codec can't encode character u'\xe6' in position 55: ordinal not in range(128)
The CSV file contains 1600000 lines of tweets so I believe some tweets contain special characters. I have tried saving it using open office as someone recommended but still the same result. I also tried using latin encoding but the same result.
This is my code :
with codecs.open('tr.csv', 'r' ,encoding='latin-1') as fp:
myClassif = NaiveBayesClassifier(fp, format="csv")
This is the code from the library I am using:
def __init__(self, csvfile, fieldnames=None, restkey=None, restval=None,
dialect='excel', encoding='utf-8', errors='strict', *args,
**kwds):
if fieldnames is not None:
fieldnames = _stringify_list(fieldnames, encoding)
csv.DictReader.__init__(self, csvfile, fieldnames, restkey, restval, dialect, *args, **kwds)
self.reader = UnicodeReader(csvfile, dialect, encoding=encoding,
errors=errors, *args, **kwds)
if fieldnames is None and not hasattr(csv.DictReader, 'fieldnames'):
# Python 2.5 fieldnames workaround. (http://bugs.python.org/issue3436)
reader = UnicodeReader(csvfile, dialect, encoding=encoding, *args, **kwds)
self.fieldnames = _stringify_list(reader.next(), reader.encoding)
self.unicode_fieldnames = [_unicodify(f, encoding) for f in
self.fieldnames]
self.unicode_restkey = _unicodify(restkey, encoding)
def next(self):
row = csv.DictReader.next(self)
result = dict((uni_key, row[str_key]) for (str_key, uni_key) in
izip(self.fieldnames, self.unicode_fieldnames))
rest = row.get(self.restkey)
In Python2, the csv module does not support unicode. So you must pass in some kind of iterator object (such as a file) which only produces byte-strings.
This means that your code should look like this:
with open('tr.csv', 'rb') as fp:
myClassif = NaiveBayesClassifier(fp, format="csv")
But note that the csv file must be encoded as UTF-8. If it's not, you will obviously need to convert it to UTF-8 first, in order for the code above to work.
Note that the traceback says EncodeError, not DecodeError. It looks like the NaiveBayesClassifier is expecting ascii. Either make it accept Unicode, or, if this is OK for your application, replace non-ascii characters with '?' or something.

Loading a file as a JSON in python?

I'm quite new to python and im trying to get a JSON file to be created and then loaded and then organised. But I keep getting a weird error.
This is my code to write the file:
def save(): # save to file
with open(class_name, 'a') as f:
data = [name, score]
json.dump(data, f)
This is my code to load the file:
with open(class_name, 'r') as f:
data2 = json.load(f)
This is my code to organise the file:
with open(class_name, 'r') as f:
data2 = json.load(f)
Alpha = sorted(data, key=str.lower)
print(Alpha)
And this is my error:
Traceback (most recent call last):
File "<pyshell#2>", line 1, in <module>
viewscores()
File "C:\Users\Danny\Desktop\Task123.py", line 60, in viewscores
data2 = json.load(f)
File "C:\Python34\lib\json\__init__.py", line 268, in load
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw)
File "C:\Python34\lib\json\__init__.py", line 318, in loads
return _default_decoder.decode(s)
File "C:\Python34\lib\json\decoder.py", line 346, in decode
raise ValueError(errmsg("Extra data", s, end, len(s)))
ValueError: Extra data: line 1 column 13 - line 1 column 153 (char 12 - 152)
You are appending data to your file, creating multiple JSON documents in that file. The json.load() command on the other hand can only load one JSON document from a file, and is now complaining you have more data after the JSON document.
Either adjust your code to load those documents separately (insert newlines after each entry you append, and load the JSON documents line by line, or replace everything in the file with a new JSON object.
Using newlines as separators, then loading all the entries separately would look like:
def save(): # save to file
with open(class_name, 'a') as f:
data = [name, score]
json.dump(data, f)
f.write('\n')
and loading would be:
with open(class_name, 'r') as f:
scores = []
for line in f:
entry = json.loads(line)
scores.append(entry)
after which you could sort those entries if you so wish.

Categories