This is my code:
def report_one_friend(self):
    filename = QFileDialog.getSaveFileName(self, "", "cars.xlsx", "Excel(.xlsx)")
    if filename:
        openFile = open(filename, 'r').read()
        self.plainTextEdit.appendPlainText(openFile)
        wb = xlsxwriter.Workbook(filename[0])
        sheet1 = wb.add_worksheet()
        sql = '''SELECT * FROM ahmed WHERE mth_search = %s'''
        mth_search = self.lineEdit_3.text()
        c = self.conn.cursor()
        c.execute(sql, [(mth_search)])
        data = c.fetchall()
        for row in data:
            print(row)
            sheet1.write(0, 2, 'الاسم')
            sheet1.write(0, 0, row[1])
            sheet1.write(1, 2, 'الرقم')
            sheet1.write(1, 0, row[2])
        wb.close()
and it gives me this error:
Connected to MySQL database using C extension... MySQL Server version on 8.0.12
Traceback (most recent call last):
File "/Users/mahmoudtarek/Desktop/mth1/index.py", line 174, in mth_friends
self.report_one_friend()
File "/Users/mahmoudtarek/Desktop/mth1/index.py", line 208, in report_one_friend
openFile = open(filename, 'r').read()
TypeError: expected str, bytes or os.PathLike object, not tuple
QFileDialog.getSaveFileName() gives you a tuple of "the selected file" and "the applied filter".
Minimal code example:
from PyQt5.Qt import *
app = QApplication([])
print(QFileDialog.getSaveFileName())
selected "some file":
('C:/scratches/scratch.py', 'All Files (*)')
But when you don't select anything and cancel the dialog, both strings are empty: ('', '').
Because a tuple containing two strings is truthy either way, your if filename: check passes and you try to open() a file with the whole tuple, which leads to your error.
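A quick interactive check illustrates the truthiness:

>>> bool(('', ''))   # a two-element tuple is truthy even when both strings are empty
True
>>> bool('')         # an empty string on its own is falsy
False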
Solution:
Unpack the tuple into two variables, or index it with [0], like in the following:
filename, filter = QFileDialog.getSaveFileName(self, "", "cars.xlsx", "Excel(.xlsx)")
if filename:
    openFile = open(filename, 'r').read()
    ...
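For context, a rough sketch of how the fix could look inside the asker's method; the widget names and the xlsxwriter part are taken from the question, and the rest of the body is assumed to stay as it was:

def report_one_friend(self):
    # getSaveFileName returns (path, selected_filter); unpack and stop on cancel
    filename, _ = QFileDialog.getSaveFileName(self, "", "cars.xlsx", "Excel(.xlsx)")
    if not filename:
        return
    wb = xlsxwriter.Workbook(filename)   # filename is now a plain string
    sheet1 = wb.add_worksheet()
    ...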
I saved an image as a BLOB in the sqlite3 database column profile. I call the function insertBLOB with the relevant info:
sqliteConnection = sqlite3.connect('image_try.db')
cursor = sqliteConnection.cursor()
cursor.execute("""CREATE TABLE IF NOT EXISTS images (
id INTEGER PRIMARY KEY,
fullname TEXT,
username TEXT,
profile BLOB)""")
def convertToBinaryData(filename):
    with open(filename, 'rb') as file:
        blobData = file.read()
    return blobData

def insertBLOB(name, username, photo):
    sqliteConnection = sqlite3.connect('image_try.db')
    sqliteConnection.text_factory = str
    cursor = sqliteConnection.cursor()
    sqlite_insert_blob_query = """ INSERT INTO images
                                   (fullname, username, profile) VALUES (?, ?, ?)"""
    empPhoto = convertToBinaryData(photo)
    data_tuple = (name, username, empPhoto)
    cursor.execute(sqlite_insert_blob_query, data_tuple)
    sqliteConnection.commit()
I tried to access the image file (so I could display it in a Label) like this, by calling the function readBlobData:
def writeTofile(data):
    # Convert binary data to proper format and write it on Hard Disk
    this = open(data, 'rb')
    this.open(io.BytesIO(base64.b64decode(data)))
    return this

def readBlobData(empId):
    try:
        sqliteConnection = sqlite3.connect('image_try.db')
        sqliteConnection.text_factory = str
        cursor = sqliteConnection.cursor()
        sql_fetch_blob_query = """SELECT * from images where id = ?"""
        cursor.execute(sql_fetch_blob_query, (empId,))
        record = cursor.fetchall()
        profile = record[0][3]  # Blob object
        profile = writeTofile(profile)
        image = ImageTk.PhotoImage(profile)
        image_label = Label(root, image=image)
        image_label.photo = image
        image_label.pack()
        cursor.close()
When I call the function readBlobData I get this error:
Traceback (most recent call last):
  File "C:/Users/hilab/PycharmProjects/dafyProject/addimage.py", line 90, in <module>
    readBlobData(1)
  File "C:/Users/hilab/PycharmProjects/dafyProject/addimage.py", line 67, in readBlobData
    profile = writeTofile(profile)
  File "C:/Users/hilab/PycharmProjects/dafyProject/addimage.py", line 51, in writeTofile
    this = open(data, 'rb')
TypeError: file() argument 1 must be encoded string without NULL bytes, not str
Do you have any idea what the problem is, and how can I fix it? How can I access the BLOB object from the SQLite database and display it?
The traceback is telling us that something is going wrong in the writeTofile function, specifically when we try to open a file:
    profile = writeTofile(profile)
  File "C:/Users/hilab/PycharmProjects/dafyProject/addimage.py", line 51, in writeTofile
    this = open(data, 'rb')
TypeError: file() argument 1 must be encoded string without NULL bytes, not str
The value that we are passing to the function is the binary image data read from the database:
profile = record[0][3]
Inside the function, we are trying to use this binary data as the name of the file that we are going to open, which is why open() complains about NULL bytes in its first argument.
def writeTofile(data):
    # Convert binary data to proper format and write it on Hard Disk
    this = open(data, 'rb')
    this.open(io.BytesIO(base64.b64decode(data)))
    return this
tkinter.PhotoImage expects the path to a file, according to its documentation, so we have to create a file from the image bytes:
def writeTofile(data):
    # Write the image bytes to the hard disk
    # (ideally with a suitable name and extension)
    filename = 'myfile.img'
    with open(filename, 'wb') as f:
        f.write(data)
    return filename
And in readBlobData:
image = ImageTk.PhotoImage(file=profile)
And then all should be well.
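Putting the two changes together, a minimal sketch of readBlobData (the table layout, root, Label and ImageTk come from the question; the filename returned by the fixed writeTofile is just an illustration):

def readBlobData(empId):
    sqliteConnection = sqlite3.connect('image_try.db')
    cursor = sqliteConnection.cursor()
    cursor.execute("SELECT profile FROM images WHERE id = ?", (empId,))
    blob = cursor.fetchone()[0]              # raw image bytes from the BLOB column
    path = writeTofile(blob)                 # dump the bytes into a file on disk
    image = ImageTk.PhotoImage(file=path)    # load the image from that file
    image_label = Label(root, image=image)
    image_label.photo = image                # keep a reference so it is not garbage collected
    image_label.pack()
    cursor.close()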
I'm trying to import 5,000 .txt files into a PostgreSQL database. My script runs fine until it reaches a line which doesn't fit the format. For example, every file has a newline at the end, which also causes the script to crash.
I've tried to handle the exceptions, but without success...
My script:
import csv
import os
import sys

import psycopg2

conn = psycopg2.connect(
    host="localhost",
    database="demo",
    user="demo",
    password="123",
    port="5432"
)
cur = conn.cursor()

maxInt = sys.maxsize
while True:
    try:
        csv.field_size_limit(maxInt)
        break
    except OverflowError:
        maxInt = int(maxInt / 10)

def searchFiles(directory='', extension=''):
    print('SEARCHING IN: ', directory)
    filelist = []
    extension = extension.lower()
    for dirpath, dirnames, files in os.walk(directory):
        for name in files:
            if extension and name.lower().endswith(extension):
                filelist.append(os.path.join(dirpath, name))
            elif not extension:
                print('FAILED TO READ: ', (os.path.join(dirpath, name)))
    print('FINISHED FILE SEARCH AND FOUND ', str(len(filelist)), ' FILES')
    return filelist

def importData(fileToImport):
    with open(fileToImport, 'r') as f:
        reader = csv.reader(f, delimiter=':')
        for line in reader:
            try:
                cur.execute("""INSERT INTO demo VALUES (%s, %s)""", (line[0], line[1]))
                conn.commit()
            except:
                pass
                print('FAILED AT LINE: ', line)

print(conn.get_dsn_parameters())
cur.execute("SELECT version();")
record = cur.fetchone()
print("You are connected to - ", record)

fileList = searchFiles('output', '.txt')
counter = 0
length = len(fileList)
for file in fileList:
    # if counter % 10 == 0:
    print('Processing File: ', str(file), ', COMPLETED: ', str(counter), '/', str(length))
    importData(str(file))
    counter += 1
print('FINISHED IMPORT OF ', str(length), ' FILES')
A few lines of the data I'm trying to import:
example1#example.com:123456
example2#example.com:password!1
The error I'm getting:
File "import.py", line 66, in <module>
importData(str(file))
File "import.py", line 45, in importData
for line in reader:
_csv.Error: line contains NULL byte
How should I handle lines which cannot be imported?
Thanks for any help.
Your traceback shows that the exception is raised at for line in reader:
File "import.py", line 45, in importData
for line in reader:
_csv.Error: line contains NULL byte
and you do not handle exceptions at that point. As the message suggests, the error is raised by your csv reader instance. While you certainly could wrap your for loop in a try-except block, the loop will still end once the exception is raised.
This exception may be caused by the file having a different encoding than your locale's, which is assumed by open() if no encoding is explicitly provided:
In text mode, if encoding is not specified the encoding used is platform dependent: locale.getpreferredencoding(False) is called to get the current locale encoding.
The accepted answer in this Q&A outlines a solution to deal with that, provided that you can identify the correct encoding to open the file with. The Q&A also shows some approaches on how to get rid of NULL bytes in the file, prior to handing it over to a reader.
You might also want to simply skip empty lines instead of sending them to your DB and handling the exception there, e.g.
for line in reader:
    if not line:
        continue
    try:
        [...]
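If you would rather clean the offending bytes up front, one option is to strip NUL bytes from each line before handing them to the reader, since csv.reader accepts any iterable of strings. A sketch, assuming the files are otherwise plain ':'-delimited text as in the sample data:

def importData(fileToImport):
    with open(fileToImport, 'r', errors='replace') as f:
        cleaned = (line.replace('\0', '') for line in f)   # drop NUL bytes line by line
        reader = csv.reader(cleaned, delimiter=':')
        for line in reader:
            if not line:   # skip blank lines such as the trailing newline
                continue
            cur.execute("""INSERT INTO demo VALUES (%s, %s)""", (line[0], line[1]))
            conn.commit()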
I am fetching values from a database table called blocked_sites. If the value of the 0th attribute in blocked_sites is present in the 19th or 26th field of the file items.csv, then that row is to be excluded from the CSV file. I am writing code for that and getting this error:
$ python csv_dupli_prev.py
Traceback (most recent call last):
File "csv_dupli_prev.py", line 48, in <module>
found = re.search(row[0], row1[19])
File "/home/debarati/anaconda3/lib/python3.6/re.py", line 182, in search
return _compile(pattern, flags).search(string)
File "/home/debarati/anaconda3/lib/python3.6/re.py", line 300, in _compile
raise TypeError("first argument must be string or compiled pattern")
TypeError: first argument must be string or compiled pattern
The code is as follows:
connection = pymysql.connect(host="localhost", user="root", passwd="......", db="city_details")
cursor = connection.cursor()

csv_file = csv.reader(open("items.csv", "r"))
newrows = []

cursor.execute("select * from blocked_sites")
data4 = cursor.fetchall()

for row in data4:
    for row1 in csv_file:
        str1 = row1[19]
        str2 = row1[26]
        found = re.search(row[0], str1)
        found1 = re.search(row[0], str2)
        if found == None and found1 == None and row1 not in newrows:
            newrows.append(row1)

writer = csv.writer(open("items.csv", "w"))
writer.writerows(newrows)
I changed the following line in my code:
cursor.execute ("select * from blocked_sites")
to this:
cursor.execute ("select content from blocked_sites")
and the bug got fixed. Presumably with select * the first column returned (row[0]) was not a string (an id, for example), which re.search rejects as a pattern; with select content, row[0] is the site string that re.search expects as its first argument.
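As a side note, not part of the original fix: site names usually contain characters such as . that are special in regular expressions, so if a plain substring match is all that is needed, escaping the pattern is slightly safer. A sketch:

found = re.search(re.escape(row[0]), str1)    # treat the blocked site as a literal string
found1 = re.search(re.escape(row[0]), str2)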
Hello all, I keep getting this error while making a small program to sort out large CSV files. Below are my code and the error; what am I doing wrong?
if selection:
    for stuff in stuffs:
        try:
            textFile = open("output.txt", 'w')
            mycsv = csv.reader(open(stuff))
            d_reader = csv.DictReader(mycsv)
            headers = d_reader.fieldnames  # <-- Error happens here
            if selection in headers:
                placeInList = headers.index(selection)
                #placeInList = selection.index(selection)
                for selection in tqdm(mycsv, desc='Extracting column values...', leave = True):
                    textFile.write(str(selection[int(placeInList)])+'\n')
                print 'Done!'
                textFile.close()
                sys.exit()
        except IOError:
            print 'No CSV file present in directory'
            sys.exit()
else:
    sys.exit()
And the error:
Traceback (most recent call last):
  File "postcodeExtractor.py", line 27, in <module>
    headers = d_reader.fieldnames
  File "C:\Python27\lib\csv.py", line 90, in fieldnames
    self._fieldnames = self.reader.next()
TypeError: expected string or Unicode object, list found
Instead of
mycsv = csv.reader(open(stuff))
d_reader = csv.DictReader(mycsv)
you want
d_reader = csv.DictReader(open(stuff))
The first line is the problem: csv.DictReader wants an iterable of strings (such as an open file object), but here it is handed a csv.reader, which yields lists; hence "expected string or Unicode object, list found".
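A minimal sketch of the intended pattern (Python 2, as in the traceback; stuff and selection come from the question):

with open(stuff) as csv_file:
    d_reader = csv.DictReader(csv_file)   # DictReader reads the file itself
    headers = d_reader.fieldnames         # the first row supplies the field names
    for row in d_reader:                  # each row is a dict keyed by those headers
        print row[selection]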
I'm creating a csv.reader object, setting it as an instance variable, but then when I try to iterate through it I get an error saying I'm trying to operate on a closed file. Is the reader still linked to the file somehow? I'm assigning it within my with open(blah) block, so I'm confused as to why this is happening.
Here is my code:
def __init__(self, infile, header_file, transact_file):
    self.infile = infile
    self.header_of = header_file
    self.transact_of = transact_file

def create_reader(self):
    """Create a csv reader."""
    with open(self.infile, 'r') as inf:
        logging.info('Infile name: {0}'.format(inf))
        self.csv_reader = reader(inf, quotechar='"')

def parse_headers(self):
    """Separate header files ("H", "S") from transaction files."""
    headers = []
    transactions = []
    for row in self.csv_reader:
        row_type = row[0]
        logging.info('Row type is: {0}'.format(row_type))
        if row_type == 'H':
            logging.info('Row added to header list.')
            headers.append(row)
        elif row_type == 'S':
            if row not in headers:
                logging.info('Row added to header list.')
                headers.append(row)
        else:
            logging.info('Row added to transaction list.')
            transactions.append(row)
    # Debugging and verification
    logging.info('Header list contains: {0}'.format(
        '\n'.join([str(header) for header in headers])))
    logging.info('Transaction list contains: {0}'.format(
        '\n'.join([str(trans) for trans in transactions])))
Here is my error stack:
Traceback (most recent call last):
  File "./gen_pre.py", line 155, in <module>
    main()
  File "./gen_pre.py", line 25, in main
    parser.run_process()
  File "./gen_pre.py", line 140, in run_process
    self.parse_headers()
  File "./gen_pre.py", line 68, in parse_headers
    for row in self.csv_reader:
ValueError: I/O operation on closed file
The with statement automatically closes the file when you leave the block.
You have to do
self.inf = open(self.infile, 'r')
self.csv_reader = reader(self.inf, quotechar='"') # self.inf
and you will have to close the file manually.
def close_reader(self):
    # csv reader objects have no close() method; close the underlying file object instead
    self.inf.close()
Context managers are great because they automatically close files for you. Instead of manually opening and closing the file, you could read the whole file and pass a list of the rows to the CSV reader:
def create_reader(self):
    """Create a csv reader."""
    with open(self.infile, 'r') as inf:
        logging.info('Infile name: {0}'.format(inf))
        file_data = inf.readlines()
    self.csv_reader = reader(file_data, quotechar='"')
The csv.reader object will accept anything it can iterate over, so a list of each line in the file (from readlines) will work fine.