I'm creating a csv.reader object, setting it as an instance variable, but then when I try to iterate through it I get an error saying I'm trying to operate on a closed file. Is the reader still linked to the file somehow? I'm assigning it within my with open(blah) block, so I'm confused as to why this is happening.
Here is my code:
def __init__(self, infile, header_file, transact_file):
    """Store the input csv path and the two output targets.

    infile        -- path of the csv file to parse
    header_file   -- output for header rows (presumably a path or handle; TODO confirm)
    transact_file -- output for transaction rows (same caveat)
    """
    self.infile = infile
    self.header_of = header_file
    self.transact_of = transact_file
def create_reader(self):
    """Create a csv reader."""
    with open(self.infile, 'r') as inf:
        logging.info('Infile name: {0}'.format(inf))
        # BUG (the subject of this question): the reader stays bound to
        # inf, but the with block closes inf the moment this method
        # returns -- any later iteration of self.csv_reader raises
        # "ValueError: I/O operation on closed file".
        self.csv_reader = reader(inf, quotechar='"')
def parse_headers(self):
    """Separate header files ("H", "S") from transaction files."""
    headers = []
    transactions = []
    # NOTE(review): self.csv_reader was created inside a with block in
    # create_reader, so its underlying file is already closed here --
    # this loop is where the traceback's ValueError is raised.
    for row in self.csv_reader:
        # first field of each row tags its type: 'H', 'S', or other
        row_type = row[0]
        logging.info('Row type is: {0}'.format(row_type))
        if row_type == 'H':
            logging.info('Row added to header list.')
            headers.append(row)
        elif row_type == 'S':
            # 'S' rows are de-duplicated before being kept as headers
            if row not in headers:
                logging.info('Row added to header list.')
                headers.append(row)
        else:
            logging.info('Row added to transaction list.')
            transactions.append(row)
    # Debugging and verification
    # NOTE(review): headers/transactions are locals -- they are logged
    # but never returned or stored on self; confirm that is intended.
    logging.info('Header list contains: {0}'.format('\n'.join([str(header) for header
                 in headers])))
    logging.info('Transaction list contains: {0}'.format(
        '\n'.join([str(trans) for trans in transactions])))
Here is my error stack:
Traceback (most recent call last):
File "./gen_pre.py", line 155, in <module>
main()
File "./gen_pre.py", line 25, in main
parser.run_process()
File "./gen_pre.py", line 140, in run_process
self.parse_headers()
File "./gen_pre.py", line 68, in parse_headers
for row in self.csv_reader:
ValueError: I/O operation on closed file
The with statement automatically closes the file when you leave the block.
You have to do
# Replacement body for create_reader: keeping the open file handle on
# self (instead of a with block) means the reader's underlying file is
# still open when other methods iterate self.csv_reader later.
self.inf = open(self.infile, 'r')
self.csv_reader = reader(self.inf, quotechar='"') # self.inf
and you will have to close the file manually.
def close_reader(self):
    """Close the file handle opened by create_reader.

    Fix: csv.reader objects are plain iterators and expose no close()
    method, so the original ``self.csv_reader.close()`` line raised
    AttributeError. Only the underlying file object needs closing; the
    reader simply becomes unusable afterwards.
    """
    self.inf.close()
Context managers are great because they automatically close files for you. Instead of manually opening and closing the file, you could read the whole file and pass a list of the rows to the CSV reader:
def create_reader(self):
    """Create a csv reader over the file's full contents.

    Because readlines() materializes every line while the file is still
    open, the reader keeps working after the with block closes the file.
    """
    with open(self.infile, 'r') as inf:
        logging.info('Infile name: {0}'.format(inf))
        lines = inf.readlines()
    # reader accepts any iterable of strings, so a list of lines is fine
    self.csv_reader = reader(lines, quotechar='"')
The csv.reader object will accept anything it can iterate over, so a list of each line in the file (from readlines) will work fine.
Related
I have most of the program done. The last part of this program needs to open the file in append mode> Add 2 names > close file. Then, it has to open file in read mode> print contents> close file.
The file path has been assigned to a variable.
I keep getting the below error. (code is below that)
I don't know what to do to fix this
Traceback (most recent call last):
File "C:\Users\gorri\Desktop\College Work\CPT180_ShellScripting\Assignments\Programs\workWithFiles2.py", line 34, in
cat_files = open(cat_files, 'a')
TypeError: expected str, bytes or os.PathLike object, not TextIOWrapper
from pathlib import Path
import os
import os.path
path_E = Path('E:/CPT180Stuff')
os.chdir(path_E)
cat_files = (path_E / 'pets' / 'cats' / 'catnames.txt')
#opens catnames file to append end and add names.
cat_files = open(cat_files, 'a')  # NOTE(review): rebinds cat_files from the Path to a file handle
cat_files.write('Princess\nFreya\n')
cat_files.close()
# BUG (the reported TypeError): cat_files is now the closed file handle
# from above, not the path, so open() rejects it.
cat_files = open(cat_files, 'r')
cat_files = cat_files.read()
print(cat_files)
cat_files.close()  # NOTE(review): would also fail -- cat_files is a str after read()
In your current code, you are first assigning cat_files to the file name, but then in this line:
cat_files = open(cat_files, 'r')
You are now assigning cat_files to a file handle, which is not a string. This is why the next statement fails: it is expecting the filename string, not the file handle. You should use a different variable name for the handle, e.g.:
#opens catnames file to append end and add names.
f = open(cat_files, 'a')
f.write('Princess\nFreya\n')
f.close()
f = open(cat_files, 'r')
# FIX: read into a separate name. The original ``f = f.read()`` rebound
# f to the returned string, so the final f.close() raised
# AttributeError -- the same rebinding mistake this answer is fixing.
contents = f.read()
print(contents)
f.close()
I am trying to run through an excel file line by line and create a new list and then append every cell value on that line to the list. I don't think my code is correct but I just want to know why it cannot find the file, this is the error message.
def createPersonList(fileName):
    # NOTE(review): the opened file object is discarded -- it is never
    # read from and never closed.
    open(fileName)
    i = 0.0
    # BUG: this iterates the characters of the fileName STRING, not the
    # lines of the opened file (hence the confusing behavior reported).
    for line in fileName:
        i += 1
        # Person is re-created on every iteration, so only the last
        # iteration's contents could ever survive.
        Person = []
        for cell in line:
            Person.append(cell)
    return Person
error message:
createPersonList(personData.csv) Traceback (most recent call last):
File "<ipython-input-36-207031458d64>", line 1, in <module>
createPersonList(personData.csv) NameError: name 'personData' is not defined
I don't understand exactly what you want, and I also don't know the structure of your file.
But that's something similar with what you want:
import csv
def createPersonList(fileName):
    """Read a tab-delimited csv file, skip its header row, and return
    every remaining cell as one flat list."""
    person_list = []
    with open(fileName, 'r') as csv_file:
        rows = csv.reader(csv_file, delimiter='\t')
        # drop the header row; the None default avoids StopIteration
        # on an empty file
        next(rows, None)
        for row in rows:
            person_list.extend(row)
    return person_list
I have a large zone file that I want to move to another provider. My issue is the export is only one large concatenated zone file where as my new registrar only accepts single standard zone files.
For example allzone.txt contains:
somedomain.com
=========
Record data...
...
------------
anotherdomain.com
=========
Record data...
...
------------
evenmoredomain.com
=========
Record data...
...
------------
What I'd like to happen is that it takes the one file above and creates 3 files.
somedomain.com.txt
anotherdomain.com.txt
evenmoredomain.com.txt
Inside each of the files the delimiters of :
anydomain.com
=========
and
------------
are removed, leaving only the
"Record data"
between them.
So a file should be named domainA.com.txt and inside just the corresponding record data.
Not sure what the best way to do this. I can split on a delimiter but not sure how to take that content to write a new file where the name is what is before the delimiter (anydomain.com)
Thanks!
More or less
current_file = None
with open('allzone.txt') as f:
    # read line by line
    for line in f:
        # open next file and close previous
        # NOTE(review): only matches domains that literally start with
        # "domain" -- superseded by the "new version for any domain" below
        if line.startswith('domain'):
            # close previous file
            if current_file:
                current_file.close()
            # open new file
            current_file = open(line.strip() + '.txt', 'w')
        # write to current file
        if current_file:
            # skip the domain line itself and the '===' / '---' delimiters
            if not (line.startswith('domain') or line.startswith('---') or line.startswith('===')):
                current_file.write(line)
# close last file
if current_file:
    current_file.close()
EDIT: new version for any domain
current_file = None
with open('allzone.txt') as f:
    # read line by line
    for line in f:
        # open next file
        if not current_file:
            # open new file (this line is the domain name -> <domain>.txt)
            current_file = open(line.strip() + '.txt', 'w')
            # skip next line
            # (next(f) advances the shared file iterator, consuming the
            # '=========' delimiter so the for loop never sees it)
            next(f)
        else:
            # close previous file
            if line.startswith('---') :
                current_file.close()
                current_file = None
            # write records
            #elif not line.startswith('==='): # use it if you don't use `next(f)`
            else:
                current_file.write(line)
# close last file
if current_file:
    current_file.close()
Maybe something like this would work? It might still need some tweaking
def main():
    """Split allzone.txt into one <domain>.txt file per zone section.

    Sections look like::

        domain.name
        =========
        <record lines...>
        ------------

    The domain line names the output file; the ``=========`` delimiter
    and literal ``...`` filler lines are dropped, everything else between
    the delimiters is copied verbatim.
    """
    # 'r' instead of 'r+': the file is only read, never written
    with open('allzone.txt', 'r') as f:
        data = ''
        first_line = ''
        for line in f:
            if first_line == '':
                # first line of a section is the domain name
                first_line = line
            elif line == '------------\n':
                # end of section: flush the collected record data
                with open('%s.txt' % first_line.rstrip(), 'w') as new_file:
                    new_file.write(data)
                first_line = ''
                data = ''
            elif line == '=========\n' or line == '...\n':
                # delimiter / filler lines are not copied.  (The original
                # also tested '------------\n' here, but that case is
                # handled by the branch above -- dead code, removed.)
                pass
            else:
                data += line
        # robustness: flush a trailing section that lacks the closing
        # delimiter instead of silently dropping it
        if first_line and data:
            with open('%s.txt' % first_line.rstrip(), 'w') as new_file:
                new_file.write(data)
if __name__ == '__main__':
    main()
I'm a QA tester who is new to python, trying to create a script to create multiple XML files from a CSV file containing various fields. I feel I am close to creating this program. Unfortunately, I have been getting the following error when adding code to advance to the next line in the CSV file (line = next(reader)). If I don't add the line to advance, the program will run but multiple xml files will be created with information from only the first line of the CSV file. I can't figure out why or how to fix it.
Error Message:
Traceback (most recent call last):
File "C:\Users\xxxxxxxxx\Desktop\defxmlImportcsv.py", line 22, in <module>
line = next(reader)
ValueError: I/O operation on closed file.
Here is my code:
import xml.etree.ElementTree as etree
import csv
with open('datanames.csv') as csvfile:
    reader = csv.reader(csvfile)
    x=0
    line = next(reader)
    line = next(reader)
# NOTE(review): as posted, the while loop sits OUTSIDE the with block --
# exactly what the accepted answer points out -- so csvfile is already
# closed by the time next(reader) runs inside the loop.
while x<2:
    filename = "Output"+str(x)+".xml"
    [firstName,lastName] = line
    print(line)
    tree = etree.parse('WB5655(BR-09).xml')
    root = tree.getroot()
    registration_id=tree.find('primaryApplicant/ssn')
    registration_id.text = str(53)
    first_name = tree.find('primaryApplicant/firstName')
    first_name.text = (line[0])
    last_name = tree.find('primaryApplicant/lastName')
    last_name.text =(line[1])
    line = next(reader)  # fails here: the reader's file was closed when the with block ended
    tree.write(filename)
    print(x)
    x=x+1
Any help would be greatly appreciated. Thanks in advance.
csvfile is automatically closed when you exit your with block. Which means that reader, in turn, can no longer read from it, causing your line = next(reader) line to fail.
The easiest (and likely most correct) fix is to add indentation to your code so that your while loop is inside the with block.
You exited the with statement:
with open('datanames.csv') as csvfile:
    reader = csv.reader(csvfile)
    x=0
    line = next(reader)
    line = next(reader)
# the while loop is NOT indented under the with, so the file is already
# closed when the loop body runs:
while x<2:
    # ...
The moment the while line is reached the csvfile file object is closed, because, logically, that block is outside of the with statement (not matching the indentation).
The solution is to indent the whole while loop to be within the with block:
with open('datanames.csv') as csvfile:
    reader = csv.reader(csvfile)
    x=0
    line = next(reader)
    line = next(reader)
    # the while loop is now inside the with block, so csvfile stays open
    # for every next(reader) call:
    while x<2:
        # ...
Rather than use while, use itertools.islice() to loop just twice:
from itertools import islice
tree = etree.parse('WB5655(BR-09).xml')
registration_id=tree.find('primaryApplicant/ssn')
registration_id.text = '53'
with open('datanames.csv') as csvfile:
    reader = csv.reader(csvfile)
    # skip two lines
    # (islice(reader, 2, 2) yields nothing but consumes the first two
    # rows of the shared iterator; the None default stops next() from
    # raising StopIteration on a short file)
    next(islice(reader, 2, 2), None)
    # islice(reader, 2) caps the loop at the next two rows, replacing
    # the manual while x<2 counter
    for x, row in enumerate(islice(reader, 2)):
        filename = "Output{}.xml".format(x)
        first_name = tree.find('primaryApplicant/firstName')
        last_name = tree.find('primaryApplicant/lastName')
        first_name.text, last_name.text = row
        tree.write(filename)
I simplified your XML handling as well; you don't have to read the input XML tree twice, for example.
Ok so the file contains:
apple,bot,cheese,-999
tea,fire,water,1
water,mountain,care,-999
So I want to check if the lines in file 1 have a -999 at the end and if they do, remove that line, and transfer the one that does not into a new file. So far my function has:
def clean(filename,cleanfile,value,position):
    # presumably meant to skip a header line -- it consumes the first
    # line before the loop; verify against the caller
    filename.readline()
    for line in filename:
        # BUG: line[position] is a single CHARACTER of the raw line, not
        # the comma-separated field at that position -- the line must be
        # split on ',' first, so this comparison with '-999' never works
        # as intended.
        if line[position] != value:
            cleanfile.write(line)
Value is -999 and position is 3. I opened my files in my main and passed them to the function, the problem is that the new file is empty.
You can use the csv module to figure out the details of splitting and joining the comma-separated values.
import csv
def clean(filename,cleanfile,value,position):
with open(filename) as reader_fp, open(cleanfile, 'w') as writer_fp:
reader = csv.reader(reader_fp)
writer = csv.writer(writer_fp)
for row in reader:
if row[position] != value:
writer.writerow(row)
try this:
def clean(filename, cleanfile, value, position):
    """Write every line of the open file ``filename`` to ``cleanfile``
    except those whose comma-separated field at ``position`` equals
    ``value``.  Neither handle is closed here."""
    for raw in filename.readlines():
        fields = raw.strip().split(",")
        if fields[position] != value:
            cleanfile.write(",".join(fields) + "\n")
clean(open("readFrom.txt", "r"), open("writeTo.txt", "w"), "-999", 3)
if you know that the value is always at the end of each line, you can try :
def clean(file1, file2, value):
    """Copy each line of ``file1`` to ``file2`` unless its LAST
    comma-separated field equals ``value``; closes both files."""
    for record in file1:
        fields = record.strip().split(",")
        if fields[-1] != value:
            file2.write(record)
    file1.close()
    file2.close()
clean(open("readFrom.txt", "r"), open("writeTo.txt", "w"), "-999")