I have a little program that just needs to read one (and only one) row from a csv file and write the column values to a series of files. The program has three system arguments: the path to the data file, the job id (uuid), and the target row number, i.e. the row in the csv that I want to parse. It's not working, how can I fix it?
import csv
import sys
import itertools
f = sys.argv[1]
uuid = sys.argv[2]
target_row = sys.argv[3]
tmpdir="/tmp/pagekicker/"
folder = tmpdir+uuid
destination1 = folder + '/csv/row.editedby'
destination3 = folder + '/csv/row.booktitle'
destination4 = folder + '/csv/row.seeds'
destination5 = folder + '/csv/row.imprint'
f = open(f, 'rb')
f1 = open(destination1, 'w')
f3 = open(destination3, 'w')
f4 = open(destination4, 'w')
f5 = open(destination5, 'w')
target_row = int(target_row)
try:
reader = csv.reader(f) # creates the reader object
for row in itertools.islice(reader,1,1): # iterates the rows of the file in orders
editedby = row[0] # we throw away column 2
booktitle = row[2]
print row[2]
seeds = row[3]
imprint = row[4]
f1.write(editedby)
f3.write(booktitle)
f4.write(seeds)
f5.write(imprint)
f.close()
f1.close()
f3.close()
f4.close()
f5.close()
finally:
print 'done'
UPDATE: thanks Graham Bell for his suggested code. There are two "f5s" in the first line of his 'with' statement My code now looks like this:
i
mport csv
import sys
import itertools
f = sys.argv[1]
uuid = sys.argv[2]
target_row = sys.argv[3]
tmpdir="/tmp/pagekicker/"
folder = tmpdir+uuid
# os.mkdir(folder)
destination3 = folder + '/csv/row.booktitle'
destination1 = folder + '/csv/row.editedby'
destination4 = folder + '/csv/row.seeds'
destination5 = folder + '/csv/row.imprint'
with open(f, 'rb') as f, open(destination1, 'w') as f1, open(destination3, 'w') as f3, open(destination4, 'w') as f4, open(destination5, 'w') as f5:
target_row = int(target_row)
try:
reader = csv.reader(f) # creates the reader object
for row in itertools.islice(reader,1,1): # iterates the rows of the file in orders
editedby = row[0] # we throw away column 2
booktitle = row[2]
print row[2]
seeds = row[3]
imprint = row[4]
f1.write(editedby)
f3.write(booktitle)
f4.write(seeds)
f5.write(imprint)
except
print 'done'
Without the except, it generates "unexpected unindent" when I run it. With the except, it says that the except line is invalid syntax.
the csv library DictReader() object has the ability to display the current line number with:
reader = csv.DictReader(csv_file)
reader.line_num
you could iterate through and do nothing until you reach the correct line number that you need, something like this:
for row in reader:
if reader.line_num == row_you_want
do something
the DictReader class also allows you to have the first row in your CSV file to be title columns, and then you can access them like so:
reader["title_of_column1"]
which might save you some work as well, also you should use the python with block when working with files like so:
with open(f, 'rb') as f, open(destination1, 'w') as f1, open(destination3, 'w') as f3, open(destination4, 'w') as f5, open(destination5, 'w') as f5:
target_row = int(target_row)
try:
reader = csv.reader(f) # creates the reader object
for row in itertools.islice(reader,1,1): # iterates the rows of the file in orders
editedby = row[0] # we throw away column 2
booktitle = row[2]
print row[2]
seeds = row[3]
imprint = row[4]
f1.write(editedby)
f3.write(booktitle)
f4.write(seeds)
f5.write(imprint)
This way you don't have to worry about closing them all
Assuming you count rows from 1 (rather than 0), here's a standalone function that will do it:
import csv
from contextlib import contextmanager
import sys
import itertools
#contextmanager
def multi_file_manager(files, mode='r'):
""" Context manager for multiple files. """
files = [open(file, mode) for file in files]
yield files
for file in files:
file.close()
# This is the standalone function
def csv_read_row(filename, n):
""" Read and return nth row of a csv file, counting from 1. """
with open(filename, 'rb') as f:
reader = csv.reader(f)
return next(itertools.islice(reader, n-1, n))
if len(sys.argv) != 4:
print('usage: utility <csv filename> <uuid> <target row>')
sys.exit(1)
tmpdir = "/tmp/pagekicker"
f = sys.argv[1]
uuid = sys.argv[2]
target_row = int(sys.argv[3])
folder = os.path.join(tmpdir, uuid)
destinations = [folder+dest for dest in ('/csv/row.editedby',
'/csv/row.booktitle',
'/csv/row.seeds',
'/csv/row.imprint')]
with multi_file_manager(destinations, mode='w') as files:
row = csv_read_row(f, target_row)
#editedby, booktitle, seeds, imprint = row[0], row[2], row[3], row[4]
for i,j in zip(range(4), (0, 2, 3, 4)):
files[i].write(row[j]+'\n')
Related
heres my little program. at the end i want to write the names and passwords
into csv file like this:
Jack,9978
Sara,1647
but i cant!? my program output is correct but when i write it into csv it goes like:
Jack9978,Sara1674
how will you fix it?
import hashlib
import csv
answer = []
usr_pas = []
with open('...', 'r') as f:
reader = csv.reader(f)
for word in reader:
usr_pas.append(word)
for i in range(999, 10000):
num = str(i)
m = hashlib.sha256()
m.update(num.encode('utf-8'))
hsh = m.hexdigest()
hash_dict = {hsh: num}
for key in list(hash_dict.items()):
for value in usr_pas:
if key[0] == value[1]:
answer.append(value[0] +','+ key[1])
file = open("...", 'w', newline='')
with file:
writer = csv.writer(file)
writer.writerow(i.strip().replace(',', '') for i in answer)
file.close()
what did i wrong!?
Try this (lines with comments are changed):
import hashlib
import csv
answer = []
usr_pas = []
with open('...', 'r') as f:
reader = csv.reader(f)
for word in reader:
usr_pas.append(word)
for i in range(999, 10000):
num = str(i)
m = hashlib.sha256()
m.update(num.encode('utf-8'))
hsh = m.hexdigest()
hash_dict = {hsh: num}
for key in list(hash_dict.items()):
for value in usr_pas:
if key[0] == value[1]:
answer.append(value[0] +','+ key[1] + '\n') #added '\n' at the end
file = open("...", 'w', newline='')
with file:
writer = csv.writer(file)
writer.writerow(i for i in answer) #removed i.replace
file.close()
I guess you want a csv file with multiple lines instead of one. If so, my suggestion is to use csv.csvwriter.writerows instead of csv.csvwriter.writerow. The latter is designed to write a single row. See the official document here. Indeed multiple lines might be created with \n manipulator, it means a single line with multiple elements that contains "new line", which seems awkward.
Since we can use the default delimiter (comma), we just need to manage each element in the line as a tuple (or a list). Answers should be added into list answer like this:
answer.append((value[0], key[1]))
while we write rows in this way:
writer.writerows(answer)
Let's put them together:
import hashlib
import csv
answer = []
usr_pas = []
with open('...', 'r') as f:
reader = csv.reader(f)
for word in reader:
usr_pas.append(word)
for i in range(999, 10000):
num = str(i)
m = hashlib.sha256()
m.update(num.encode('utf-8'))
hsh = m.hexdigest()
hash_dict = {hsh: num}
for key in list(hash_dict.items()):
for value in usr_pas:
if key[0] == value[1]:
# answer.append(value[0] +','+ key[1])
answer.append((value[0], key[1]))
file = open("...", 'w', newline='')
with file:
writer = csv.writer(file)
# writer.writerow(i.strip().replace(',', '') for i in answer)
writer.writerows(answer)
file.close()
I have the following input file with a header row:
test_in.csv
LON,LAT,NUMBER,STREET,UNIT,CITY,DISTRICT,REGION,POSTCODE,ID,HASH
-72.5708234,41.4155142,39,HICKORY LA,,,,,,,8a0df668e0d49b02
-72.5647745,41.4160301,1213,KILLINGWORTH RD,,,,,,,b3ecaab86e476f46
I need to replace any of the columns with a specified string
for example CITY column's data should be replaced from "" to "MyCity"
My code only outputs the header and first row
python test_forcefld.py test_in.csv MyCity CITY out_test.csv
import csv
import sys
in_file_name = sys.argv[1]
force_data = sys.argv[2]
force_fld = sys.argv[3]
out_file_name = sys.argv[4]
# First read top row/header from input file
fieldnames = []
for filename in [in_file_name]:
with open(filename, "rb") as f_in:
reader = csv.reader(f_in)
headers = next(reader)
for h in headers:
fieldnames.append(h)
#print headers to output file
with open(out_file_name, 'w') as fou:
dw = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
dw.writeheader()
f_in2 = open(in_file_name, "rb")
reader2 = csv.DictReader(f_in2) # Uses the field names in this file
datarow = next(reader2)
datarow[force_fld] = force_data
with open(out_file_name, 'wa') as fou:
dw2 = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
dw2.writeheader()
dw2.writerow(data row)
Output shows
LON,LAT,NUMBER,STREET,UNIT,CITY,DISTRICT,REGION,POSTCODE,ID,HASH
-72.5708234,41.4155142,39,HICKORY LA,,MyCity,,,,,8a0df668e0d49b02
Your code is a little difficult to read, but assuming datarow is a dictionary containing your records:
In your last row, change
dw2.writerow(datarow)
Into
dw2.writerows(datarow)
While you're at it, you should also consider using datarow.keys() for your fieldnames, for conciseness.
This should do it, you just need pandas:
import pandas as pd
df = pd.read_csv(in_file_name, sep=',')
df['CITY'].fillna('MyCity', inplace=True)
And to save it:
df.to_csv(out_file_name)
You can try somthing like this in order to have your desired file:
I'm assuming your input file is called f_input.txt and your output file is called f_output.txt:
data = list(k.rstrip().split(',') for k in open("f_input.txt", 'r'))
with open("f_output.txt", 'a+') as f:
f.write(",".join(data[0]) + '\n')
for k in data[1:]:
# Modify the positions of k[:n] + your data + k[n+1]
# if you need to handle another position
f.write(",".join(k[:6]) + "MyCity" + ",".join(k[7:]) + "\n")
This worked in the end:
import csv
import sys
in_file_name = sys.argv[1]
force_data = sys.argv[2]
force_fld = sys.argv[3]
out_file_name = sys.argv[4]
# First read top row/header from input file
fieldnames = []
for filename in [in_file_name]:
with open(filename, "rb") as f_in:
reader = csv.reader(f_in)
headers = next(reader)
for h in headers:
fieldnames.append(h)
f_in2 = open(in_file_name, "r")
#print headers to output file
fou = open(out_file_name, 'wa')
dw = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
dw.writeheader()
reader2 = csv.DictReader(f_in2) # Uses the field names in this file
for row in reader2:
row[force_fld] = force_data
dw2 = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
dw2.writerow(row)
When using this py code to split a large csv into smaller csv's (about ) I am receiving the error:
"OSError: [Error 24] Too Many Open Files:"
After running this there should be 29,930 separate files, however its stopping after 2048.
I have done some research and it looks like there is a per process limit of 2048. How can I get around this?
#!/usr/bin/env python3
import binascii
import csv
import os.path
import sys
from tkinter.filedialog import askopenfilename, askdirectory
from tkinter.simpledialog import askinteger
def split_csv_file(f, dst_dir, keyfunc):
csv_reader = csv.reader(f)
header = next(csv_reader)
csv_writers = {}
for row in csv_reader:
k = keyfunc(row)
if k not in csv_writers:
writer = csv.writer(open(os.path.join(dst_dir, k),
mode='w', newline=''))
writer.writerow(header)
csv_writers[k] = writer
csv_writers[k].writerow(row[0:1])
def get_args_from_cli():
input_filename = sys.argv[1]
column = int(sys.argv[2])
dst_dir = sys.argv[3]
return (input_filename, column, dst_dir)
def get_args_from_gui():
input_filename = askopenfilename(
filetypes=(('CSV', '.csv'),),
title='Select CSV Input File')
column = askinteger('Choose Table Column', 'Table column')
dst_dir = askdirectory(title='Select Destination Directory')
return (input_filename, column, dst_dir)
if __name__ == '__main__':
if len(sys.argv) == 1:
input_filename, column, dst_dir = get_args_from_gui()
elif len(sys.argv) == 4:
input_filename, column, dst_dir = get_args_from_cli()
else:
raise Exception("Invalid number of arguments")
with open(input_filename, mode='r', newline='') as f:
split_csv_file(f, dst_dir, lambda r: r[column-1]+'.csv')
# if the column has funky values resulting in invalid filenames
# replace the line from above with:
# split_csv_file(f, dst_dir, lambda r: binascii.b2a_hex(r[column-1].encode('utf-8')).decode('utf-8')+'.csv')
You don't need to keep the csv writers in a dictionary. You can re-open the file to append to it:
Replace:
if k not in csv_writers:
csv_writers[k] = csv.writer(open(os.path.join(dst_dir, k),
mode='w', newline=''))
csv_writers[k].writerow(row)
With:
filename = os.path.join(dst_dir, k)
with open(filename, mode='a', newline='') as output:
csv.writer(output).writerow(row)
I would like to do the following
read a csv file, Add a new first column, then rename some of the columns
then load the records from csv file.
Ultimately, I would like the first column to be populated with the file
name.
I'm fairly new to Python and I've kind of worked out how to change the fieldnames however, loading the data is a problem as it's looking for the original fieldnames which no longer match.
Code snippet
import csv
import os
inputFileName = "manifest1.csv"
outputFileName = os.path.splitext(inputFileName)[0] + "_modified.csv"
with open(inputFileName, 'rb') as inFile, open(outputFileName, 'wb') as outfile:
r = csv.DictReader(inFile)
fieldnames = ['MapSvcName','ClientHostName', 'Databasetype', 'ID_A', 'KeepExistingData', 'KeepExistingMapCache', 'Name', 'OnPremisePath', 'Resourcestype']
w = csv.DictWriter(outfile,fieldnames)
w.writeheader()
*** Here is where I start to go wrong
# copy the rest
for node, row in enumerate(r,1):
w.writerow(dict(row))
Error
File "D:\Apps\Python27\ArcGIS10.3\lib\csv.py", line 148, in _dict_to_list
+ ", ".join([repr(x) for x in wrong_fields]))
ValueError: dict contains fields not in fieldnames: 'Databases [xsi:type]', 'Resources [xsi:type]', 'ID'
Would like to some assistance to not just learn but truly understand what I need to do.
Cheers and thanks
Peter
Update..
I think I've worked it out
import csv
import os
inputFileName = "manifest1.csv"
outputFileName = os.path.splitext(inputFileName)[0] + "_modified.csv"
with open(inputFileName, 'rb') as inFile, open(outputFileName, 'wb') as outfile:
r = csv.reader(inFile)
w = csv.writer(outfile)
header = next(r)
header.insert(0, 'MapSvcName')
#w.writerow(header)
next(r, None) # skip the first row from the reader, the old header
# write new header
w.writerow(['MapSvcName','ClientHostName', 'Databasetype', 'ID_A', 'KeepExistingData', 'KeepExistingMapCache', 'Name', 'OnPremisePath', 'Resourcestype'])
prevRow = next(r)
prevRow.insert(0, '0')
w.writerow(prevRow)
for row in r:
if prevRow[-1] == row[-1]:
val = '0'
else:
val = prevRow[-1]
row.insert(0,val)
prevRow = row
w.writerow(row)
I'm trying to iterate over a CSV file that has a 'master list' of names, and compare it to another CSV file that contains only the names of people who were present and made phone calls.
I'm trying to iterate over the master list and compare it to the names in the other CSV file, take the number of calls made by the person and write a new CSV file containing number of Calls if the name isn't found or if it's 0, I need that column to have 0 there.
I'm not sure if its something incredibly simple I'm overlooking, or if I am truly going about this incorrectly.
Edited for formatting.
import csv
import sys
masterlst = open('masterlist.csv')
comparelst = open(sys.argv[1])
masterrdr = csv.DictReader(masterlst, dialect='excel')
comparerdr = csv.DictReader(comparelst, dialect='excel')
headers = comparerdr.fieldnames
with open('callcounts.csv', 'w') as outfile:
wrtr = csv.DictWriter(outfile, fieldnames=headers, dialect='excel', quoting=csv.QUOTE_MINIMAL, delimiter=',', escapechar='\n')
wrtr.writerow(dict((fn,fn) for fn in headers))
for lines in masterrdr:
for row in comparerdr:
if lines['Names'] == row['Names']:
print(lines['Names'] + ' has ' + row['Calls'] + ' calls')
wrtr.writerow(row)
elif lines['Names'] != row['Names']:
row['Calls'] = ('%s' % 0)
wrtr.writerow(row)
print(row['Names'] + ' had 0 calls')
masterlst.close()
comparelst.close()
Here's how I'd do it, assuming the file sizes do not prove to be problematic:
import csv
import sys
with open(sys.argv[1]) as comparelst:
comparerdr = csv.DictReader(comparelst, dialect='excel')
headers = comparerdr.fieldnames
names_and_counts = {}
for line in comparerdr:
names_and_counts[line['Names']] = line['Calls']
# or, if you're sure you only want the ones with 0 calls, just use a set and only add the line['Names'] values that that line['Calls'] == '0'
with open('masterlist.csv') as masterlst:
masterrdr = csv.DictReader(masterlst, dialect='excel')
with open('callcounts.csv', 'w') as outfile:
wrtr = csv.DictWriter(outfile, fieldnames=headers, dialect='excel', quoting=csv.QUOTE_MINIMAL, delimiter=',', escapechar='\n')
wrtr.writerow(dict((fn,fn) for fn in headers))
# or if you're on 2.7, wrtr.writeheader()
for line in masterrdr:
if names_and_counts.get(line['Names']) == '0':
row = {'Names': line['Names'], 'Calls': '0'}
wrtr.writerow(row)
That writes just the rows with 0 calls, which is what your text description said - you could tweak it if you wanted to write something else for non-0 calls.
Thanks everyone for the help. I was able to nest another with statement inside of my outer loop, and add a variable to test whether or not the name from the master list was found in the compare list. This is my final working code.
import csv
import sys
masterlst = open('masterlist.csv')
comparelst = open(sys.argv[1])
masterrdr = csv.DictReader(masterlst, dialect='excel')
comparerdr = csv.DictReader(comparelst, dialect='excel')
headers = comparerdr.fieldnames
with open('callcounts.csv', 'w') as outfile:
wrtr = csv.DictWriter(outfile, fieldnames=headers, dialect='excel', quoting=csv.QUOTE_MINIMAL, delimiter=',', escapechar='\n')
wrtr.writerow(dict((fn,fn) for fn in headers))
for line in masterrdr:
found = False
with open(sys.argv[1]) as loopfile:
looprdr = csv.DictReader(loopfile, dialect='excel')
for row in looprdr:
if row['Names'] == line['Names']:
line['Calls'] = row['Calls']
wrtr.writerow(line)
found = True
break
if found == False:
line['Calls'] = '0'
wrtr.writerow(line)
masterlst.close()
comparelst.close()