I am trying to delete the comma character from my CSV file, but I end up with quotes in the output and nothing gets deleted at all.
import csv
import string
input_file = open('/Users/gfidarov/Desktop/crosscheckmmm/Sheet1', 'r')
output_file = open('/Users/gfidarov/Desktop/crosscheckmmm/Sheet01', 'w')
data = csv.reader(input_file)
writer = csv.writer(output_file, quoting=csv.QUOTE_NONE, dialect='excel')
specials = ','
for line in data:
    line = [value.replace(specials, '') for value in line]
    print(line)
    writer.writerow(line)
input_file.close()
output_file.close()
When I try to run it I get this error: _csv.Error: need to escape, but no escapechar set
Maybe you should try treating each line as a string in order to replace/delete a character in it:
import csv
import string
input_file = open('/Users/gfidarov/Desktop/crosscheckmmm/Sheet1', 'r')
output_file = open('/Users/gfidarov/Desktop/crosscheckmmm/Sheet01', 'w')
data = csv.reader(input_file)
writer = csv.writer(output_file,quoting=csv.QUOTE_ALL)# dialect='excel')
specials = ','
for line in data:
    line = str(line)
    new_line = line.replace(specials, '')
    writer.writerow(new_line.split(','))
input_file.close()
output_file.close()
For the problem with escapechar, you can temporarily set an escapechar, then unset it after all your processing:
writer = csv.writer(output_file,quoting=csv.QUOTE_ALL, escapechar='\\')# dialect='excel')
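For example, here is a minimal sketch of the original loop with an escapechar set on the writer (reusing the comma-removal and file paths from the question):
import csv

with open('/Users/gfidarov/Desktop/crosscheckmmm/Sheet1', 'r', newline='') as input_file, \
     open('/Users/gfidarov/Desktop/crosscheckmmm/Sheet01', 'w', newline='') as output_file:
    data = csv.reader(input_file)
    # QUOTE_NONE requires an escapechar in case a field still contains the delimiter;
    # since every comma is removed first, it only acts as a safety net here
    writer = csv.writer(output_file, quoting=csv.QUOTE_NONE, escapechar='\\')
    for line in data:
        line = [value.replace(',', '') for value in line]
        writer.writerow(line)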
I have a Python program that converts CSV files to '^'-delimited files. The output files also have new headers.
import csv

cc = input("Enter Code \t")
cn = input("Enter Item Number \t")
input_file = input("Enter input file \t")
act = input("Enter Action (Update- N/Delete -Y) \t")

# chunked() (e.g. more_itertools.chunked) and headers are assumed to be defined elsewhere
def file_conversion(input_file, output_file_pattern, chunksize):
    with open(input_file, "r+") as fin:
        next(fin)  # ignore the header of the input file
        reader = csv.reader(fin, delimiter=',')
        for i, chunk in enumerate(chunked(reader, chunksize)):
            with open(output_file_pattern.format(i), 'w', newline='') as fout:
                writer = csv.writer(fout, delimiter='^')
                writer.writerow(headers)  # headers: the new header row
                writer.writerows(chunk)
    print("Successfully converted into", output_file_pattern)
The program works. Now I want to add act, cc and cn to every line of the output delimited files. The lines of the output files should start like the line below:
act^cc^cn^restofcsvdelimitierfiles.
This solved my problem
def add_column_in_csv(input_file, output_file, transform_row, transform_row1, transform_row2):
    with open(input_file, 'r') as read_obj:
        with open(output_file, 'w', newline='') as write_obj:
            reader = csv.reader(read_obj, delimiter=',')
            writer = csv.writer(write_obj)
            for row in reader:
                transform_row(row, reader.line_num)
                transform_row1(row, reader.line_num)
                transform_row2(row, reader.line_num)
                writer.writerow(row)
add_column_in_csv(path0, path1,
                  lambda row, line_num: row.insert(0, act),
                  lambda row, line_num: row.insert(1, cc),
                  lambda row, line_num: row.insert(2, cn))
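Each lambda mutates the row in place, so after the three insert() calls every output row starts with act, cc, cn. For reference, a minimal sketch of the same idea without the callbacks, assuming the same act, cc and cn variables and the '^'-delimited output the question asks for (this is my own variation, not the code above):
import csv

def prepend_columns(input_path, output_path, act, cc, cn):
    with open(input_path, 'r', newline='') as read_obj, \
         open(output_path, 'w', newline='') as write_obj:
        reader = csv.reader(read_obj, delimiter=',')
        writer = csv.writer(write_obj, delimiter='^')
        for row in reader:
            # put the three fixed fields in front of the original fields
            writer.writerow([act, cc, cn] + row)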
I have a big csv file. After some items there is a newline character which is not supposed to be there. It is always after a specific item, let's say it's called 'foo'. I need to remove every newline character after foo. I figured out this is kind of what should happen:
for line in sys.stdin:
    if line.split(",")[-1] == "foo":
        line = line.rstrip()
How do I make sure I output the result back to the file?
You can't write line back to your original file, but assuming you will run your script like python script.py < input_file.csv > output_file.csv, you can simply print the lines you need:
import sys

for line in sys.stdin:
    # the raw line still ends with a newline, so strip it before checking the last field
    if line.rstrip().split(",")[-1] == "foo":
        line = line.rstrip()
    # print() will append '\n' by default - we prevent it
    print(line, end='')
I haven't tested this, but it should do what you need it to. It assumes there are no items other than foo that have trailing whitespace you don't want to strip; otherwise, a simple conditional will fix that.
import csv

rows = []
with open("/path/to/file", newline='') as f:
    reader = csv.reader(f)
    for row in reader:
        for i, item in enumerate(row):
            row[i] = item.rstrip()
        rows.append(row)  # collect the cleaned rows so they can be written out

with open("/path/to/file", 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerows(rows)
This answer just saves to a new csv file.
with open("test.csv", "r", newline="") as csvfile:
my_reader = csv.reader(csvfile, delimiter=',', quotechar='"')
with open("new.csv", "w", newline="") as csvfile2:
last_line = []
writer = csv.writer(csvfile2, delimiter=',', quotechar='"')
for line in my_reader:
if last_line != []:
writer.writerow(last_line + line)
last_line = []
elif line[-1] == "foo":
last_line = line
else:
writer.writerow(line)
if last_line != []: # when the last line also contain "foo"
writer.writerow(last_line)
Tested on a test.csv file:
this,"is,a ",book
this,is,foo
oh,my
this,foo
And it produced this new.csv file:
this,"is,a ",book
this,is,foo,oh,my
this,foo
I am able to change the data to lowercase and remove all the punctuation, but I have trouble saving the corrected data to a CSV file.
import csv
import re
import os
input_file = raw_input("Name of the CSV file:")
output_file = raw_input("Output Name:")
reg_test = input_file
result = ''
with open(input_file, 'r') as csvfile:
    with open(output_file, 'w') as csv_out_file:
        filereader = csv.reader(csvfile)
        filewriter = csv.writer(csv_out_file)
        for row in filereader:
            row = re.sub('[^A-Za-z0-9]+', '', str(row))
            result += row + ','
        lower = (result).lower()
csvfile.close()
csv_out_file.close()
You do not have to close the files; that is done automatically when the with statement's context ends. You also have to actually write something after you create the csv.writer, e.g. with writerow:
import csv
import re
input_file = 'in.csv'
output_file = 'out.csv'
with open(input_file, 'r') as csvfile, open(output_file, 'w') as csv_out_file:
    filereader = csv.reader(csvfile)
    filewriter = csv.writer(csv_out_file)
    for row in filereader:
        new_row = re.sub('[^A-Za-z0-9]+', '', str(row))  # manipulate the row
        filewriter.writerow([new_row.lower()])  # write the new row to the out file
# the files are closed automatically after the context of the with statement is over
This saves the manipulated content of the first csv file to the second.
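Note that re.sub() is applied to str(row), i.e. the string form of the whole row, so the commas between fields disappear too and each output line becomes a single column. If you would rather keep the columns separate, a per-field variant could look like this (an assumption about the desired output, not part of the answer above):
import csv
import re

with open('in.csv', 'r') as csvfile, open('out.csv', 'w') as csv_out_file:
    filereader = csv.reader(csvfile)
    filewriter = csv.writer(csv_out_file)
    for row in filereader:
        # clean each field on its own so the column structure is preserved
        cleaned = [re.sub('[^A-Za-z0-9]+', '', field).lower() for field in row]
        filewriter.writerow(cleaned)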
This is one file result.csv:
M11251TH1230
M11543TH4292
M11435TDS144
This is another file sample.csv:
M11435TDS144,STB#1,Router#1
M11543TH4292,STB#2,Router#1
M11509TD9937,STB#3,Router#1
M11543TH4258,STB#4,Router#1
Can I write a Python program to compare both files and, if a line in result.csv matches the first word of a line in sample.csv, append 1, else append 0, to every line of sample.csv?
import pandas as pd

d1 = pd.read_csv("1.csv", names=["Type"])
d2 = pd.read_csv("2.csv", names=["Type", "Col2", "Col3"])
d2["Index"] = 0
for x in d1["Type"]:
    # use .loc so the assignment is not done through a chained-indexing copy
    d2.loc[d2["Type"] == x, "Index"] = 1
d2.to_csv("3.csv", header=False)
Considering "1.csv" and "2.csv" are your csv input files and "3.csv" is the result you needed
The solution using csv.reader and csv.writer (csv module):
import csv

newLines = []
# change the file path to the actual one
with open('./data/result.csv', newline='\n') as csvfile:
    data = csv.reader(csvfile)
    items = [''.join(line) for line in data]

with open('./data/sample.csv', newline='\n') as csvfile:
    data = list(csv.reader(csvfile))
    for line in data:
        line.append(1 if line[0] in items else 0)
        newLines.append(line)

with open('./data/sample.csv', 'w', newline='\n') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(newLines)
The sample.csv contents:
M11435TDS144,STB#1,Router#1,1
M11543TH4292,STB#2,Router#1,1
M11509TD9937,STB#3,Router#1,0
M11543TH4258,STB#4,Router#1,0
With only one column, I wonder why you made result.csv a csv at all. If it is never going to have more columns, a simple file read operation would suffice, and converting the data from result.csv to a dictionary will make the lookups quick as well.
result_file = "result.csv"
sample_file = "sample.csv"
with open(result_file) as fp:
result_data = fp.read()
result_dict = dict.fromkeys(result_data.split("\n"))
"""
You can change the above logic, in case you have very few fields on csv like this:
result_data = fp.readlines()
result_dict = {}
for result in result_data:
key, other_field = result.split(",", 1)
result_dict[key] = other_field.strip()
"""
#Since sample.csv is a real csv, using csv reader and writer
with open(sample_file, "rb") as fp:
sample_data = csv.reader(fp)
output_data = []
for data in sample_data:
output_data.append("%s,%d" % (data, data[0] in result_dict))
with open(sample_file, "wb") as fp:
data_writer = csv.writer(fp)
data_writer.writerows(output_data)
The following snippet of code will work for you
import csv

with open('result.csv', 'rb') as f:
    reader = csv.reader(f)
    result_list = []
    for row in reader:
        result_list.extend(row)

with open('sample.csv', 'rb') as f:
    reader = csv.reader(f)
    sample_list = []
    for row in reader:
        if row[0] in result_list:
            sample_list.append(row + [1])
        else:
            sample_list.append(row + [0])

with open('sample.csv', 'wb') as f:
    writer = csv.writer(f)
    writer.writerows(sample_list)
Some example data:
title1|title2|title3|title4|merge
test|data|here|and
test|data|343|AND
",3|data|343|and
My attempt at coding this:
import csv
import StringIO

storedoutput = StringIO.StringIO()
fields = ('title1', 'title2', 'title3', 'title4', 'merge')

with open('file.csv', 'rb') as input_csv:
    reader = csv.DictReader(input_csv, fields, delimiter='|')
    for counter, row in enumerate(reader):
        counter += 1
        #print row
        if counter != 1:
            for field in fields:
                if field == "merge":
                    row['merge'] = ("%s%s%s" % (row["title1"], row["title3"], row["title4"]))
        print row
        storedoutput.writelines(','.join(map(str, row)) + '\n')

contents = storedoutput.getvalue()
storedoutput.close()
print "".join(contents)

with open('file.csv', 'rb') as input_csv:
    input_csv = input_csv.read().strip()

output_csv = []
output_csv.append(contents.strip())
if "".join(output_csv) != input_csv:
    with open('file.csv', 'wb') as new_csv:
        new_csv.write("".join(output_csv))
Output should be
title1|title2|title3|title4|merge
test|data|here|and|testhereand
test|data|343|AND|test343AND
",3|data|343|and|",3343and
For reference, when running this code the first print statement prints the rows as I would hope them to appear in the output csv. However, the second print prints the title row x times, where x is the number of rows.
Any input or corrections or working code would be appreciated.
I think we can make this a lot simpler. Dealing with the rogue " was a bit of a nuisance, I admit, because you have to work hard to tell Python you don't want to worry about it.
import csv

with open('file.csv', 'rb') as input_csv, open("new_file.csv", "wb") as output_csv:
    reader = csv.DictReader(input_csv, delimiter='|', quoting=csv.QUOTE_NONE)
    writer = csv.DictWriter(output_csv, reader.fieldnames, delimiter="|", quoting=csv.QUOTE_NONE, quotechar=None)
    merge_cols = "title1", "title3", "title4"
    writer.writeheader()
    for row in reader:
        row["merge"] = ''.join(row[col] for col in merge_cols)
        writer.writerow(row)
produces
$ cat new_file.csv
title1|title2|title3|title4|merge
test|data|here|and|testhereand
test|data|343|AND|test343AND
",3|data|343|and|",3343and
Note that even though you wanted the original file updated, I refused. Why? It's a bad idea, because then you can destroy your data while working on it.
How can I be so sure? Because that's exactly what I did when I first ran your code, and I know better. ;^)
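If you do eventually want file.csv itself updated, a safer pattern (my own suggestion, not something the answer above does) is to keep writing to new_file.csv and only swap it in once you are happy with the result:
import os

# os.replace() (Python 3.3+) atomically swaps the finished output in for the original,
# so file.csv is never left half-written
os.replace('new_file.csv', 'file.csv')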
That double quote in the last line is definitely messing up the csv.DictReader().
This works:
new_lines = []

with open('file.csv', 'rb') as f:
    # skip the first line
    new_lines.append(next(f).strip())
    for line in f:
        # strip the newline and split the fields
        line = line.strip().split('|')
        # extract the field data you want
        title1, title3, title4 = line[0], line[2], line[3]
        # turn the field data into a string and append it to the rest
        line.append(''.join([title1, title3, title4]))
        # save the new line for later
        new_lines.append('|'.join(line))

with open('file.csv', 'w') as f:
    # make one long string and write it to the new file
    f.write('\n'.join(new_lines))
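The same DictReader/DictWriter approach can also be buffered in a StringIO first and written back to file.csv only when the contents have actually changed: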
import csv
import StringIO

stored_output = StringIO.StringIO()

with open('file.csv', 'rb') as input_csv:
    reader = csv.DictReader(input_csv, delimiter='|', quoting=csv.QUOTE_NONE)
    writer = csv.DictWriter(stored_output, reader.fieldnames, delimiter="|", quoting=csv.QUOTE_NONE, quotechar=None)
    merge_cols = "title1", "title3", "title4"
    writer.writeheader()
    for row in reader:
        row["merge"] = ''.join(row[col] for col in merge_cols)
        writer.writerow(row)

contents = stored_output.getvalue()
stored_output.close()
print contents

with open('file.csv', 'rb') as input_csv:
    input_csv = input_csv.read().strip()

if input_csv != contents.strip():
    with open('file.csv', 'wb') as new_csv:
        new_csv.write("".join(contents))