How to keep CSV headers on the chunk files after script split? - python

I need help modifying this script to include headers in the output chunk files. The script takes user input to determine how many rows each chunk file should contain. The output files do not contain the header row from the original file. I'm seeking advice on how to implement this.
import csv
import os
import sys

os_path = os.path
csv_writer = csv.writer
sys_exit = sys.exit

if __name__ == '__main__':
    try:
        chunk_size = int(input('Input number of rows of one chunk file: '))
    except ValueError:
        print('Number of rows must be integer. Close.')
        sys_exit()
    file_path = input('Input path to .tsv file for splitting on chunks: ')
    if (
        not os_path.isfile(file_path) or
        not file_path.endswith('.tsv')
    ):
        print('You must input path to .tsv file for splitting.')
        sys_exit()
    file_name = os_path.splitext(file_path)[0]
    with open(file_path, 'r', newline='', encoding='utf-8') as tsv_file:
        chunk_file = None
        writer = None
        counter = 1
        reader = csv.reader(tsv_file, delimiter='\t', quotechar='\'')
        for index, chunk in enumerate(reader):
            if index % chunk_size == 0:
                if chunk_file is not None:
                    chunk_file.close()
                chunk_name = '{0}_{1}.tsv'.format(file_name, counter)
                chunk_file = open(chunk_name, 'w', newline='', encoding='utf-8')
                counter += 1
                writer = csv_writer(chunk_file, delimiter='\t', quotechar='\'')
                print('File "{}" complete.'.format(chunk_name))
            writer.writerow(chunk)

You can do it by reading the header row manually when opening the input file, and then writing it at the beginning of each output file — see the ADDED comments in the code below:
...
with open(file_path, 'r', newline='', encoding='utf-8') as tsv_file:
    chunk_file = None
    writer = None
    counter = 1
    reader = csv.reader(tsv_file, delimiter='\t', quotechar="'")
    header = next(reader)  # Read and save header row. (ADDED)
    for index, chunk in enumerate(reader):
        if index % chunk_size == 0:
            if chunk_file is not None:
                chunk_file.close()
            chunk_name = '{0}_{1}.tsv'.format(file_name, counter)
            chunk_file = open(chunk_name, 'w', newline='', encoding='utf-8')
            writer = csv_writer(chunk_file, delimiter='\t', quotechar="'")
            writer.writerow(header)  # ADDED.
            print('File "{}" complete.'.format(chunk_name))
            counter += 1
        writer.writerow(chunk)
Note that using single-quote characters for quoting means the output files do not adhere to the CSV standard (RFC 4180), which specifies double quotes.
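If standard quoting is needed, here is a minimal sketch of the writer setup with RFC 4180-style double quotes; it reuses the chunk_file and tab delimiter from the code above, so it is an illustration rather than a drop-in change:

import csv

# Sketch: use the default double-quote character and minimal quoting,
# so quoted fields follow RFC 4180 conventions (assumes chunk_file is already open).
writer = csv.writer(
    chunk_file,
    delimiter='\t',
    quotechar='"',              # RFC 4180 quoting character
    quoting=csv.QUOTE_MINIMAL,  # only quote fields that need it
)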

Related

Data is adjacent to headers in csv

How can I put my first row of data in the csv under the header, and not in the same row as the header?
This is the result I get, and below is my code:
import os
# ...
filename = 'C:/Desktop/GPS_Trial/Trial6/' + str(d1) + '_' + str(file_counter) + '.csv'
# check whether the file exist or not
rows_to_be_written = []
if not os.path.exists(filename):
    rows_to_be_written.append(header1)
    rows_to_be_written.append(header2)
    rows_to_be_written.append(header3)
rows_to_be_written.append(gps)
rows_to_be_written.append(gps2)
rows_to_be_written.append(gps3)
# write the data into csv
with open(filename, 'a', newline='', encoding='UTF8') as f:
    writer = csv.writer(f, delimiter=',')
    writer.writerow(rows_to_be_written)
print(gps, gps2, gps3)
You write the header and the values in a single row when the file does not exist. You should write them separately:
rows_to_be_written = []
header = None
if not os.path.exists(filename):
    header = [header1, header2, header3]
rows_to_be_written.append(gps)
rows_to_be_written.append(gps2)
rows_to_be_written.append(gps3)
# write the data into csv
with open(filename, 'a', newline='', encoding='UTF8') as f:
    writer = csv.writer(f, delimiter=',')
    if header:
        writer.writerow(header)
    writer.writerow(rows_to_be_written)
print(gps, gps2, gps3)
It may also be that you intended to write multiple rows, but you are writing only one row with the header in it. In that case, change the code like this:
rows_to_be_written = []
if not os.path.exists(filename):
    rows_to_be_written.append([header1, header2, header3])
rows_to_be_written.append([gps, gps2, gps3])
# write the data into csv
with open(filename, 'a', newline='', encoding='UTF8') as f:
    writer = csv.writer(f, delimiter=',')
    for row in rows_to_be_written:
        writer.writerow(row)
print(gps, gps2, gps3)
You need to add the headings separately, and only if they are not there already:
# check whether the file exists or not
if not os.path.exists(filename):
    headings = [header1, header2, header3]
else:
    headings = None
rows_to_be_written = [gps, gps2, gps3]
# write the data into csv
with open(filename, 'a', newline='', encoding='UTF8') as f:
    writer = csv.writer(f)
    # Write headings if they exist
    if headings is not None:
        writer.writerow(headings)
    # Write rows
    writer.writerow(rows_to_be_written)
print(gps, gps2, gps3)
I suggest you consider this approach
# Open the file to see if it already has headings
try:
    with open(filename, "r") as f:
        has_headings = csv.Sniffer().has_header(f.read(1024))
except (csv.Error, FileNotFoundError):
    # The file seems to be empty or does not exist yet
    has_headings = False
# Open to write, in append mode ("a")
with open(filename, "a", newline="") as f:
    writer = csv.writer(f)
    if not has_headings:
        # Write the headings first, since the file doesn't have them yet
        writer.writerow(headings_list)
    # Use writerows if you have a 2D list, else use a for loop with writer.writerow
    writer.writerows(lists_of_rows)
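For context, here is a small self-contained sketch of that approach; headings_list and lists_of_rows are hypothetical placeholders standing in for whatever data the program produces:

import csv

filename = "gps_log.csv"                 # hypothetical output file
headings_list = ["lat", "lon", "alt"]    # hypothetical headings
lists_of_rows = [[41.41, -72.57, 12.0],
                 [41.42, -72.56, 13.5]]  # hypothetical data rows

# Check whether the file already starts with a header row.
try:
    with open(filename, "r", newline="") as f:
        has_headings = csv.Sniffer().has_header(f.read(1024))
except (csv.Error, FileNotFoundError):
    # Empty or missing file: no headings yet.
    has_headings = False

# Append the rows, writing the header only the first time.
with open(filename, "a", newline="") as f:
    writer = csv.writer(f)
    if not has_headings:
        writer.writerow(headings_list)
    writer.writerows(lists_of_rows)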

How to add columns in the starting position of a csv file in python?

I have a Python program that converts csv files to '^'-delimited files. The output files also have new headers.
cc = input("Enter Code \t")
cn = input("Enter Item Number \t")
input_file = input("Enter input file \t")
act = input("Enter Action (Update- N/Delete -Y) \t")
# csv is imported and `headers` is defined elsewhere in the program;
# chunked() is presumably from the more_itertools package
def file_conversion(input_file, output_file_pattern, chunksize):
    with open(input_file, "r+") as fin:
        # ignore headers of input file
        for i in range(1):
            fin.__next__()
        reader = csv.reader(fin, delimiter=',')
        for i, chunk in enumerate(chunked(reader, chunksize)):
            with open(output_file_pattern.format(i), 'w', newline='') as fout:
                writer = csv.writer(fout, delimiter='^')
                writer.writerow(headers)
                writer.writerows(chunk)
    print("Successfully converted into", output_file_pattern)
The program works; now I want to add act, cc, and cn to every line of the output delimited files. Each line of the output files should start like the line below:
act^cc^cn^restofcsvdelimitierfiles.
This solved my problem
def add_column_in_csv(input_file, output_file, transform_row, transform_row1, transform_row2):
    with open(input_file, 'r') as read_obj:
        with open(output_file, 'w', newline='') as write_obj:
            reader = csv.reader(read_obj, delimiter=',')
            writer = csv.writer(write_obj)
            for row in reader:
                transform_row(row, reader.line_num)
                transform_row1(row, reader.line_num)
                transform_row2(row, reader.line_num)
                writer.writerow(row)

add_column_in_csv(path0, path1,
                  lambda row, line_num: row.insert(0, act),
                  lambda row, line_num: row.insert(1, cc),
                  lambda row, line_num: row.insert(2, cn))

Split CSV files with headers in windows using python and remove text qualifiers from line start and end

I have a large csv file that I need to split. I have managed to split the file using the Python code below:
import csv

divisor = 500000
outfileno = 1
outfile = None

with open('file_temp.txt', 'r') as infile:
    for index, row in enumerate(csv.reader(infile)):
        if index % divisor == 0:
            if outfile is not None:
                outfile.close()
            outfilename = 'big-{}.csv'.format(outfileno)
            outfile = open(outfilename, 'w')
            outfileno += 1
            writer = csv.writer(outfile)
        writer.writerow(row)
The problem I'm facing is that the file header is not getting copied to the rest of the files. Can you please let me know how I can modify my code to add the header to each of the split files?
You just need to cache the header row and then write it out for each CSV file, something like:
import csv

divisor = 500000
outfileno = 1
outfile = None

try:
    with open('file_temp.txt', 'r') as infile:
        infile_iter = csv.reader(infile)
        header = next(infile_iter)
        for index, row in enumerate(infile_iter):
            if index % divisor == 0:
                if outfile is not None:
                    outfile.close()
                outfilename = 'big-{}.csv'.format(outfileno)
                outfile = open(outfilename, 'w')
                outfileno += 1
                writer = csv.writer(outfile)
                writer.writerow(header)
            writer.writerow(row)
finally:
    # Don't forget to close the last file
    if outfile is not None:
        outfile.close()
Since you're only working with lines, you don't really need to use the CSV module, here's a version that works without it:
divisor = 500000
outfileno = 1
outfile = None

try:
    with open('file_temp.txt', 'r') as infile:
        header = next(infile)
        for index, row in enumerate(infile):
            if index % divisor == 0:
                if outfile is not None:
                    outfile.close()
                outfilename = 'big-{}.csv'.format(outfileno)
                outfile = open(outfilename, 'w')
                outfileno += 1
                outfile.write(header)
            outfile.write(row)
finally:
    # Don't forget to close the last file
    if outfile is not None:
        outfile.close()

Python csv - replace any columns with specified value

I have the following input file with a header row:
test_in.csv
LON,LAT,NUMBER,STREET,UNIT,CITY,DISTRICT,REGION,POSTCODE,ID,HASH
-72.5708234,41.4155142,39,HICKORY LA,,,,,,,8a0df668e0d49b02
-72.5647745,41.4160301,1213,KILLINGWORTH RD,,,,,,,b3ecaab86e476f46
I need to replace any column's data with a specified string. For example, the CITY column's data should be replaced from "" to "MyCity". My code only outputs the header and the first row.
python test_forcefld.py test_in.csv MyCity CITY out_test.csv
import csv
import sys

in_file_name = sys.argv[1]
force_data = sys.argv[2]
force_fld = sys.argv[3]
out_file_name = sys.argv[4]

# First read top row/header from input file
fieldnames = []
for filename in [in_file_name]:
    with open(filename, "rb") as f_in:
        reader = csv.reader(f_in)
        headers = next(reader)
        for h in headers:
            fieldnames.append(h)

# print headers to output file
with open(out_file_name, 'w') as fou:
    dw = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
    dw.writeheader()

f_in2 = open(in_file_name, "rb")
reader2 = csv.DictReader(f_in2)  # Uses the field names in this file
datarow = next(reader2)
datarow[force_fld] = force_data
with open(out_file_name, 'wa') as fou:
    dw2 = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
    dw2.writeheader()
    dw2.writerow(datarow)
Output shows
LON,LAT,NUMBER,STREET,UNIT,CITY,DISTRICT,REGION,POSTCODE,ID,HASH
-72.5708234,41.4155142,39,HICKORY LA,,MyCity,,,,,8a0df668e0d49b02
Your code is a little difficult to read, but assuming datarow is a dictionary containing your records:
In your last line, change
dw2.writerow(datarow)
into
dw2.writerows(datarow)
While you're at it, you should also consider using datarow.keys() for your fieldnames, for conciseness.
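As a small sketch of that last suggestion (assuming fou is the open output file and datarow has already been read with DictReader as in the question):

# Derive the fieldnames from the record itself instead of re-reading the header.
fieldnames = list(datarow.keys())
dw2 = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
dw2.writeheader()
dw2.writerow(datarow)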
This should do it, you just need pandas:
import pandas as pd
df = pd.read_csv(in_file_name, sep=',')
df['CITY'].fillna('MyCity', inplace=True)
And to save it:
df.to_csv(out_file_name)
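One caveat: by default to_csv also writes the DataFrame index as an extra first column, so you probably want:

df.to_csv(out_file_name, index=False)  # don't write the row index as a column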
You can try something like this in order to get your desired file. I'm assuming your input file is called f_input.txt and your output file is called f_output.txt:
data = list(k.rstrip().split(',') for k in open("f_input.txt", 'r'))

with open("f_output.txt", 'a+') as f:
    f.write(",".join(data[0]) + '\n')
    for k in data[1:]:
        # CITY is column index 5; splice your data in between the surrounding
        # fields (adjust the indexes if you need to handle another position)
        f.write(",".join(k[:5] + ["MyCity"] + k[6:]) + "\n")
This worked in the end:
import csv
import sys

in_file_name = sys.argv[1]
force_data = sys.argv[2]
force_fld = sys.argv[3]
out_file_name = sys.argv[4]

# First read top row/header from input file
fieldnames = []
for filename in [in_file_name]:
    with open(filename, "r", newline='') as f_in:
        reader = csv.reader(f_in)
        headers = next(reader)
        for h in headers:
            fieldnames.append(h)

f_in2 = open(in_file_name, "r", newline='')

# print headers to output file
fou = open(out_file_name, 'w', newline='')
dw = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
dw.writeheader()

reader2 = csv.DictReader(f_in2)  # Uses the field names in this file
for row in reader2:
    row[force_fld] = force_data
    dw2 = csv.DictWriter(fou, delimiter=',', fieldnames=fieldnames)
    dw2.writerow(row)

Python file matching and appending

This is one file result.csv:
M11251TH1230
M11543TH4292
M11435TDS144
This is another file sample.csv:
M11435TDS144,STB#1,Router#1
M11543TH4292,STB#2,Router#1
M11509TD9937,STB#3,Router#1
M11543TH4258,STB#4,Router#1
Can I write a Python program to compare both files, so that if a line in result.csv matches the first field of a line in sample.csv, a 1 is appended to that line in sample.csv, and otherwise a 0?
import pandas as pd

d1 = pd.read_csv("1.csv", names=["Type"])
d2 = pd.read_csv("2.csv", names=["Type", "Col2", "Col3"])
d2["Index"] = 0
for x in d1["Type"]:
    # use .loc to avoid chained-assignment issues
    d2.loc[d2["Type"] == x, "Index"] = 1
d2.to_csv("3.csv", header=False)
Considering "1.csv" and "2.csv" are your csv input files and "3.csv" is the result you needed
The solution using csv.reader and csv.writer (csv module):
import csv

newLines = []
# change the file path to the actual one
with open('./data/result.csv', newline='\n') as csvfile:
    data = csv.reader(csvfile)
    items = [''.join(line) for line in data]

with open('./data/sample.csv', newline='\n') as csvfile:
    data = list(csv.reader(csvfile))
    for line in data:
        line.append(1 if line[0] in items else 0)
        newLines.append(line)

with open('./data/sample.csv', 'w', newline='\n') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerows(newLines)
The sample.csv contents:
M11435TDS144,STB#1,Router#1,1
M11543TH4292,STB#2,Router#1,1
M11509TD9937,STB#3,Router#1,0
M11543TH4258,STB#4,Router#1,0
With only one column, I wonder why you made result.csv a csv at all. If it is not going to have any more columns, a simple file read would suffice. Converting the data from result.csv to a dictionary will also make the lookups fast.
result_file = "result.csv"
sample_file = "sample.csv"
with open(result_file) as fp:
result_data = fp.read()
result_dict = dict.fromkeys(result_data.split("\n"))
"""
You can change the above logic, in case you have very few fields on csv like this:
result_data = fp.readlines()
result_dict = {}
for result in result_data:
key, other_field = result.split(",", 1)
result_dict[key] = other_field.strip()
"""
#Since sample.csv is a real csv, using csv reader and writer
with open(sample_file, "rb") as fp:
sample_data = csv.reader(fp)
output_data = []
for data in sample_data:
output_data.append("%s,%d" % (data, data[0] in result_dict))
with open(sample_file, "wb") as fp:
data_writer = csv.writer(fp)
data_writer.writerows(output_data)
The following snippet of code will work for you
import csv

with open('result.csv', 'r', newline='') as f:
    reader = csv.reader(f)
    result_list = []
    for row in reader:
        result_list.extend(row)

with open('sample.csv', 'r', newline='') as f:
    reader = csv.reader(f)
    sample_list = []
    for row in reader:
        if row[0] in result_list:
            sample_list.append(row + [1])
        else:
            sample_list.append(row + [0])

with open('sample.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerows(sample_list)
