Read file headers and delimiters using Python

I am reading all the files from a given folder (it contains directories, subdirectories, and files of type .csv, .txt, ...).
I need to get the following information into an output file, in this format:
FileLocation, FileName, Delimiter, Columns
(all column names in one cell, separated by the delimiter)
I am using the following script, which works fine except for the delimiter. I have tried using csv.Sniffer, but it does not work.
import sys, os, csv

ofilew = open(r'D:\OutputFile\Columns_Info.csv', 'w')
ofile = open(r'D:\OutputFile\Columns_Info.csv', 'a')
root = r'D:\UnZipFiles'
path = os.path.join(root)

columninfo = 'FolderLocation, FileName, Delimiter, Columns' + '\n'
ofilew.write(columninfo)

for r, d, f in os.walk(path):
    for file in f:
        fullfilepath = os.path.join(r, file)
        with open(fullfilepath, 'r') as f:
            columninfo = f.readline()
            columninfo = columninfo.replace(",", ";")
            output = file + ',' + columninfo
            outputfinal = r + ',' + output
            ofile.write(outputfinal)

The following approach should work for you. It uses Python's csv.Sniffer class to attempt to determine the correct dialect for reading each file; that dialect also contains the delimiter that was detected.
import os, csv

header_output = ['FolderLocation', 'FileName', 'Delimiter', 'Columns']
path = r'D:\UnZipFiles'

with open(r'D:\OutputFile\Columns_Info.csv', 'wb') as f_output:
    csv_output = csv.writer(f_output)
    csv_output.writerow(header_output)

    for root, folders, files in os.walk(path):
        for file in files:
            full_file_path = os.path.join(root, file)
            with open(full_file_path, 'rb') as f_input:
                try:
                    # sniff the dialect from the first 1024 bytes
                    dialect = csv.Sniffer().sniff(f_input.read(1024))
                    f_input.seek(0)
                    csv_input = csv.reader(f_input, dialect)
                    header_input = next(csv_input)
                    csv_output.writerow([root, file, dialect.delimiter] + header_input)
                except csv.Error as e:
                    print "{} - could not determine the delimiter".format(file)
As an alternative to csv.Sniffer, you could devise your own delimiter detection, although the built-in sniffer is much more powerful than this simple approach:
def get_delimiter(file_name):
    cols_found = []
    for delim in [',', ';', '|', '\t']:
        with open(file_name, 'rb') as f_in:
            # how many columns does the first row split into with this delimiter?
            cols_found.append([len(next(csv.reader(f_in, delimiter=delim))), delim])
    best = sorted(cols_found)[-1]
    if best[0] > 1:
        return best[1]
    else:
        return None

print get_delimiter('my.csv')
This returns a candidate delimiter by counting which delimiter produces the most columns in the first row. If no delimiter yields more than one column, it returns None to indicate that no matching delimiter was found; it could instead raise an exception.

Related

Concatenate all files that map values in the same key

I have a dictionary that groups different patterns:
dico_cluster = {'cluster_1': ['CUX2', 'CUX1'], 'cluster_2': ['RFX3', 'RFX2'], 'cluster_3': ['REST']}
Then I have files in a folder :
"/path/to/test/files/CUX1.txt"
"/path/to/test/files/CUX2.txt"
"/path/to/test/files/RFX3.txt"
"/path/to/test/files/RFX2.txt"
"/path/to/test/files/REST.txt"
"/path/to/test/files/ZEB.txt"
"/path/to/test/files/TEST.txt"
I'm trying to concatenate the files that are in the same cluster. The output file name should be the pattern names joined by an underscore "_".
I tried this:
import glob
import shutil

filenames = glob.glob('/path/to/test/files/*.txt')
for clee in dico_cluster.keys():
    fname = '_'.join(dico_cluster[clee])
    outfilename = '/path/to/test/outfiles/' + fname + ".txt"
    for file in filenames:
        tf_file = file.split('/')[-1].split('.')[0]
        if tf_file in dico_cluster[clee]:
            with open(outfilename, 'wb') as outfile:
                for filename in filenames:
                    if filename == outfilename:
                        # don't want to copy the output into the output
                        continue
                    with open(filename, 'rb') as readfile:
                        shutil.copyfileobj(readfile, outfile)
But it's not working: I'm just concatenating all the files, when I want to concatenate only the files that are in the same cluster.
I would recommend using the os package; it makes path handling easier. If I understood your problem correctly, I would load the whole content of the matching files before writing them out:
import glob
import os

filenames = glob.glob('/path/to/test/files/*.txt')

for clee in dico_cluster.keys():
    my_clusters = list(set(dico_cluster[clee]))
    fname = "_".join(my_clusters)
    data = list()
    outfilename = os.path.join("/path/to/test/outfiles", fname + ".txt")
    # collect the lines of every file belonging to this cluster
    for file in filenames:
        tf_file = os.path.basename(file).split(".")[0]
        if tf_file in my_clusters:
            with open(file, 'rb') as f1:
                data.extend(f1.readlines())
    # then write them all to the cluster's output file
    with open(outfilename, "wb") as _output_file:
        for elm in data:
            _output_file.write(elm)
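If the files are large, a more memory-friendly variant (a sketch under the same path assumptions, not part of the original answer) streams each matching file straight into the cluster's output with shutil.copyfileobj instead of collecting every line in a list first:

import glob
import os
import shutil

filenames = glob.glob('/path/to/test/files/*.txt')

for clee in dico_cluster.keys():
    my_clusters = list(set(dico_cluster[clee]))
    outfilename = os.path.join("/path/to/test/outfiles", "_".join(my_clusters) + ".txt")
    # open the cluster output once, then append each matching input file to it
    with open(outfilename, 'wb') as outfile:
        for file in filenames:
            if os.path.basename(file).split(".")[0] in my_clusters:
                with open(file, 'rb') as readfile:
                    shutil.copyfileobj(readfile, outfile)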

python csv add column name

Can you help me please? How do I add a column name to a CSV file with Python?
import os

dirname = os.path.dirname(os.path.abspath(__file__))
csvfilename = os.path.join(dirname, 'csvfile.csv')
file_exists = os.path.isfile(csvfilename)

f = open(csvfilename, 'a')
f.write(list[0] + ';' + '\r\n')
f.close()
Maybe you can add a header like this?
import csv

with open(csvfilename, 'wt', newline='') as file:
    write_header = csv.writer(file, delimiter=',')
    write_header.writerow(i for i in list[0])
Since you just want to modify a single line of the file, there isn't any need to run it all through a CSV processor. It's generally best not to read and write the same file at once, so you can create a temporary file, make the changes at the top, and then copy the bulk of the original across.
import os
import shutil

dirname = os.path.dirname(os.path.abspath(__file__))
csvfilename = os.path.join(dirname, 'csvfile.csv')
tmpfilename = os.path.join(dirname, 'csvfile.csv.tmp')
file_exists = os.path.isfile(csvfilename)

with open(csvfilename, 'rb') as f_in, open(tmpfilename, 'wb') as f_out:
    # discard current header
    next(f_in)
    # write new header
    f_out.write("colname\r\n".encode())
    # copy the rest of the file
    shutil.copyfileobj(f_in, f_out)

# replace original csv with fixed
shutil.move(tmpfilename, csvfilename)
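If you are not sure whether the file already has a header row, the standard library's csv.Sniffer().has_header() heuristic can help you decide whether to replace the first line or keep it. A minimal sketch (not from the original answers, and the heuristic can guess wrong on ambiguous data):

import csv

with open(csvfilename, newline='') as f:
    sample = f.read(2048)

# True if the first row looks like column names rather than data
has_header = csv.Sniffer().has_header(sample)
print(has_header)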

Python 3.4 - Add file name + line number to all files in a folder

I'm not a programmer, and I've been doing my best to create some small scripts with Python 3.4 that help me with different tasks at work.
I have several .txt files, and to every line in each file I need to append:
the file name
the file name + line number
Then I need to save it as a UTF-8 CSV with all fields separated by commas.
I managed to do this for one particular file, but I'm struggling to do it for all the files in the folder. I've tried import glob, but with no success.
This is the code right now (a mess... that partially works):
with open('Ruth.txt', 'r') as program:
    data = program.readlines()

with open('Ruth.txt', 'w') as program:
    for (number, line) in enumerate(data):
        program.write('%d","%s' % (number + 1, line))

files = 'Ruth.txt'
all_lines = []
for f in files.split():
    lines = open(f, 'r').readlines()
    for line in lines:
        all_lines.append('"' + f + '"' + ',' + '"' + f + line.strip() + '"')
    fout = open(f + 'out.csv', 'w')
    fout.write('\n'.join(all_lines))
    fout.close()
Try this:
import os

folder = 'path/to/your/folder'  # set this to the directory you want to process

def add_numbers(filename):
    with open(filename, 'r') as readfile:
        data = readfile.readlines()
    with open(filename, 'w') as writefile:
        for i, line in enumerate(data):
            writefile.write('%d. %s' % (i + 1, line))

for path, _, filenames in os.walk(folder):
    for filename in filenames:
        add_numbers(os.path.join(path, filename))
This will add numbers to each file in the directory and each file in all sub-directories. If you don't want it to check the sub-directories, change the for loop to this:
path, _, filenames = next(os.walk(folder))
for filename in filenames:
    add_numbers(os.path.join(path, filename))
Here is a complete script that takes one positional argument (folder) and creates a new .csv file at the same level as each text file.
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from pathlib import Path

def get_files(folder_path, suffix=".txt"):
    return Path(folder_path).glob("**/*%s" % suffix)

def write_lines(file_):
    with file_.with_suffix(".csv").open("w") as fout, file_.open(encoding="utf-8") as fin:
        for i, line in enumerate(fin, 1):
            # line number, file name, line
            new_line = ",".join(["%d." % i, file_.name, line])
            fout.write(new_line)

def main(folder):
    for file_ in get_files(folder):
        print(file_)
        write_lines(file_)

if __name__ == '__main__':
    try:
        main(sys.argv[1])
    except IndexError:
        print("usage: %s foldername" % sys.argv[0])
This will take all the text files in the current folder and turn them into UTF-8 encoded 'csv-style' files, so that whitespace in the text is turned into commas, with the filename and line number also comma-separated:
from glob import glob

filenames = glob("*.txt")
for fn in filenames:
    text = ''
    with open(fn, 'r') as f:
        for i, line in enumerate(f):
            line = ','.join(line.split())
            text += ','.join((line, fn, str(i + 1))) + '\n'
    fnew = fn.rsplit('.', 1)[0] + '.csv'
    with open(fnew, 'w', encoding='utf-8') as f:
        f.write(text)
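One caveat about the string-joining approach above: if a filename or a line happens to contain a comma, the output will not be valid CSV. A variant using csv.writer quotes such fields automatically (a sketch under the same glob assumption, not part of the original answers):

import csv
from glob import glob

for fn in glob("*.txt"):
    fnew = fn.rsplit('.', 1)[0] + '.csv'
    with open(fn, 'r') as fin, open(fnew, 'w', newline='', encoding='utf-8') as fout:
        writer = csv.writer(fout)
        for i, line in enumerate(fin, 1):
            # fields containing commas or quotes are escaped by the csv module
            writer.writerow(line.split() + [fn, i])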

How to split text file by id in python

I have a bunch of text files containing tab-separated tables. The second column contains an id number, and each file is already sorted by that id number. I want to separate each file into multiple files by the id number in column 2. Here's what I have.
import os

readpath = 'path-to-read-file'
writepath = 'path-to-write-file'

for filename in os.listdir(readpath):
    with open(readpath + filename, 'r') as fh:
        lines = fh.readlines()
    lastid = 0
    f = open(writepath + 'checkme.txt', 'w')
    f.write(filename)
    for line in lines:
        thisid = line.split("\t")[1]
        if int(thisid) <> lastid:
            f.close()
            f = open(writepath + thisid + '-' + filename, 'w')
            lastid = int(thisid)
        f.write(line)
    f.close()
What I get is simply a copy of all the read files, with the first id number from each file prepended to the new filenames. It is as if
thisid = line.split("\t")[1]
were only executed once in the loop. Any clue as to what is going on?
EDIT
The problem was that my files used \r rather than \r\n to terminate lines. Corrected code (simply adding 'rU' when opening the read file and replacing <> with !=):
import os

readpath = 'path-to-read-file'
writepath = 'path-to-write-file'

for filename in os.listdir(readpath):
    with open(readpath + filename, 'rU') as fh:
        lines = fh.readlines()
    lastid = 0
    f = open(writepath + 'checkme.txt', 'w')
    f.write(filename)
    for line in lines:
        thisid = line.split("\t")[1]
        if int(thisid) != lastid:
            f.close()
            f = open(writepath + thisid + '-' + filename, 'w')
            lastid = int(thisid)
        f.write(line)
    f.close()
If you're dealing with tab-delimited files, you can use the csv module and take advantage of the fact that itertools.groupby will do the previous/current tracking of the id for you. groupby batches consecutive rows that share a key, which works here precisely because the files are already sorted by id. Also use os.path.join to make sure your paths are joined correctly.
Untested:
import os
import csv
from itertools import groupby

readpath = 'path-to-read-file'
writepath = 'path-to-write-file'

for filename in os.listdir(readpath):
    with open(os.path.join(readpath, filename)) as fin:
        tabin = csv.reader(fin, delimiter='\t')
        # group consecutive rows by the id in column 2
        for file_id, rows in groupby(tabin, lambda L: L[1]):
            with open(os.path.join(writepath, file_id + '-' + filename), 'w') as fout:
                tabout = csv.writer(fout, delimiter='\t')
                tabout.writerows(rows)
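To illustrate what groupby contributes here, a tiny standalone example (toy rows, hypothetical values) showing how consecutive rows sharing an id come out as one batch:

from itertools import groupby

rows = [['a', '1'], ['b', '1'], ['c', '2'], ['d', '1']]
for key, group in groupby(rows, lambda row: row[1]):
    print(key, list(group))
# 1 [['a', '1'], ['b', '1']]
# 2 [['c', '2']]
# 1 [['d', '1']]   <- a second run of id 1: groupby only merges consecutive rows,
#                     which is why the input files must already be sorted by id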

Read many CSV files and re-encode them to UTF-8 using Python

I'm using Python to read many CSV files and re-encode them to UTF-8. The problem is that when I read a file I can read all the lines, but when I write, only one line gets written. Please help me check my code below:
def convert_files(files, ascii, to="utf-8"):
    for name in files:
        #print ("Convert {0} from {1} to {2}").format(name, ascii, to)
        with open(name) as f:
            print(name)
            count = 0
            lineno = 0
            # at this point I want to write the text below into each new file as the first line
            #file_source.write('id;nom;prenom;nom_pere;nom_mere;prenom_pere;prenom_mere;civilite (1=homme 2=f);date_naissance;arrondissement;adresse;ville;code_postal;pays;telephone;email;civilite_demandeur (1=homme 2=f);nom_demandeur;prenom_demandeur;qualite_demandeur;type_acte;nombre_actes\n')
            for line in f.readlines():
                lineno += 1
                if lineno == 1:
                    continue
                file_source = open(name, mode='w', encoding='utf-8', errors='ignore')
                #pass
                #print (line)
                # start writing data to the new file with the new encoding
                file_source.write(line)
                #file_source.close
                #print unicode(line, "cp866").encode("utf-8")

csv_files = find_csv_filenames('./csv', ".csv")
convert_files(csv_files, "cp866")
You're reopening the file during every iteration.
for line in f.readlines():
    lineno += 1
    if lineno == 1:
        continue
    # move the following line outside of the for block
    file_source = open(name, mode='w', encoding='utf-8', errors='ignore')
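Put together, a corrected version of the loop might look roughly like this (a sketch, not the original answer: it finishes reading before reopening the same file for writing, and opens the output exactly once):

def convert_files(files, from_encoding, to="utf-8"):
    for name in files:
        # read everything first, skipping the original header line
        with open(name, encoding=from_encoding) as f:
            lines = f.readlines()[1:]
        # then reopen the file once for writing in the target encoding
        with open(name, mode='w', encoding=to, errors='ignore') as file_source:
            file_source.writelines(lines)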
If all you need is to change the character encoding of the files, then it doesn't matter that they are CSV files, unless the conversion may change which characters are interpreted as the delimiter, quotechar, etc.:
def convert(filename, from_encoding, to_encoding):
    with open(filename, newline='', encoding=from_encoding) as file:
        data = file.read().encode(to_encoding)
    with open(filename, 'wb') as outfile:
        outfile.write(data)

for path in csv_files:
    convert(path, "cp866", "utf-8")
Add an errors parameter to change how encoding/decoding errors are handled.
If the files may be large, you could convert the data incrementally:
import os
from shutil import copyfileobj
from tempfile import NamedTemporaryFile

def convert(filename, from_encoding, to_encoding):
    with open(filename, newline='', encoding=from_encoding) as file:
        with NamedTemporaryFile('w', encoding=to_encoding, newline='',
                                dir=os.path.dirname(filename)) as tmpfile:
            copyfileobj(file, tmpfile)
            tmpfile.delete = False
    os.replace(tmpfile.name, filename)  # rename tmpfile -> filename

for path in csv_files:
    convert(path, "cp866", "utf-8")
You can also do the conversion in place (Python 2 string handling):
def convert_files(files, ascii, to="utf-8"):
    for name in files:
        with open(name, 'r+') as f:
            data = f.read()
            data = data.decode(ascii).encode(to)
            f.seek(0)
            f.write(data)
            f.truncate()
