Building list of lists from CSV file - python

I have an Excel file (that I am exporting as a CSV) that I want to parse, but I am having trouble finding the best way to do it. The CSV is a list of computers in my network and the accounts in the local administrator group of each one. I have done something similar with tuples, but the number of accounts per computer ranges from 1 to 30. I want to build a list of lists, then go through each inner list to find the accounts that should be there (Administrator, etc.) and delete them, so that I can export a list of accounts that shouldn't be local admins but are. The CSV file is formatted as follows:
"computer1" Administrator localadmin useraccount
"computer2" localadmin Administrator
"computer3" localadmin Administrator user2account
Any help would be appreciated
EDIT: Here is the code I am working with
import csv
import sys  # used for passing in the argument

file_name = sys.argv[1]  # filename is argument 1
with open(file_name, 'rU') as f:  # opens PW file
    reader = csv.reader(f)
    data = list(list(rec) for rec in csv.reader(f, delimiter=','))  # reads csv into a list of lists
    f.close()  # close the csv
for i in range(len(data)):
    print data[i][0]  # this alone will print all the computer names
    for j in range(len(data[i])):  # trying to run another for loop to print the usernames
        print data[i][j]
The issue is with the second for loop. I want to be able to read across each line and for now, just print them.

This should get you on the right track:
import csv
import sys  # used for passing in the argument

file_name = sys.argv[1]  # filename is argument 1
with open(file_name, 'rU') as f:  # opens PW file
    reader = csv.reader(f)
    data = list(list(rec) for rec in csv.reader(f, delimiter=','))  # reads csv into a list of lists

for row in data:
    print row[0]  # this alone will print all the computer names
    for username in row:  # run another for loop to print the usernames
        print username
The last two lines will print all of the row (including the "computer"). Do
for x in range(1, len(row)):
    print row[x]
... to avoid printing the computer name twice.
Note that f.close() is not required when using the "with" construct because the resource will automatically be closed when the "with" block is exited.
Personally, I would just do:
import csv
import sys  # used for passing in the argument

file_name = sys.argv[1]  # filename is argument 1
with open(file_name, 'rU') as f:  # opens PW file
    reader = csv.reader(f)
    # Print every value of every row.
    for row in reader:
        for value in row:
            print value
That's a reasonable way to iterate through the data and should give you a firm basis to add whatever further logic is required.
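Once the rows are in data, the filtering step the question describes (drop the approved accounts, report what's left) might look like the sketch below; the APPROVED names are placeholders for whatever accounts legitimately belong in your local administrator groups:
import csv
import sys

# Hypothetical allow-list -- replace with your real approved accounts
APPROVED = {'Administrator', 'localadmin'}

file_name = sys.argv[1]
with open(file_name, 'r') as f:
    data = [row for row in csv.reader(f, delimiter=',')]

for row in data:
    computer, accounts = row[0], row[1:]
    # keep only the accounts that should NOT be local admins
    unexpected = [acct for acct in accounts if acct not in APPROVED]
    if unexpected:
        print('%s: %s' % (computer, ', '.join(unexpected)))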

This is how I opened a .csv file and imported columns of data as numpy arrays - naturally, you don't need numpy arrays, but...
import sys
import numpy as np
from PyQt4.QtGui import QApplication, QFileDialog  # or the PySide equivalents

data = {}
app = QApplication(sys.argv)
fname = unicode(QFileDialog.getOpenFileName())
app.quit()
# note: strip('.csv') would strip characters, not the suffix, so use rsplit
filename = fname.rsplit('.csv', 1)[0] + ' for release.csv'
# open the file and skip the first two rows of data
imported_array = np.loadtxt(fname, delimiter=',', skiprows=2)
data = {'time_s': imported_array[:, 0]}
data['Speed_RPM'] = imported_array[:, 1]

It can be done using the pandas library.
import pandas as pd
df = pd.read_csv(filename)
list_of_lists = df.values.tolist()
This approach works for other kinds of delimited data too, such as .tsv files.
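For example, a tab-separated file only needs the separator changed (the filename here is a placeholder):
import pandas as pd

df = pd.read_csv('data.tsv', sep='\t')  # sep='\t' for tab-separated input
list_of_lists = df.values.tolist()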

Related

Write filtered json values to csv

I am looping through a JSON Lines file, filtering for sender ID and status, and outputting this to the terminal. Multiple-sender IDs come as a list, while single-sender IDs are just a string. I want to write the output to one CSV file where the first column is STATUS and the second one is SENDER_ID. I have attempted this at the top of my script but am not sure if this is the right way of doing so.
My script is as follows. At which point would I need to write it to CSV? I have read through the documentation but am still a little unsure.
import json_lines

text_file = open("senderv1.csv", "a")
with open('specifications.jsonl', 'rb') as f:
    for item in json_lines.reader(f):
Using pandas you can create the dataframe and thereby save it as csv. Hope this will solve your problem.
import json_lines
import pandas as pd

single_sender_status = []
single_sender = []
with open('specifications.jsonl', 'rb') as f:
    for item in json_lines.reader(f):
        if 'sender_id' in item:
            single_sender_status.append(item['status'])
            single_sender.append(item['sender_id'])
        else:
            single_sender_status.append(item['status'])
            single_sender.append([sender['id'] for sender in item['senders']])

# the original text_file.write(...) attempts are not needed; pandas writes the CSV
df = pd.DataFrame({'STATUS': single_sender_status, 'SENDER_ID': single_sender})
df.to_csv('senderv1.csv', index=False)
Here is code to write a CSV file with the csv module from the standard library. If the first column contains the status and the following columns the senders:
#!/usr/bin/env python3
import csv
import json_lines

def main():
    with json_lines.open("specifications.jsonl") as reader:
        with open("senderv1.csv", "w", encoding="utf8") as csv_file:
            writer = csv.writer(csv_file, delimiter="\t")
            for item in reader:
                row = [item["status"]]
                if "sender_id" in item:
                    row.append(item["sender_id"])
                elif "senders" in item:
                    row.extend(sender["id"] for sender in item["senders"])
                else:
                    raise ValueError("item with no sender information")
                writer.writerow(row)

if __name__ == "__main__":
    main()
Having the same kind of information spread across a varying number of columns isn't really good, but putting more than one value into a single cell isn't good either. CSV is best suited for two-dimensional tabular data. Maybe you want JSON (Lines) for the result too‽
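If JSON Lines does suit you for the output as well, here is a minimal sketch of that variant (reusing the item structure assumed above; the output filename is a placeholder):
import json
import json_lines

with json_lines.open("specifications.jsonl") as reader:
    with open("senders_out.jsonl", "w", encoding="utf8") as out_file:
        for item in reader:
            if "sender_id" in item:
                sender_ids = [item["sender_id"]]
            else:
                sender_ids = [sender["id"] for sender in item["senders"]]
            # one JSON object per line; a list field holds any number of IDs
            record = {"status": item["status"], "sender_ids": sender_ids}
            out_file.write(json.dumps(record) + "\n")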

Pulling out data from CSV files' specific columns in Python

I need quick help with reading CSV files using Python and storing the data in a data-type file, so I can use it to graph after storing all the data in different files.
I have searched, but in all the cases I found, the data had headers. My data does not have a header. The values are tab separated, and I need to store only specific columns of the data. Ex:
12345601 2345678#abcdef 1 2 365 places
In this case, as an example, I would want to store only "2345678#abcdef" and "365" in the new python file in order to use it in the future to create a graph.
Also, I have more than 1 csv file in a folder and I need to do this for each of them. The sources I found did not cover that and only referred to:
# open csv file
with open(csv_file, 'rb') as csvfile:
Could anyone refer me to already answered question or help me out with it?
. . . and storing it in a PY file to use the data to graph after storing all the data in different files . . .
. . . I would want to store only "2345678#abcdef" and "365" in the new python file . . .
Are you sure that you want to store the data in a python file? Python files are supposed to hold python code and they should be executable by the python interpreter. It would be a better idea to store your data in a data-type file (say, preprocessed_data.csv).
To get a list of files matching a pattern, you can use python's built-in glob library.
Here's an example of how you could read multiple csv files in a directory and extract the desired columns from each one:
import glob

# indices of columns you want to preserve
desired_columns = [1, 4]
# change this to the directory that holds your data files
csv_directory = '/path/to/csv/files/*.csv'

# iterate over files holding data
extracted_data = []
for file_name in glob.glob(csv_directory):
    with open(file_name, 'r') as data_file:
        while True:
            line = data_file.readline()
            # stop at the end of the file
            if len(line) == 0:
                break
            # split the line on whitespace
            tokens = line.split()
            # only grab the columns we care about
            desired_data = [tokens[i] for i in desired_columns]
            extracted_data.append(desired_data)
It would be easy to write the extracted data to a new file. The following example shows how you might save the data to a csv file.
output_string = ''
for row in extracted_data:
    output_string += ','.join(row) + '\n'

with open('./preprocessed_data.csv', 'w') as csv_file:
    csv_file.write(output_string)
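As an aside, the csv module from the standard library can do the joining and quoting for you; a minimal equivalent sketch (on Python 3, pass newline='' when writing CSV):
import csv

with open('./preprocessed_data.csv', 'w', newline='') as csv_file:
    writer = csv.writer(csv_file)
    writer.writerows(extracted_data)  # one output row per extracted row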
Edit:
If you don't want to combine all the csv files, here's a version that can process one at a time:
def process_file(input_path, output_path, selected_columns):
    extracted_data = []
    with open(input_path, 'r') as in_file:
        while True:
            line = in_file.readline()
            if len(line) == 0:
                break
            tokens = line.split()
            extracted_data.append([tokens[i] for i in selected_columns])

    output_string = ''
    for row in extracted_data:
        output_string += ','.join(row) + '\n'

    with open(output_path, 'w') as out_file:
        out_file.write(output_string)

# whenever you need to process a file:
process_file(
    '/path/to/input.csv',
    '/path/to/processed/output.csv',
    [1, 4])

# if you want to process every file in a directory:
target_directory = '/path/to/my/files/*.csv'
for file in glob.glob(target_directory):
    process_file(file, file + '.out', [1, 4])
Edit 2:
The following example will process every file in a directory and write the results to a similarly-named output file in another directory:
import os
import glob

input_directory = '/path/to/my/files/*.csv'
output_directory = '/path/to/output'

for file in glob.glob(input_directory):
    file_name = os.path.basename(file) + '.out'
    out_file = os.path.join(output_directory, file_name)
    process_file(file, out_file, [1, 4])
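One practical detail: the output directory must already exist, or the open() inside process_file will fail. On Python 3 you can create it up front (a small addition, not part of the original answer):
import os

# create the output directory (and any parents) if it doesn't exist yet
os.makedirs(output_directory, exist_ok=True)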
If you want to add headers to the output, then process_file could be modified like this:
def process_file(input_path, output_path, selected_columns, column_headers=[]):
    extracted_data = []
    with open(input_path, 'r') as in_file:
        while True:
            line = in_file.readline()
            if len(line) == 0:
                break
            tokens = line.split()
            extracted_data.append([tokens[i] for i in selected_columns])

    output_string = ','.join(column_headers) + '\n'
    for row in extracted_data:
        output_string += ','.join(row) + '\n'

    with open(output_path, 'w') as out_file:
        out_file.write(output_string)
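A call might then look like this (the header names are hypothetical labels for columns 1 and 4):
process_file(
    '/path/to/input.csv',
    '/path/to/processed/output.csv',
    [1, 4],
    column_headers=['id', 'days'])  # placeholder names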
Here's another approach using a namedtuple that will help extract selected fields from a csv file and then let you write them out to a new csv file.
from collections import namedtuple
import csv

# Set up a named tuple to receive the csv data;
# p1 to p6 are arbitrary field names for the six columns of the csv file
SomeData = namedtuple('SomeData', 'p1, p2, p3, p4, p5, p6')

# Read data from the csv file into a generator object.
# We use a generator rather than a list to reduce the amount of memory the program uses.
# The captured data will only hold the values from the 2nd & 5th columns of the csv file.
datagen = ((d.p2, d.p5) for d in map(SomeData._make, csv.reader(open("mydata.csv", "r"))))

# Write the data to a new csv file
with open("newdata.csv", "w", newline='') as csvfile:
    cvswriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    # Use the generator created earlier to access the filtered data and write it out
    for d in datagen:
        cvswriter.writerow(d)
Original Data in "mydata.csv":
12345601,2345678#abcdef,1,2,365,places
4567,876#def,0,5,200,noplaces
Output Data in "newdata.csv":
2345678#abcdef,365
876#def,200
EDIT 1:
For tab delimited data make the following changes to the code:
change
datagen = ((d.p2, d.p5) for d in map(SomeData._make, csv.reader(open("mydata.csv", "r"))))
to
datagen = ((d.p2, d.p5) for d in map(SomeData._make, csv.reader(open("mydata2.csv", "r"), delimiter='\t', quotechar='"')))
and
cvswriter = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
to
cvswriter = csv.writer(csvfile, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
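If pandas is available, the whole extraction collapses to a few lines. A minimal sketch for the whitespace-separated, headerless files described in the question (the paths are placeholders, and sep=r'\s+' mirrors the line.split() behaviour used above):
import glob
import pandas as pd

frames = []
for path in glob.glob('/path/to/csv/files/*.csv'):
    # header=None: the files have no header row; usecols keeps columns 1 and 4
    frames.append(pd.read_csv(path, sep=r'\s+', header=None, usecols=[1, 4]))

pd.concat(frames, ignore_index=True).to_csv('preprocessed_data.csv', index=False, header=False)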

os.walk-ing through a directory structure to read many CSV headers and write them to an output CSV

I have a folder that contains 60 folders, each of which contains about 60 CSVs (and 1 or 2 non-CSVs).
I need to compare the header rows of all of these CSVs, so I am trying to go through the directories and write to an output CSV (1) the filepath of the file in question and (2) the header row in the subsequent cells of the same output row.
Then go to the next file, and write the same information in the next row of the output CSV.
I am lost in the part where I am writing the header rows to the CSV -- and am too lost to have even generated an error message.
Can anyone advise on what to do next?
import os
import sys
import csv

csvfile = '/Users/username/Documents/output.csv'

def main(args):
    # Open a CSV for writing outputs to
    with open(csvfile, 'w') as out:
        writer = csv.writer(out, lineterminator='\n')
        # Walk through the directory specified on the command line
        for root, dirs, files in os.walk(args):
            for item in files:
                # Check if the item is a CSV
                if item.endswith('.csv'):
                    # If yes, read the first row
                    with open(item, newline='') as f:
                        reader = csv.reader(f)
                        row1 = next(reader)
                        # Write the first cell as the file name
                        f.write(os.path.realpath(item))
                        f.write(f.readline())
                        f.write('\n')
                        # Write this row to a new line in the csvfile var
                        # Go to next file
                # If not a CSV, go to next file
                else:
                    continue
        # Write each file to the CSV
        # writer.writerow([item])

if __name__ == '__main__':
    main(sys.argv[1])
IIUC you need a new csv file with 2 columns: file_path and headers.
If the header that you need is just a list of column names from that csv, then it will be easier if you use a pandas dataframe to store these values first and then write the dataframe to a csv.
import os
import pandas as pd

res = []
for root, dirs, files in os.walk(args):
    for item in files:
        # Check if the item is a CSV
        if item.endswith('.csv'):
            # If yes, read it (join with root so the path is correct)
            path = os.path.join(root, item)
            df = pd.read_csv(path)
            row = {}
            row['file_path'] = os.path.realpath(path)
            row['headers'] = df.columns
            res.append(row)

res_df = pd.DataFrame(res)
res_df.to_csv(csvfile)
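One refinement worth considering: pd.read_csv parses each entire file just to reach the column names. Passing nrows=0 reads only the header row, which is much cheaper for big files (a small sketch, not part of the original answer):
import pandas as pd

# nrows=0 parses just the header line and skips all data rows
headers = pd.read_csv('some_file.csv', nrows=0).columns.tolist()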
You seem to be getting confused between which file you're reading and writing to. Confusion is normal when you try to do everything in one big function. The whole point of functions is to break things down so it's easy to follow, understand and debug.
Here is some code (untested, so treat it as a starting point), but you can easily print out what each function is returning, and once you know that's correct, you feed it to the next function. Each function is small, with very few variables, so not much can go wrong.
And most importantly, the variables in each function are local to it, meaning they cannot interfere with what's happening elsewhere, or even confuse you into thinking they might be interfering (and that makes a huge difference).
import os
import sys
import csv

def collect_csv_data(directory):
    results = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith('.csv'):
                path = os.path.join(root, file)
                headers = extract_headers(path)
                # file path in the first cell, then one header per cell
                results.append([path] + headers)
    return results

def extract_headers(filepath):
    with open(filepath) as f:
        reader = csv.reader(f)
        headers = next(reader)
    return headers

def write_results(results, filepath):
    with open(filepath, 'w') as f:
        writer = csv.writer(f)
        for result in results:
            writer.writerow(result)

if __name__ == '__main__':
    directory = sys.argv[1]
    results = collect_csv_data(directory)
    write_results(results, 'results.csv')

Use Python to split a CSV file with multiple headers

I have a CSV file that is being constantly appended. It has multiple headers and the only common thing among the headers is that the first column is always "NAME".
How do I split the single CSV file into separate CSV files, one for each header row?
here is a sample file:
"NAME","AGE","SEX","WEIGHT","CITY"
"Bob",20,"M",120,"New York"
"Peter",33,"M",220,"Toronto"
"Mary",43,"F",130,"Miami"
"NAME","COUNTRY","SPORT","NUMBER","SPORT","NUMBER"
"Larry","USA","Football",14,"Baseball",22
"Jenny","UK","Rugby",5,"Field Hockey",11
"Jacques","Canada","Hockey",19,"Volleyball",4
"NAME","DRINK","QTY"
"Jesse","Beer",6
"Wendel","Juice",1
"Angela","Milk",3
If the size of the csv files is not huge -- so all can be in memory at once -- just use read() to read the file into a string and then use a regex on this string:
import re

with open(ur_csv) as f:
    data = f.read()

chunks = re.finditer(r'(^"NAME".*?)(?=^"NAME"|\Z)', data, re.S | re.M)
for i, chunk in enumerate(chunks, 1):
    with open('/path/{}.csv'.format(i), 'w') as fout:
        fout.write(chunk.group(1))
If the size of the file is a concern, you can use mmap to create something that looks like a big string but is not all in memory at the same time.
Then use the mmap string with a regex to separate the csv chunks like so:
import mmap
import re

with open(ur_csv, 'rb') as f:  # mmap needs the file opened in binary mode
    mf = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
    chunks = re.finditer(r'(^"NAME".*?)(?=^"NAME"|\Z)', mf, re.S | re.M)
    for i, chunk in enumerate(chunks, 1):
        with open('/path/{}.csv'.format(i), 'w') as fout:
            fout.write(chunk.group(1))
In either case, this will write all the chunks in files named 1.csv, 2.csv etc.
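A caveat if you are on Python 3: an mmap behaves like a bytes object, so the regex pattern and the output files must be bytes as well. A sketch of the Python 3 variant:
import mmap
import re

with open(ur_csv, 'rb') as f:
    mf = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
    # bytes pattern (br'...') because mmap yields bytes, not str
    chunks = re.finditer(br'(^"NAME".*?)(?=^"NAME"|\Z)', mf, re.S | re.M)
    for i, chunk in enumerate(chunks, 1):
        with open('/path/{}.csv'.format(i), 'wb') as fout:
            fout.write(chunk.group(1))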
Copy the input to a new output file each time you see a header line. Something like this (not checked for errors):
partNum = 1
outHandle = None
for line in open("yourfile.csv", "r").readlines():
    if line.startswith('"NAME"'):
        if outHandle is not None:
            outHandle.close()
        outHandle = open("part%d.csv" % (partNum,), "w")
        partNum += 1
    outHandle.write(line)
outHandle.close()
The above will break if the input does not begin with a header line or if the input is empty.
You can use the python csv package to read your source file and write multiple csv files based on the rule that if element 0 in your row == "NAME", spawn off a new file. Something like this...
import csv

outfile_name = "out_%d.csv"
out_num = 1

def flush_buffer(buffered_rows, num):
    with open(outfile_name % num, 'wb') as csvout:
        writer = csv.writer(csvout)
        for b_row in buffered_rows:
            writer.writerow(b_row)

with open('nameslist.csv', 'rb') as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    csv_buffer = []
    for row in csvreader:
        if row[0] != "NAME":
            csv_buffer.append(row)
        else:
            # a new header row starts the next output file
            if csv_buffer:
                flush_buffer(csv_buffer, out_num)
                out_num += 1
            csv_buffer = [row]
    # write the rows buffered after the last header
    if csv_buffer:
        flush_buffer(csv_buffer, out_num)
P.S. I haven't actually tested this but that's the general concept
Given the other answers, the only modification that I would suggest would be to open using csv.DictReader. Pseudocode would be like this, assuming that the first line in the file is the first header.
Note that this assumes that there is no blank line or other indicator between the entries, so that a 'NAME' header occurs right after data. If there were a blank line between appended files, you could use that as an indicator to read infile.fieldnames on the next row. If you need to handle the inputs as a list, then the previous answers are better.
ifile = open(filename, 'rb')
infile = csv.DictReader(ifile)
infields = infile.fieldnames
filenum = 1
ofile = open('outfile' + str(filenum), 'wb')
outfields = infields  # This allows you to change the header fields
outfile = csv.DictWriter(ofile, fieldnames=outfields, extrasaction='ignore')
outfile.writerow(dict((fn, fn) for fn in outfields))
for row in infile:
    if row['NAME'] != 'NAME':
        # process this row here and do whatever is needed
        pass
    else:
        ofile.close()
        # build infields again from this row
        infields = [row["NAME"], ...]  # This assumes you know the names & order
        # A dict cannot be pulled as a list and keep the order that you want.
        filenum += 1
        ofile = open('outfile' + str(filenum), 'wb')
        outfields = infields  # This allows you to change the header fields
        outfile = csv.DictWriter(ofile, fieldnames=outfields, extrasaction='ignore')
        outfile.writerow(dict((fn, fn) for fn in outfields))
# This is the end of the loop. All data has been read and processed.
ofile.close()
ifile.close()
If the exact order of the new header does not matter except for the name in the first entry, then you can transfer the new list as follows:
infields = [row['NAME']]
for k in row.keys():
    if k != 'NAME':
        infields.append(row[k])
This will create the new header with NAME in entry 0 but the others will not be in any particular order.

How can I get a specific field of a csv file?

I need a way to get a specific item (field) of a CSV. Say I have a CSV with 100 rows and 2 columns (comma separated). First column emails, second column passwords. For example, I want to get the password for the email in row 38. So I need only the item from the 2nd column, row 38...
Say I have a csv file:
aaaaa#aaa.com,bbbbb
ccccc#ccc.com,ddddd
How can I get only 'ddddd' for example?
I'm new to the language and tried some stuff with the csv module, but I don't get it...
import csv
mycsv = csv.reader(open(myfilepath))
for row in mycsv:
    text = row[1]
Following the comments to the SO question here, a better, more robust version would be:
import csv
with open(myfilepath, 'rb') as f:
    mycsv = csv.reader(f)
    for row in mycsv:
        text = row[1]
        ...
Update: If what the OP actually wants is the last string in the last row of the csv file, there are several approaches that don't necessarily need csv. For example,
fulltxt = open(myfilepath, 'rb').read()
laststring = fulltxt.split(',')[-1]
This is not good for very big files because you load the complete text into memory, but it could be OK for small files. Note that laststring could include a newline character, so strip it before use.
And finally, if what the OP wants is the second string in line n (for n=2):
Update 2: This is now the same code as the one in the answer from J.F. Sebastian (the credit is his):
import csv
line_number = 2
with open(myfilepath, 'rb') as f:
    mycsv = csv.reader(f)
    mycsv = list(mycsv)
    text = mycsv[line_number][1]
    ...
#!/usr/bin/env python
"""Print a field specified by row, column numbers from given csv file.

USAGE:
    %prog csv_filename row_number column_number
"""
import csv
import sys

filename = sys.argv[1]
row_number, column_number = [int(arg, 10) - 1 for arg in sys.argv[2:]]

with open(filename, 'rb') as f:
    rows = list(csv.reader(f))
    print rows[row_number][column_number]
Example
$ python print-csv-field.py input.csv 2 2
ddddd
Note: list(csv.reader(f)) loads the whole file in memory. To avoid that you could use itertools:
import itertools
# ...
with open(filename, 'rb') as f:
    row = next(itertools.islice(csv.reader(f), row_number, row_number + 1))
    print row[column_number]
import csv

def read_cell(x, y):
    with open('file.csv', 'r') as f:
        reader = csv.reader(f)
        y_count = 0
        for n in reader:
            if y_count == y:
                cell = n[x]
                return cell
            y_count += 1

print(read_cell(4, 8))
This example prints the cell at column 4, row 8 (both zero-indexed) in Python 3.
There is an interesting point you need to catch about the csv.reader() object: it is not a list and it is not subscriptable.
This works:
for r in csv.reader(file_obj):  # file not closed
    print r
This does not:
r = csv.reader(file_obj)
print r[0]
So, you first have to convert it to a list to make the above code work:
r = list(csv.reader(file_obj))
print r[0]
Finally I got it!!!
import csv

def select_index(index):
    csv_file = open('oscar_age_female.csv', 'r')
    csv_reader = csv.DictReader(csv_file)
    for line in csv_reader:
        l = line['Index']
        if l == index:
            print(line[' "Name"'])

select_index('11')
"Bette Davis"
The following may be what you are looking for:
import pandas as pd
df = pd.read_csv("table.csv")
print(df["Password"][row_number])
# where row_number might be 38
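Note that pd.read_csv treats the first row as a header. For a headerless email/password file like the one in the question, label the columns positionally instead (a small sketch):
import pandas as pd

# header=None tells pandas there is no header row; columns are labeled 0, 1, ...
df = pd.read_csv("table.csv", header=None)
print(df[1][37])  # column 2 (passwords), row 38, both zero-indexed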
import csv
inf = csv.reader(open('yourfile.csv', 'r'))
for row in inf:
    print row[1]
