How to use multiprocessing with csv.DictReader? - python

This is a script to calculate a histogram, and I find that the csv module takes most of the time. How can I run it in parallel?
The input file samtools.depth.gz is 14 GB and contains about 3 billion lines.
SamplesList = ('Sample_A', 'Sample_B', 'Sample_C', 'Sample_D')

from collections import Counter

cDepthCnt = {key:Counter() for key in SamplesList}
cDepthStat = {key:[0,0] for key in SamplesList} # x and x^2

def inStat(inDepthFile):
    import gzip
    import csv
    RecordCnt = 0
    MaxDepth = 0
    with gzip.open(inDepthFile, 'rt') as tsvfin:
        tsvin = csv.DictReader(tsvfin, delimiter='\t', fieldnames=('ChrID','Pos')+SamplesList)
        for row in tsvin:
            RecordCnt += 1
            for k in SamplesList:
                theValue = int(row[k])
                if theValue > MaxDepth:
                    MaxDepth = theValue
                cDepthCnt[k][theValue] += 1
                cDepthStat[k][0] += theValue
                cDepthStat[k][1] += theValue * theValue
    return RecordCnt,MaxDepth

RecordCnt,MaxDepth = inStat('samtools.depth.gz')
print('xxx')
There are ways to read a huge file in chunks and distribute them as lists, as in https://stackoverflow.com/a/30294434/159695 :
bufsize = 65536
with open(path) as infile:
    while True:
        lines = infile.readlines(bufsize)
        if not lines:
            break
        for line in lines:
            process(line)
However, csv.DictReader only accepts file handles.
There is a way to split the input into temporary files at https://gist.github.com/jbylund/c37402573a896e5b5fc8 ; I wonder whether I can use a fifo to do it on the fly.
I just found that csv.DictReader accepts any object which supports the iterator protocol and returns a string each time its next() method is called; file objects and list objects are both suitable.
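For illustration, a minimal check that an in-memory list of tab-separated strings works (the sample lines here are made up):

import csv

SamplesList = ('Sample_A', 'Sample_B', 'Sample_C', 'Sample_D')
# hypothetical sample lines; any iterable of strings will do
lines = ['chr1\t100\t3\t5\t0\t7\n', 'chr1\t101\t2\t4\t1\t9\n']
tsvin = csv.DictReader(lines, delimiter='\t', fieldnames=('ChrID', 'Pos') + SamplesList)
for row in tsvin:
    print(row['ChrID'], row['Sample_A'])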
I have modified inStat() to accept lines. Would you please help me complete statPool()?
def statPool(inDepthFile):
    import gzip
    RecordCnt = 0
    MaxDepth = 0
    cDepthCnt = {key:Counter() for key in SamplesList}
    cDepthStat = {key:[0,0,0,0,0] for key in SamplesList} # x and x^2
    with gzip.open(inDepthFile, 'rt') as tsvfin:
        while True:
            lines = tsvfin.readlines(ChunkSize)
            if not lines:
                break
            with Pool(processes=4) as pool:
                res = pool.apply_async(inStat, [lines])
                iRecordCnt, iMaxDepth, icDepthCnt, icDepthStat = res.get()
            RecordCnt += iRecordCnt
            if iMaxDepth > MaxDepth:
                MaxDepth = iMaxDepth
            for k in SamplesList:
                cDepthCnt[k].update(icDepthCnt[k])
                cDepthStat[k][0] += icDepthStat[k][0]
                cDepthStat[k][1] += icDepthStat[k][1]
    return RecordCnt, MaxDepth, cDepthCnt, cDepthStat
I think an asyncio.Queue would be a good way to pipe lines to multiple csv.DictReader workers.

Looking up names in global scope takes longer than looking them up in local scope.
You do a lot of lookups, so I suggest changing your code to:
cDepthCnt = {key:Counter() for key in SamplesList}
cDepthStat = {key:[0,0] for key in SamplesList} # x and x^2

RecordCnt,MaxDepth = inStat('samtools.depth.gz', cDepthCnt, cDepthStat)
print('xxx')

def inStat(inDepthFile, depthCount, depthStat):
    # use the local depthCount, depthStat
to speed that part up by some.
Running in parallel while accessing the same keys over and over will introduce locks on those values to avoid mishaps, and locking/unlocking takes time as well. You would have to measure whether it is faster.
All you do is sum up values, so you could partition your data, use the four parts with 4 (times 2) different dictionaries, and afterwards merge the four dicts into your global ones to avoid locks, as sketched below.
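A minimal sketch of that partition-and-merge idea applied to statPool() above (hedged: it assumes inStat() was modified, as the asker says, to take a list of lines and return its own (RecordCnt, MaxDepth, cDepthCnt, cDepthStat); ChunkSize and the worker count are illustrative):

import gzip
from collections import Counter
from multiprocessing import Pool

ChunkSize = 1024 * 1024  # size hint for readlines(); illustrative value

def statPool(inDepthFile):
    RecordCnt, MaxDepth = 0, 0
    cDepthCnt = {key: Counter() for key in SamplesList}
    cDepthStat = {key: [0, 0] for key in SamplesList}  # sum(x) and sum(x^2)
    with gzip.open(inDepthFile, 'rt') as tsvfin, Pool(processes=4) as pool:
        while True:
            lines = tsvfin.readlines(ChunkSize)
            if not lines:
                break
            # each worker builds private dicts from its slice; no shared state, no locks
            for iRecordCnt, iMaxDepth, icDepthCnt, icDepthStat in pool.imap_unordered(
                    inStat, (lines[i::4] for i in range(4))):
                RecordCnt += iRecordCnt
                MaxDepth = max(MaxDepth, iMaxDepth)
                for k in SamplesList:
                    cDepthCnt[k].update(icDepthCnt[k])
                    cDepthStat[k][0] += icDepthStat[k][0]
                    cDepthStat[k][1] += icDepthStat[k][1]
    return RecordCnt, MaxDepth, cDepthCnt, cDepthStat

The pool is created once, outside the read loop, so workers are not re-spawned for every chunk.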

Related

How to save and load a large dictionary to storage in python?

I have a 1.5 GB dictionary that takes about 90 seconds to calculate, so I want to save it to storage once and load it every time I want to use it again. This creates two challenges:
Loading the file has to take less than 90 seconds.
As RAM is limited (in PyCharm) to ~4 GB, it cannot be memory-intensive.
I also need it to be utf-8 capable.
I have tried solutions such as pickle but they always end up throwing a Memory Error.
Notice that my dictionary is made of Strings and thus solutions like in this post do not apply.
Things I do not care about:
Saving time (as long as it's not more than ~20 minutes, as I'm looking to do it once).
How much space it takes in storage to save the dictionary.
How can I do that? Thanks.
Edit:
I forgot to mention it's a dictionary containing sets, so json.dump() doesn't work as it can't handle sets.
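For what it's worth, pickle itself has no problem with sets, so the set values are not the obstacle here, only the overall size is; a minimal check:

import pickle

d = {"user1": {"a", "b"}, "user2": {"c"}}
blob = pickle.dumps(d)      # works: pickle serializes sets natively
assert pickle.loads(blob) == d
# json.dumps(d) would raise a TypeError because sets are not JSON serializable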
If the dict consumes a lot of memory because it has many items, you could try dumping many smaller dicts and combining them with update:
mk_pickle.py
import pickle

CHUNKSIZE = 10  # you will of course make this number much bigger

def mk_chunks(d, chunk_size):
    chunk = {}
    ctr = chunk_size
    for key, val in d.items():
        chunk[key] = val
        ctr -= 1
        if ctr == 0:
            yield chunk
            ctr = chunk_size
            chunk = {}
    if chunk:
        yield chunk

def dump_big_dict(d):
    with open("dump.pkl", "wb") as fout:
        for chunk in mk_chunks(d, CHUNKSIZE):
            pickle.dump(chunk, fout)

# For testing:
N = 1000
big_dict = dict()
for n in range(N):
    big_dict[n] = "entry_" + str(n)

dump_big_dict(big_dict)
read_dict.py
import pickle

d = {}
with open("dump.pkl", "rb") as fin:
    while True:
        try:
            small_dict = pickle.load(fin)
        except EOFError:
            break
        d.update(small_dict)
You could try to generate and save it in parts across several files: generate some key-value pairs, store them in a file with pickle, and delete the dict from memory, then continue until all key-value pairs are exhausted.
Then, to load the whole dict, use dict.update for each part, but that could also run into memory trouble, so instead you can make a class derived from dict which reads the corresponding file on demand according to the key (i.e. overriding __getitem__), something like this:
import pickle

filenames = ['key1', 'key1000', 'key2000']

class Dict(dict):
    def __init__(self):
        super().__init__()
        self.dict = {}

    def __getitem__(self, key):
        if key in self.dict:
            return self.dict[key]
        else:
            del self.dict  # destroy the old part before the new one is created
            with open(self.getFileName(key), 'rb') as f:
                self.dict = pickle.load(f)
            return self.dict[key]

    def getFileName(self, key):
        '''assuming the keys are separated into files in alphabetical order,
        each file name taken from its first key'''
        if key in filenames:
            return key
        else:
            A = list(sorted(filenames + [key]))
            return A[A.index(key) - 1]
Keep in mind that smaller dicts will be loaded faster, so you should experiment to find the right number of files.
You can also keep more than one part resident in memory, depending on available memory, as in the sketch below.
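A minimal sketch of that idea, keeping several recently used parts cached in memory at once (hedged: the class and file-lookup logic are illustrative, following the getFileName convention above):

import pickle
from functools import lru_cache

@lru_cache(maxsize=4)          # keep up to 4 parts in memory at once; tune to your RAM
def load_part(filename):
    with open(filename, 'rb') as f:
        return pickle.load(f)

class ChunkedDict:
    def __init__(self, filenames):
        self.filenames = sorted(filenames)

    def _file_for(self, key):
        # same idea as getFileName above: the part whose first key precedes this key
        candidates = [name for name in self.filenames if name <= key]
        return candidates[-1] if candidates else self.filenames[0]

    def __getitem__(self, key):
        return load_part(self._file_for(key))[key]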

Ordering data from returned pool.apply_async

I am currently writing a steganography program. I currently have the majority of the things I want working. However, I want to rebuild my message using multiple processes, which obviously means the bits returned from the processes need to be ordered. So currently I have:
OK, I'm home now, so I will put some actual code up.
def message_unhide(data):
    inp = cv.LoadImage(data[0])  # data[0] path to image
    steg = LSBSteg(inp)
    bin = steg.unhideBin()
    return bin

# code in main program underneath
count = 0
f = open(files[2], "wb")  # files[2] = name of file to rebuild
fat = open("fat.txt", 'w+')
inp = cv.LoadImage(files[0][count])  # files[0] directory path of images
steg = LSBSteg(inp)
bin = steg.unhideBin()
fat.write(bin)
fat.close()
fat = open("fat.txt", 'rb')
num_files = fat.read()  # amount of images message hidden across
fat.close()
count += 1

pool = Pool(5)
binary = []
''' Just something I was testing
for x in range(int(num_files)):
    binary.append(0)
print (binary)
'''
while count <= int(num_files):
    data = [files[0][count], count]
    #f.write(pool.apply(message_unhide, args=(data, ))) #
    #binary[count - 1] = [pool.apply_async(message_unhide, (data, ))] #
    #again just another few ways I was trying to overcome
    binary = [pool.apply_async(message_unhide, (data, ))]
    count += 1

pool.close()
pool.join()

bits = [b.get() for b in binary]
print(binary)
#for b in bits:
#    f.write(b)
f.close()
This method just overwrites binary:
binary = [pool.apply_async(message_unhide, (data, ))]
This method fills the entire binary list, however I lose the .get():
binary[count - 1] = [pool.apply_async(message_unhide, (data, ))]
Sorry for the sloppy coding, I am certainly no expert.
Your main issue has to do with overwriting binary in the loop. You only have one item in the list because you're throwing away the previous list and recreating it each time. Instead, you should use append to modify the existing list:
binary.append(pool.apply_async(message_unhide, (data, )))
But you might have a much nicer time if you use pool.map instead of rolling your own version. It expects an iterable yielding a single argument to pass to the function on each iteration, and it returns a list of the return values. The map call blocks until all the values are ready, so you don't need any other synchronization logic.
Here's an implementation using a generator expression to build the data argument items on the fly. You could simplify things and just pass files[0] to map if you rewrote message_unhide to accept the filename as its argument directly, without indexing a list (you never use the index, it seems):
# no loop this time
binary = pool.map(message_unhide, ([file, i] for i, file in enumerate(files[0])))
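Here is a rough end-to-end sketch of that simplification (hedged: it assumes message_unhide is rewritten to take a filename directly, and that cv, LSBSteg, files and num_files are set up as in the question):

from multiprocessing import Pool

def message_unhide(path):
    inp = cv.LoadImage(path)        # cv and LSBSteg as in the question
    steg = LSBSteg(inp)
    return steg.unhideBin()

if __name__ == '__main__':
    pool = Pool(5)
    # map preserves input order, so the recovered chunks come back in order
    bits = pool.map(message_unhide, files[0][1:int(num_files) + 1])
    pool.close()
    pool.join()
    with open(files[2], "wb") as f:
        for b in bits:
            f.write(b)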

Python compare every line in file with all others

I am implementing a statistical program and have created a performance bottleneck and was hoping that I could obtain some help from the community to possibly point me in the direction of optimization.
I am creating a set for each row in a file and finding the intersection of that set with the set of every other row in the same file. I then use the size of that intersection to filter certain sets from the output. The problem is that I have a nested for loop (O(n^2)), and the standard size of the files coming into the program is just over 20,000 lines. I have timed the algorithm: for under 500 lines it runs in about 20 minutes, but for the big files it takes about 8 hours to finish.
I have 16 GB of RAM at my disposal and a reasonably fast 4-core Intel i7 processor. I have noticed no significant difference in memory use by copying list1 and using a second list for comparison instead of opening the file again (maybe this is because I have an SSD?). I thought the 'with open' mechanism reads/writes directly to the HDD, which is slower, but noticed no difference when using two lists. In fact, the program rarely uses more than 1 GB of RAM during operation.
I am hoping that other people have used a certain datatype or maybe better understand multiprocessing in Python and that they might be able to help me speed things up. I appreciate any help and I hope my code isn't too poorly written.
import ast, sys, os, shutil

list1 = []
end = 0
pos = 0
filterValue = 3

# creates output file with filterValue appended to name
with open(arg2 + arg1 + "/filteredSets" + str(filterValue), "w") as outfile:
    with open(arg2 + arg1 + "/file", "r") as infile:
        # create a list of sets of rows in file
        for row in infile:
            list1.append(set(ast.literal_eval(row)))
        infile.seek(0)
        for row in infile:
            # if file only has one row, no comparisons need to be made
            if not(len(list1) == 1):
                # get the first set from the list and...
                set1 = set(ast.literal_eval(row))
                # ...find the intersection of every other set in the file
                for i in range(0, len(list1)):
                    # don't compare the set with itself
                    if not(pos == i):
                        set2 = list1[i]
                        set3 = set1.intersection(set2)
                        # if the two sets have less than 3 items in common
                        if(len(set3) < filterValue):
                            # and you've reached the end of the file
                            if(i == len(list1)):
                                # append the row in outfile
                                outfile.write(row)
                                # increase position in infile
                                pos += 1
                        else:
                            break
            else:
                outfile.write(row)
Sample input would be a file with this format:
[userID1, userID2, userID3]
[userID5, userID3, userID9]
[userID10, userID2, userID3, userID1]
[userID8, userID20, userID11, userID1]
The output file if this were the input file would be:
[userID5, userID3, userID9]
[userID8, userID20, userID11, userID1]
...because the two sets removed contained three or more of the same user IDs.
This answer is not about how to split code into functions, name variables, etc. It's about a faster algorithm in terms of complexity.
I'd use a dictionary. I will not write exact code, you can do it yourself.
Sets = dict()
for rowID, row in enumerate(Rows):
    for userID in row:
        if Sets.get(userID) is None:
            Sets[userID] = set()
        Sets[userID].add(rowID)
So, now we have a dictionary which can be used to quickly obtain the row numbers of rows containing a given userID.
BadRows = set()
for rowID, row in enumerate(Rows):
    Intersections = dict()
    for userID in row:
        for rowID_cmp in Sets[userID]:
            if rowID_cmp != rowID:
                Intersections[rowID_cmp] = Intersections.get(rowID_cmp, 0) + 1
    # Now Intersections contains info about how many "times"
    # row numbered rowID_cmp intersects the current row
    filteredOut = False
    for rowID_cmp in Intersections:
        if Intersections[rowID_cmp] >= filterValue:
            BadRows.add(rowID_cmp)
            filteredOut = True
    if filteredOut:
        BadRows.add(rowID)
Having the row numbers of all filtered-out rows saved in BadRows, we now iterate one last time:
for rowID, row in enumerate(Rows):
    if rowID not in BadRows:
        # output row
This works in 3 scans and in O(n log n) time. Maybe you'd have to rework how you iterate the Rows array, because it's a file in your case, but that doesn't really change much.
I'm not sure about the Python syntax and details, but you get the idea behind my code.
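For reference, a rough self-contained sketch of that approach in Python (hedged: the function name is mine, it reads the whole file into a list like the original code does, and removing both rows of a heavy overlap is my reading of the intended filter):

import ast
from collections import defaultdict

def filter_rows(datafile, savefile, filter_value=3):
    with open(datafile, 'r') as infile:
        raw_lines = infile.readlines()
    rows = [set(ast.literal_eval(line)) for line in raw_lines]

    # inverted index: userID -> row numbers containing it
    index = defaultdict(set)
    for row_id, row in enumerate(rows):
        for user_id in row:
            index[user_id].add(row_id)

    bad_rows = set()
    for row_id, row in enumerate(rows):
        shared_counts = defaultdict(int)
        for user_id in row:
            for other_id in index[user_id]:
                if other_id != row_id:
                    shared_counts[other_id] += 1
        for other_id, shared in shared_counts.items():
            if shared >= filter_value:
                bad_rows.update({row_id, other_id})

    with open(savefile, 'w') as outfile:
        for row_id, line in enumerate(raw_lines):
            if row_id not in bad_rows:
                outfile.write(line)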
First of all, please pack your code into functions which each do one thing well.
def get_data(*args):
    # get the data.

def find_intersections_sets(list1, list2):
    # do the intersections part.

def loop_over_some_result(result):
    # insert assertions so that you don't end up looping in infinity:
    assert result is not None
    ...

def myfunc(*args):
    source1, source2 = args
    L1, L2 = get_data(source1), get_data(source2)
    intersects = find_intersections_sets(L1,L2)
    ...

if __name__ == "__main__":
    myfunc()
then you can easily profile the code using:
if __name__ == "__main__":
    import cProfile
    cProfile.run('myfunc()')
which gives you invaluable insight into your code behaviour and allows you to track down logical bugs. For more on cProfile, see How can you profile a python script?
An option to track down a logical flaw (we're all humans, right?) is to use a timeout function in a decorator like this (python2) or this (python3):
With that, myfunc can be changed to:
def get_data(*args):
    # get the data.

def find_intersections_sets(list1, list2):
    # do the intersections part.

def myfunc(*args):
    source1, source2 = args
    L1, L2 = get_data(source1), get_data(source2)
    #timeout(10) # seconds <---- the clever bit!
    intersects = find_intersections_sets(L1,L2)
    ...
...where the timeout operation will raise an error if it takes too long.
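The linked answers give complete implementations; as a minimal Unix-only sketch (my own simplification using signal.alarm, not the code from those links):

import signal
from functools import wraps

def timeout(seconds):
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            def handler(signum, frame):
                raise TimeoutError("%s timed out after %ds" % (func.__name__, seconds))
            old = signal.signal(signal.SIGALRM, handler)
            signal.alarm(seconds)          # deliver SIGALRM after `seconds`
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)            # cancel the pending alarm
                signal.signal(signal.SIGALRM, old)
        return wrapper
    return decorator

@timeout(10)
def find_intersections_sets(list1, list2):
    ...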
Here is my best guess:
import ast

def get_data(filename):
    with open(filename, 'r') as fi:
        data = fi.readlines()
    return data

def get_ast_set(line):
    return set(ast.literal_eval(line))

def less_than_x_in_common(set1, set2, limit=3):
    if len(set1.intersection(set2)) < limit:
        return True
    else:
        return False

def check_infile(datafile, savefile, filtervalue=3):
    list1 = [get_ast_set(row) for row in get_data(datafile)]
    outlist = []
    for row in list1:
        # keep the row only if it has fewer than `filtervalue` items in common
        # with every row kept so far (all() is True for the first row)
        if all(less_than_x_in_common(row, kept, limit=filtervalue) for kept in outlist):
            outlist.append(row)
    with open(savefile, 'w') as fo:
        fo.writelines(str(row) + '\n' for row in outlist)

if __name__ == "__main__":
    datafile = str(arg2 + arg1 + "/file")
    savefile = str(arg2 + arg1 + "/filteredSets" + str(filterValue))
    check_infile(datafile, savefile)

Python: Why does this script get very slow after some point in time?

I am running the script below to extract IP addresses from file f for domains in file g. It is worth mentioning that there are 11 files in the path, and each has about 800 million lines (each file f). In this script I load file g into a dictionary d in memory and then compare the lines of file f with the items in the dictionary d; if a domain is there, I check whether the bl_date in d is between the dates in f, and if so write it to another dictionary dns_dic. Here is how my script looks:
import json
from collections import defaultdict
from glob import glob
from multiprocessing import Pool
from time import time

path = '/data/data/2014*.M.mtbl.A.1'

def process_file(file):
    start = time()
    dns_dic = defaultdict(set)
    d = defaultdict(set)
    filename = file.split('/')[-1]
    print(file)
    g = open('/data/data/ap2014-2dom.txt', 'r')
    for line in g:
        line = line.strip('\n')
        domain, bl_date = line.split('|')
        bl_date = int(bl_date)
        if domain in d:
            d[domain].add(bl_date)
        else:
            d[domain] = set([bl_date])
    print("loaded APWG in %.fs" % (time() - start))
    stat_d, stat_dt = 0, 0
    f = open(file, 'r')
    with open('/data/data/overlap_last_%s.txt' % filename, 'a') as w:
        for n, line in enumerate(f):
            line = line.strip('')
            try:
                jdata = json.loads(line)
                dom = jdata.get('bailiwick')[:-1]
            except:
                pass
            if dom in d:
                stat_d += 1
                for bl_date in d.get(dom):
                    if jdata.get('time_first') <= bl_date <= jdata.get('time_last'):
                        stat_dt += 1
                        dns_dic[dom].update(jdata.get('rdata', []))
        for domain, ips in dns_dic.items():
            for ip in ips:
                w.write('%s|%s\n' % (domain, ip))
            w.flush()

if __name__ == "__main__":
    files_list = glob(path)
    cores = 11
    print("Using %d cores" % cores)
    pp = Pool(processes=cores)
    pp.imap_unordered(process_file, files_list)
    pp.close()
    pp.join()
Here is file f:
{"bailiwick":"ou.ac.","time_last": 1493687431,"time_first": 1393687431,"rdata": ["50.21.180.100"]}
{"bailiwick": "ow.ac.","time_last": 1395267335,"time_first": 1395267335,"rdata": ["50.21.180.100"]}
{"bailiwick":"ox.ac.","time_last": 1399742959,"time_first": 1393639617,"rdata": ["65.254.35.122", "216.180.224.42"]}
Here is file g:
ou.ac|1407101455
ox.ac|1399553282
ox.ac|1300084462
ox.ac|1400243222
Expected result:
ou.ac|["50.21.180.100"]
ox.ac|["65.254.35.122", "216.180.224.42"]
Can somebody help me find out why at some point in time the script becomes really slow, although memory usage stays at about 400 MB the whole time?
Even though it doesn't change the overall computational complexity, I would start with avoiding redundant dict lookup operations. For instance, instead of
if domain in d:
    d[domain].add(bl_date)
else:
    d[domain] = set([bl_date])
you might want to do
d.setdefault(domain, set()).add(bl_date)
in order to perform one lookup instead of two. But actually, it seems like a set is not the perfect choice for storing a domain's access timestamps. If you used lists instead, you could sort each domain's timestamps before you start matching them to the session data from f. That way, you would simply compare each session's fields time_last and time_first to the first and last element in the domain's timestamp list to determine if the IP addresses are to be put into dns_dic[dom].
In general, you are doing a lot of unnecessary work in the for bl_date in d.get(dom): loop. At the very least, you should terminate the loop at the first bl_date that lies between the time_first and time_last fields. Depending on the length of g, this might be your bottleneck.
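A small sketch combining both suggestions (hedged: bisect is my choice for the membership test once each domain's timestamps are sorted; the names follow the question):

import bisect
from collections import defaultdict

def load_blacklist(path):
    """domain -> sorted list of blacklist timestamps."""
    d = defaultdict(list)
    with open(path, 'r') as g:
        for line in g:
            domain, bl_date = line.strip('\n').split('|')
            d[domain].append(int(bl_date))
    for timestamps in d.values():
        timestamps.sort()
    return d

def any_date_in_window(timestamps, time_first, time_last):
    """True if any timestamp in the sorted list falls inside [time_first, time_last]."""
    i = bisect.bisect_left(timestamps, time_first)
    return i < len(timestamps) and timestamps[i] <= time_last

# inside the per-line loop of process_file, replacing the inner bl_date loop:
#     if dom in d and any_date_in_window(d[dom], jdata['time_first'], jdata['time_last']):
#         dns_dic[dom].update(jdata.get('rdata', []))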

Upper memory limit?

Is there a limit to memory for Python? I've been using a Python script to calculate the average values from a file which is a minimum of 150 MB big.
Depending on the size of the file I sometimes encounter a MemoryError.
Can more memory be assigned to Python so I don't encounter the error?
EDIT: Code now below
NOTE: The file sizes can vary greatly (up to 20 GB); the minimum size of a file is 150 MB
file_A1_B1 = open("A1_B1_100000.txt", "r")
file_A2_B2 = open("A2_B2_100000.txt", "r")
file_A1_B2 = open("A1_B2_100000.txt", "r")
file_A2_B1 = open("A2_B1_100000.txt", "r")
file_write = open("average_generations.txt", "w")
mutation_average = open("mutation_average", "w")

files = [file_A2_B2,file_A2_B2,file_A1_B2,file_A2_B1]

for u in files:
    line = u.readlines()
    list_of_lines = []
    for i in line:
        values = i.split('\t')
        list_of_lines.append(values)

    count = 0
    for j in list_of_lines:
        count += 1

    for k in range(0, count):
        list_of_lines[k].remove('\n')

    length = len(list_of_lines[0])
    print_counter = 4

    for o in range(0, length):
        total = 0
        for p in range(0, count):
            number = float(list_of_lines[p][o])
            total = total + number
        average = total/count
        print average
        if print_counter == 4:
            file_write.write(str(average)+'\n')
            print_counter = 0
        print_counter += 1

file_write.write('\n')
(This is my third answer because I misunderstood what your code was doing in my original, and then made a small but crucial mistake in my second; hopefully three's a charm.)
Edits: Since this seems to be a popular answer, I've made a few modifications over the years to improve its implementation; most are not too major. This is so that if folks use it as a template, it will provide an even better basis.
As others have pointed out, your MemoryError problem is most likely because you're attempting to read the entire contents of huge files into memory and then, on top of that, effectively doubling the amount of memory needed by creating a list of lists of the string values from each line.
Python's memory limits are determined by how much physical ram and virtual memory disk space your computer and operating system have available. Even if you don't use it all up and your program "works", using it may be impractical because it takes too long.
Anyway, the most obvious way to avoid that is to process each file a single line at a time, which means you have to do the processing incrementally.
To accomplish this, a list of running totals for each of the fields is kept. When that is finished, the average value of each field can be calculated by dividing the corresponding total value by the count of total lines read. Once that is done, these averages can be printed out and some written to one of the output files. I've also made a conscious effort to use very descriptive variable names to try to make it understandable.
try:
    from itertools import izip_longest
except ImportError:    # Python 3
    from itertools import zip_longest as izip_longest

GROUP_SIZE = 4
input_file_names = ["A1_B1_100000.txt", "A2_B2_100000.txt", "A1_B2_100000.txt",
                    "A2_B1_100000.txt"]
file_write = open("average_generations.txt", 'w')
mutation_average = open("mutation_average", 'w')  # left in, but nothing written

for file_name in input_file_names:
    with open(file_name, 'r') as input_file:
        print('processing file: {}'.format(file_name))

        totals = []
        for count, fields in enumerate((line.split('\t') for line in input_file), 1):
            totals = [sum(values) for values in
                      izip_longest(totals, map(float, fields), fillvalue=0)]
        averages = [total/count for total in totals]

        for print_counter, average in enumerate(averages):
            print('  {:9.4f}'.format(average))
            if print_counter % GROUP_SIZE == 0:
                file_write.write(str(average)+'\n')

file_write.write('\n')
file_write.close()
mutation_average.close()
You're reading the entire file into memory (line = u.readlines()), which will of course fail if the file is too large (and you say that some are up to 20 GB), so that's your problem right there.
Better to iterate over each line:

for current_line in u:
    do_something_with(current_line)

is the recommended approach.
Later in your script, you're doing some very strange things like first counting all the items in a list, then constructing a for loop over the range of that count. Why not iterate over the list directly? What is the purpose of your script? I have the impression that this could be done much more easily.
This is one of the advantages of high-level languages like Python (as opposed to C, where you do have to do these housekeeping tasks yourself): let Python handle iteration for you, and only collect in memory what you actually need to have in memory at any given time.
Also, as it seems that you're processing TSV files (tab-separated values), you should take a look at the csv module, which will handle all the splitting, removing of \ns, etc. for you, as in the sketch below.
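A minimal sketch of that suggestion (hedged: the file name is one from the question, and the running-totals logic mirrors the incremental approach above):

import csv

totals, count = [], 0
with open("A1_B1_100000.txt", "r") as tsv_file:
    for row in csv.reader(tsv_file, delimiter='\t'):
        # csv strips the newline; drop any trailing empty field left by it
        values = [float(field) for field in row if field.strip()]
        if not totals:
            totals = [0.0] * len(values)
        totals = [t + v for t, v in zip(totals, values)]
        count += 1

averages = [t / count for t in totals]
print(averages)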
Python can use all memory available to its environment. My simple "memory test" crashes on ActiveState Python 2.6 after using about
1959167 [MiB]
On Jython 2.5 it crashes earlier:
239000 [MiB]
Probably I can configure Jython to use more memory (it uses limits from the JVM).
Test app:
import sys

sl = []
i = 0
# some magic 1024 - overhead of string object
fill_size = 1024
if sys.version.startswith('2.7'):
    fill_size = 1003
if sys.version.startswith('3'):
    fill_size = 497
print(fill_size)
MiB = 0
while True:
    s = str(i).zfill(fill_size)
    sl.append(s)
    if i == 0:
        try:
            sys.stderr.write('size of one string %d\n' % (sys.getsizeof(s)))
        except AttributeError:
            pass
    i += 1
    if i % 1024 == 0:
        MiB += 1
        if MiB % 25 == 0:
            sys.stderr.write('%d [MiB]\n' % (MiB))
In your app you read the whole file at once. For such big files you should read line by line.
No, there's no Python-specific limit on the memory usage of a Python application. I regularly work with Python applications that may use several gigabytes of memory. Most likely, your script actually uses more memory than available on the machine you're running on.
In that case, the solution is to rewrite the script to be more memory efficient, or to add more physical memory if the script is already optimized to minimize memory usage.
Edit:
Your script reads the entire contents of your files into memory at once (line = u.readlines()). Since you're processing files up to 20 GB in size, you're going to get memory errors with that approach unless you have huge amounts of memory in your machine.
A better approach would be to read the files one line at a time:
for u in files:
    for line in u:  # This will iterate over each line in the file
        # Read values from the line, do necessary calculations
Not only are you reading the whole of each file into memory, but you also laboriously replicate the information in a table called list_of_lines.
You have a secondary problem: your choice of variable names severely obfuscates what you are doing.
Here is your script rewritten with the readlines() caper removed and with meaningful names:
file_A1_B1 = open("A1_B1_100000.txt", "r")
file_A2_B2 = open("A2_B2_100000.txt", "r")
file_A1_B2 = open("A1_B2_100000.txt", "r")
file_A2_B1 = open("A2_B1_100000.txt", "r")
file_write = open("average_generations.txt", "w")
mutation_average = open("mutation_average", "w") # not used

files = [file_A2_B2,file_A2_B2,file_A1_B2,file_A2_B1]

for afile in files:
    table = []
    for aline in afile:
        values = aline.split('\t')
        values.remove('\n') # why?
        table.append(values)

    row_count = len(table)
    row0length = len(table[0])
    print_counter = 4

    for column_index in range(row0length):
        column_total = 0
        for row_index in range(row_count):
            number = float(table[row_index][column_index])
            column_total = column_total + number
        column_average = column_total/row_count
        print column_average
        if print_counter == 4:
            file_write.write(str(column_average)+'\n')
            print_counter = 0
        print_counter += 1

file_write.write('\n')
It rapidly becomes apparent that (1) you are calculating column averages, and (2) the obfuscation led some others to think you were calculating row averages.
As you are calculating column averages, no output is required until the end of each file, and the amount of extra memory actually required is proportional to the number of columns.
Here is a revised version of the outer loop code:
for afile in files:
    for row_count, aline in enumerate(afile, start=1):
        values = aline.split('\t')
        values.remove('\n') # why?
        fvalues = map(float, values)
        if row_count == 1:
            row0length = len(fvalues)
            column_index_range = range(row0length)
            column_totals = fvalues
        else:
            assert len(fvalues) == row0length
            for column_index in column_index_range:
                column_totals[column_index] += fvalues[column_index]

    print_counter = 4
    for column_index in column_index_range:
        column_average = column_totals[column_index] / row_count
        print column_average
        if print_counter == 4:
            file_write.write(str(column_average)+'\n')
            print_counter = 0
        print_counter += 1
