Python script taking a long time to run

I am writing a script in Python to parse LDAP logs and count the number of searches/binds by each user. I tested my code on sample files: for smaller files up to 5-10 MB it runs quickly and completes within a minute on my local PC. However, when I ran the script on an 18 MB file with around 150,000 lines, it took around 5 minutes. I want to run this script on files of around 100 MB, maybe 5-6 files per run, which means the script has to parse almost 600-700 MB of data in each run. I suspect that would take a long time, so I would appreciate advice on whether the code below can be tuned for better execution time.
import os, re, datetime
from collections import defaultdict

d = defaultdict(list)
k = defaultdict(list)
start_time = datetime.datetime.now()

fh = open("C:\\Rohit\\ECD Utilization Script - Copy\\logdir\\access", "r").read()
pat = re.compile(' BIND REQ .*conn=([\d]*).*dn=(.*")')
srchStr = '\n'.join(re.findall(r' SEARCH REQ .*', fh))
bindlist = re.findall(pat, fh)
for entry in bindlist:
    d[entry[-1].split(",")[0]].append(entry[0])

for key in d:
    for con in d[key]:
        count = re.findall(con, srchStr)
        k[key].append((con, len(count)))

for key in k:
    print("Number of searches by ", key, " : ", sum([i[1] for i in k[key]]))
for key in d:
    print("No of bind ", key, " = ", len(d[key]))

end_time = datetime.datetime.now()
print("Total time taken - {}".format(end_time - start_time))

You are doing several scans over the entire file on this line:
count = re.findall('SEARCH REQ.*'+conid,fh1)
Avoid this; it is your main problem. Collect all the conids into a list first, then iterate over the file again with the conids in your inner loop, pulling the file scan out of the outer loop. That way you make only two passes over the file.
Also, since this is plain Python, run it with PyPy for a faster run.
You can do even better with an FSM (finite state machine) at the cost of a bit more RAM. That is only a hint; you will have to write the FSM yourself.
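For illustration only, here is a rough sketch of that two-pass idea. The regexes, the "access" path, and the dn-prefix grouping are assumptions based on the question's own patterns, not the asker's code, and the revised script in Edit 1 below takes a different, single-pass route:
import re
from collections import defaultdict

bind_pat = re.compile(r' BIND REQ .*conn=(\d+).*dn="([^",]+)')
search_pat = re.compile(r' SEARCH REQ .*conn=(\d+)')

# pass 1: collect the connection ids opened by each user (dn prefix)
conns_by_user = defaultdict(set)
with open("access") as fh:
    for line in fh:
        m = bind_pat.search(line)
        if m:
            conns_by_user[m.group(2)].add(m.group(1))

# pass 2: count SEARCH REQ lines per connection id
searches_by_conn = defaultdict(int)
with open("access") as fh:
    for line in fh:
        m = search_pat.search(line)
        if m:
            searches_by_conn[m.group(1)] += 1

# join: total searches per user across that user's connections
for user, conns in conns_by_user.items():
    print(user, "->", sum(searches_by_conn[c] for c in conns))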
Edit 1: This is the version of the script I wrote after seeing the log file. Please correct me if there is any mistake:
#!/usr/bin/env python
import sys
import re

def parse(filepath):
    d = {}
    regex1 = re.compile(r'(.*)?BIND\sREQ(.*)uid=(\w+)')
    regex2 = re.compile(r'(.*)?SEARCH\sREQ(.*)uid=(\w+)')
    with open(filepath, 'r') as f:
        for l in f:
            m = re.search(regex1, l)
            if m:
                # print (m.group(3))
                uid = m.group(3)
                if uid in d:
                    d[uid]['bind_count'] += 1
                else:
                    d[uid] = {}
                    d[uid]['bind_count'] = 1
                    d[uid]['search_count'] = 0
            m = re.search(regex2, l)
            if m:
                # print (m.group(3))
                uid = m.group(3)
                if uid in d:
                    d[uid]['search_count'] += 1
                else:
                    d[uid] = {}
                    d[uid]['search_count'] = 1
                    d[uid]['bind_count'] = 0
    for k in d:
        print('user id = ' + k, 'Bind count = ' + str(d[k]['bind_count']), 'Search count = ' + str(d[k]['search_count']))

def process_args():
    if len(sys.argv) < 2:  # compare the argument count, not the argv list itself
        print('Usage: parse_ldap_log.py log_filepath')
        exit(1)

if __name__ == '__main__':
    process_args()
    parse(sys.argv[1])
Thank the Gods that it was not complicated enough to warrant an FSM.

Use the itertools library instead of so many loops.
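For example (an illustrative sketch against the question's k mapping, not a drop-in replacement for the script above), itertools.chain.from_iterable can flatten the per-user (conn, count) lists so a grand total needs no nested loop:
from itertools import chain

# k maps uid -> [(conn_id, search_count), ...] as in the question's script
grand_total = sum(count for _, count in chain.from_iterable(k.values()))
print("Total searches:", grand_total)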

Your script has quadratic complexity: for each entry you extract, you scan through the file's contents again to match the log entry.
My suggestion is to read the file only once and count the occurrences of the needed entry (the one matching " BIND REQ ").
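A minimal sketch of that single-pass counting idea (the regex and the "access" path are assumptions based on the question; the asker's own single-pass version follows below):
import re
from collections import Counter

bind_re = re.compile(r' BIND REQ .*conn=\d+.*dn="([^",]+)')
bind_counts = Counter()

with open("access") as fh:      # read the file exactly once
    for line in fh:
        m = bind_re.search(line)
        if m:
            bind_counts[m.group(1)] += 1    # one bind counted per matching line

for user, count in bind_counts.items():
    print(user, "=", count)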

I was able to solve my problem with the code below.
import os, re, datetime
from collections import defaultdict

start_time = datetime.datetime.now()
bind_count = defaultdict(int)
search_conn = defaultdict(int)
bind_conn = defaultdict(str)
j = defaultdict(int)

fh = open("C:\\access", "r")
total_searches = 0
total_binds = 0

for line in fh:
    reg1 = re.search(r' BIND REQ .*conn=(\d+).*dn=(.*")', line)
    reg2 = re.search(r' SEARCH REQ .*conn=(\d+).*', line)
    if reg1:
        total_binds += 1
        uid, con = reg1.group(2, 1)
        bind_count[uid] = bind_count[uid] + 1
        bind_conn[con] = uid
    if reg2:
        total_searches += 1
        skey = reg2.group(1)
        search_conn[skey] = search_conn[skey] + 1

for conid in search_conn:
    if conid in bind_conn:
        new_key = bind_conn[conid]
        j[new_key] = j[new_key] + search_conn[conid]

for k, v in bind_count.items():
    print(k, " = ", v)
print("*" * 80)
for k, v in j.items():
    print(k, "-->", v)

fh.close()
del search_conn
del bind_conn
end_time = datetime.datetime.now()
print("Total time taken - {}".format(end_time - start_time))

Related

Is the for loop in my code the speed bottleneck?

The following code looks through 2500 markdown files with a total of 76475 lines, to check each one for the presence of two strings.
#!/usr/bin/env python3
# encoding: utf-8
import re
import os

zettelkasten = '/Users/will/Dropbox/zettelkasten'

def zsearch(s, *args):
    for x in args:
        r = (r"(?=.* " + x + ")")
        p = re.search(r, s, re.IGNORECASE)
        if p is None:
            return None
    return s

for filename in os.listdir(zettelkasten):
    if filename.endswith('.md'):
        with open(os.path.join(zettelkasten, filename), "r") as fp:
            for line in fp:
                result_line = zsearch(line, "COVID", "vaccine")
                if result_line != None:
                    UUID = filename[-15:-3]
                    print(f'›[[{UUID}]] OR', end=" ")
This correctly gives output like:
›[[202202121717]] OR ›[[202003311814]] OR
However, it takes almost two seconds to run on my machine, which I think is much too slow. What, if anything, can be done to make it faster?
The main bottleneck is the regular expressions you're building.
If we print(f"{r=}") inside the zsearch function:
>>> zsearch("line line covid line", "COVID", "vaccine")
r='(?=.* COVID)'
r='(?=.* vaccine)'
The (?=.*) lookahead is what is causing the slowdown - and it's also not needed.
You can achieve the same result by searching for:
r=' COVID'
r=' vaccine'
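A minimal sketch of what the simplified function might look like, keeping the case-insensitive behaviour but dropping the lookahead (an illustration of the suggestion above, not the answer's verbatim code):
import re

def zsearch(s, *args):
    # plain " pattern" substrings, no (?=.*...) lookahead
    for x in args:
        if re.search(" " + x, s, re.IGNORECASE) is None:
            return None
    return s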

Drawing multiple sequences from 1 file, based on shared fields in another file

I'm trying to run a Python script to draw sequences from a separate file (merged.fas), with respect to a list (gene_fams_eggnog.txt) I have as output from another program.
The code is as follows:
from Bio import SeqIO
import os, sys, re
from collections import defaultdict

sequences = "merged.fas"
all_seqs = SeqIO.index(sequences, "fasta")

gene_fams = defaultdict(list)
gene_fams_file = open("gene_fams_eggnog.txt")
for line in gene_fams_file:
    fields = re.split("\t", line.rstrip())
    gene_fams[fields[0]].append[fields[1]]

for fam in gene_fams.keys():
    output_filename = str(fam) + ".fasta"
    outh = open(output_filename, "w")
    for id in gene_fams[fam]:
        if id in all_seqs:
            outh.write(">" + all_seqs[id].description + "\n" + str(all_seqs[id].seq) + "\n")
        else:
            print "Uh oh! Sequence with ID " + str(id) + " is not in the all_seqs file!"
            quit()
    outh.close()
The list looks like this:
1 Saccharomycescerevisiae_DAA09367.1
1 bieneu_EED42827.1
1 Asp_XP_749186.1
1 Mag_XP_003717339.1
2 Mag_XP_003716586.1
2 Mag_XP_003709453.1
3 Asp_XP_749329.1
Field 0 denotes a grouping based on similarity between the sequences. The script was meant to take all the sequences from merged.fas that correspond to the code in field 1 and write them into a file named after field 0.
So in the case of the portion of the list I have shown, all the sequences that have a 1 in field 0 (Saccharomycescerevisiae_DAA09367.1, bieneu_EED42827.1, Asp_XP_749186.1, Mag_XP_003717339.1) would have been written into a file called 1.fasta. This should continue from 2.fasta through however many groups there are.
This has worked, except it doesn't include all the sequences in each group; it only includes the last one listed as part of that group. Using my example above, I'd only have a file (1.fasta) with one sequence (Mag_XP_003717339.1), instead of all four.
Any and all help is appreciated,
Thanks,
JT
Although I didn't spot the cause of the issue you complained about, I'm surprised your code runs at all with this error:
gene_fams[fields[0]].append[fields[1]]
i.e. append[...] instead of append(...). But perhaps that's also "not there in the actual script I'm running". I rewrote your script below, and it works fine for me. One issue was your use of the variable name id, which shadows a Python builtin. You'll see I go to an extreme to avoid such errors:
from Bio import SeqIO
from collections import defaultdict

SEQUENCE_FILE_NAME = "merged.fas"
FAMILY_FILE_NAME = "gene_families_eggnog.txt"

all_sequences = SeqIO.index(SEQUENCE_FILE_NAME, "fasta")

gene_families = defaultdict(list)
with open(FAMILY_FILE_NAME) as gene_families_file:
    for line in gene_families_file:
        family_id, gene_id = line.rstrip().split()
        gene_families[family_id].append(gene_id)

for family_id, gene_ids in gene_families.items():
    output_filename = family_id + ".fasta"
    with open(output_filename, "w") as output:
        for gene_id in gene_ids:
            assert gene_id in all_sequences, "Sequence {} is not in {}!".format(gene_id, SEQUENCE_FILE_NAME)
            output.write(all_sequences[gene_id].format("fasta"))

Python text file manipulation, add delta time to each line in seconds

I am a beginner at Python and am trying to solve the following:
I have a text file that each line starts like this:
<18:12:53.972>
<18:12:53.975>
<18:12:53.975>
<18:12:53.975>
<18:12:54.008>
etc
Instead of the above, I would like to add the elapsed time in seconds at the beginning of each line, but only if the line starts with '<'.
<0.0><18:12:53.972>
<0.003><18:12:53.975>
<0.003><18:12:53.975>
<0.003><18:12:53.975>
<0.036><18:12:54.008>
etc
Here comes a try :-)
#import datetime
from datetime import timedelta
from sys import argv

#get filename as argument
run, input, output = argv

#get number of lines for textfile
nr_of_lines = sum(1 for line in open(input))

#read in file
f = open(input)
lines = f.readlines()
f.close

#declarations
do_once = True
time = []
delta_to_list = []
i = 0

#read in and translate all timevalues from logfile to delta time.
while i < nr_of_lines:
    i += 1
    if lines[i-1].startswith('<'):
        get_lines = lines[i-1]        #get one line
        get_time = (get_lines[1:13])  #get the time from that line
        h = int(get_time[0:2])
        m = int(get_time[3:5])
        s = int(get_time[6:8])
        ms = int(get_time[9:13])
        time = timedelta(hours = h, minutes = m, seconds = s, microseconds = 0, milliseconds = ms)
        sec_time = time.seconds + (ms/1000)
        if do_once:
            start_value = sec_time
            do_once = False
        delta = float("{0:.3f}".format(sec_time - start_value))
        delta_to_list.append(delta)

#write back values to logfile.
k = 0
s = str(delta_to_list[k])
with open(output, 'w') as out_file:
    with open(input, 'r') as in_file:
        for line in in_file:
            if line.startswith('<'):
                s = str(delta_to_list[k])
                out_file.write("<" + s + ">" + line)
            else:
                out_file.write(line)
            k += 1
As it is now, it mostly works, but the last two lines are not written to the new file. It says: "s = str(delta_to_list[k]) IndexError: list index out of range".
First I would like to get my code working, and second I would like suggestions for improvements. Thank you!
First point: never read a full file into memory when you don't have to (and especially when you don't know whether you have enough free memory).
Second point: learn to use Python's for loop and iteration protocol. The way to iterate over a list or any other iterable is:
for item in some_iterable:
do_something_with(item)
This avoids messing with indexes and getting it wrong ;)
One of the nice things with Python file objects is that they actually are iterables, so to iterate over a file lines, the simplest way is:
for line in my_opened_file:
do_something_with(line)
Here's a simple yet working and mostly pythonic (nb: python 2.7.x) way to write your program:
# -*- coding: utf-8 -*-
import os
import sys
import datetime
import re
import tempfile


def totime(timestr):
    """ returns a datetime object for a "HH:MM:SS" string """
    # we actually need datetime objects for subtraction
    # so let's use the first available bogus date
    # notes:
    # `timestr.split(":")` returns the list `["HH", "MM", "SS"]`
    # `map(int, ...)` applies `int()` to each item
    # of the sequence (second argument) and returns
    # the resulting list, ie
    # `map(int, ["01", "02", "03"])` => `[1, 2, 3]`
    return datetime.datetime(1900, 1, 1, *map(int, timestr.split(":")))


def process(instream, outstream):
    # some may consider that regexps are not that pythonic
    # but as far as I'm concerned it seems like a sensible
    # use case.
    time_re = re.compile(r"^<(?P<time>\d{2}:\d{2}:\d{2})\.")
    first = None
    # iterate over our input stream lines
    for line in instream:
        # should we handle this line at all ?
        # (nb a bit redundant but faster than re.match)
        if not line.startswith("<"):
            continue
        # looks like a candidate, let's try and
        # extract the 'time' value from it
        match = time_re.search(line)
        if not match:
            # starts with '<' BUT not followed by 'HH:MM:SS.' ?
            # unexpected from the sample source but well, we
            # can't do much about it either
            continue
        # retrieve the captured "time" (HH:MM:SS) part
        current = totime(match.group("time"))
        # store the first occurrence so we can
        # compute the elapsed time
        if first is None:
            first = current
        # `(current - first)` yields a `timedelta` object
        # we now just have to retrieve its `seconds` attribute
        seconds = (current - first).seconds
        # inject the seconds before the line
        # and write the whole thing to our output stream
        newline = "{}{}".format(seconds, line)
        outstream.write(newline)


def usage(err=None):
    if err:
        print >> sys.stderr, err
    print >> sys.stderr, "usage: python retime.py <filename>"
    # unix standard process exit codes
    return 2 if err else 0


def main(*args):
    # our entry point...
    # gets the source filename, processes it
    # (storing the results in a temporary file),
    # and if everything's ok replaces the source file
    # by the temporary file.
    try:
        sourcename = args[0]
    except IndexError:
        return usage("missing <filename> argument")
    # `delete=False` prevents the tmp file from being
    # deleted on closing.
    dest = tempfile.NamedTemporaryFile(delete=False)
    with open(sourcename) as source:
        try:
            process(source, dest)
        except Exception:
            dest.close()
            os.remove(dest.name)  # remove by name - os.remove() wants a path, not the file object
            raise
    # ok done
    dest.close()
    os.rename(dest.name, sourcename)
    return 0


if __name__ == "__main__":
    # only execute main() if we are called as a script
    # (so we can also import this file as a module)
    sys.exit(main(*sys.argv[1:]))
It gives the expected results on your sample data (running on Linux, but it should be OK on any other supported OS AFAICT).
Note that I wrote it to work like your original code (replace the source file with the processed one), but if it were my code I would instead either explicitly provide a destination filename or, by default, write to sys.stdout (and redirect stdout to another file). The process function can deal with any of those solutions FWIW; it's only a matter of a couple of edits in main().
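As a sketch of that alternative (reusing the process() and usage() helpers from the script above, so not a standalone program), main() could default to writing to sys.stdout:
import sys

def main(*args):
    try:
        sourcename = args[0]
    except IndexError:
        return usage("missing <filename> argument")
    with open(sourcename) as source:
        process(source, sys.stdout)   # redirect stdout to capture the result in a file
    return 0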

how to create an index to parse big text file

I have two files, A and B, in FASTQ format, which are basically several hundred million lines of text organized in groups of 4 lines starting with a #, as follows:
#120412_SN549_0058_BD0UMKACXX:5:1101:1156:2031#0/1
GCCAATGGCATGGTTTCATGGATGTTAGCAGAAGACATGAGACTTCTGGGACAGGAGCAAAACACTTCATGATGGCAAAAGATCGGAAGAGCACACGTCTGAACTCN
+120412_SN549_0058_BD0UMKACXX:5:1101:1156:2031#0/1
bbbeee_[_ccdccegeeghhiiehghifhfhhhiiihhfhghigbeffeefddd]aegggdffhfhhihbghhdfffgdb^beeabcccabbcb`ccacacbbccB
I need to compare the
5:1101:1156:2031#0/
part between files A and B and write the groups of 4 lines in file B that matched to a new file. I have a piece of Python code that does that, but it only works for small files, as it parses through all the #-lines of file B for every #-line in file A, and both files contain hundreds of millions of lines.
Someone suggested that I should create an index for file B; I have googled around without success and would be very grateful if someone could point out how to do this or let me know of a tutorial so I can learn. Thanks.
==EDIT==
In theory each group of 4 lines should only exist once in each file. Would it increase the speed enough to break out of the parsing after each match, or do I need a different algorithm altogether?
An index is just a shortened version of the information you are working with. In this case, you will want the "key" - the text between the first colon(':') on the #-line and the final slash('/') near the end - as well as some kind of value.
Since the "value" in this case is the entire contents of the 4-line block, and since our index is going to store a separate entry for each block, we would be storing the entire file in memory if we used the actual value in the index.
Instead, let's use the file position of the beginning of the 4-line block. That way, you can move to that file position, print 4 lines, and stop. Total cost is the 4 or 8 or however many bytes it takes to store an integer file position, instead of however-many bytes of actual genome data.
Here is some code that does the job, but also does a lot of validation and checking. You might want to throw stuff away that you don't use.
import sys

def build_index(path):
    index = {}
    for key, pos, data in parse_fastq(path):
        if key not in index:
            # Don't overwrite duplicates - use first occurrence.
            index[key] = pos
    return index

def error(s):
    sys.stderr.write(s + "\n")

def extract_key(s):
    # This much is fairly constant:
    assert(s.startswith('#'))
    (machine_name, rest) = s.split(':', 1)
    # Per wikipedia, this changes in different variants of FASTQ format:
    (key, rest) = rest.split('/', 1)
    return key

def parse_fastq(path):
    """
    Parse the 4-line FASTQ groups in path.
    Validate the contents, somewhat.
    """
    f = open(path)
    i = 0
    # Note: iterating a file is incompatible with fh.tell(). Fake it.
    pos = offset = 0
    for line in f:
        offset += len(line)
        lx = i % 4
        i += 1
        if lx == 0:    # #machine: key
            key = extract_key(line)
            len1 = len2 = 0
            data = [ line ]
        elif lx == 1:
            data.append(line)
            len1 = len(line)
        elif lx == 2:  # +machine: key or something
            assert(line.startswith('+'))
            data.append(line)
        else:          # lx == 3 : quality data
            data.append(line)
            len2 = len(line)
            if len2 != len1:
                error("Data length mismatch at line "
                      + str(i-2)
                      + " (len: " + str(len1) + ") and line "
                      + str(i)
                      + " (len: " + str(len2) + ")\n")
            #print "Yielding #%i: %s" % (pos, key)
            yield key, pos, data
            pos = offset
    if i % 4 != 0:
        error("EOF encountered in mid-record at line " + str(i))

def match_records(path, index):
    results = []
    for key, pos, d in parse_fastq(path):
        if key in index:
            # found a match!
            results.append(key)
    return results

def write_matches(inpath, matches, outpath):
    rf = open(inpath)
    wf = open(outpath, 'w')
    for m in matches:
        rf.seek(m)
        wf.write(rf.readline())
        wf.write(rf.readline())
        wf.write(rf.readline())
        wf.write(rf.readline())
    rf.close()
    wf.close()

#import pdb; pdb.set_trace()
index = build_index('afile.fastq')
matches = match_records('bfile.fastq', index)
posns = [ index[k] for k in matches ]
write_matches('afile.fastq', posns, 'outfile.fastq')
Note that this code goes back to the first file to get the blocks of data. If your data is identical between files, you would be able to copy the block from the second file when a match occurs.
Note also that depending on what you are trying to extract, you may want to change the order of the output blocks, and you may want to make sure that the keys are unique, or perhaps make sure the keys are not unique but are repeated in the order they match. That's up to you - I'm not sure what you're doing with the data.
These guys claim to parse multi-gigabyte files using a dedicated library; see http://www.biostars.org/p/15113/
from Bio import SeqIO

fastq_parser = SeqIO.parse(fastq_filename, "fastq")
wanted = (rec for rec in fastq_parser if ...)
SeqIO.write(wanted, output_file, "fastq")
A better approach IMO would be to parse it once and load the data into some database (e.g. MySQL) instead of that output_file, and later run the queries there.
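A rough sketch of that database idea using the standard-library sqlite3 module instead of MySQL (the table layout and key extraction are illustrative assumptions, and it presumes standard '@'-prefixed FASTQ headers that Biopython can parse):
import sqlite3
from Bio import SeqIO

db = sqlite3.connect("reads.db")
db.execute("CREATE TABLE IF NOT EXISTS reads (key TEXT PRIMARY KEY, record TEXT)")

# load file B once; record.description holds the header line without the leading '@'
for record in SeqIO.parse("bfile.fastq", "fastq"):
    key = record.description.split(":", 1)[1].rsplit("/", 1)[0]
    db.execute("INSERT OR IGNORE INTO reads VALUES (?, ?)", (key, record.format("fastq")))
db.commit()

# later: look up any key from file A without rescanning file B
row = db.execute("SELECT record FROM reads WHERE key = ?", ("5:1101:1156:2031#0",)).fetchone()
if row:
    print(row[0], end="")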
