Python subprocess: continuously feed stdin and read stdout, which may be empty

I am trying to write a Python script that optimises the parameters of a compiled C program that I wrote. The program (dups) reads an input line (using fgets) and prints it if it is not in the set of previously seen inputs.
My Python script should continuously feed input to dups and then evaluate its output (which can be either the echoed input or nothing).
I am using subprocess, writing to the child's stdin and reading from its stdout. However, with every solution I have tried, the script hangs after reading the first line.
The C program only writes to stdout, and I know that it flushes. Furthermore, I can reproduce the error with a Python script that simulates dups.
Sample code:
Sample dups for easy reproduction:
#!/usr/bin/python
import sys

names = set()
while True:
    try:
        x = raw_input()
    except EOFError:
        print "Unexpected EOF, quitting"
        break
    if x not in names:
        print x
        sys.stdout.flush()
        names.add(x)
Main script:
import subprocess
import pty, os  # for solution 4

inputs = ['Alice', 'Alice', 'Bob', 'Jane', 'Bob', 'Jane', 'Alice', 'Mike']
solution = 4
arguments = ['./sample_script.py']
popen_nargs = dict(stdin=subprocess.PIPE,
                   stdout=subprocess.PIPE,
                   universal_newlines=True,
                   )
if solution == 3:
    out_fname = "output.txt"
    f_w = open(out_fname, "wt")
    f_r = open(out_fname, "rt")
    popen_nargs['stdout'] = f_w
    popen_nargs['bufsize'] = 0
if solution == 4:
    master, slave = pty.openpty()
    popen_nargs['stdout'] = slave
    stdout_handle = os.fdopen(master)
child = subprocess.Popen(arguments, **popen_nargs)
for inpt in inputs:
    print "Testing '" + inpt + "'"
    child.stdin.write(inpt + " \n")
    child.stdin.flush()
    print "Result:"
    # http://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running
    if solution == 0:
        for stdout_line in iter(child.stdout.readline, ""):
            print "-> '", stdout_line, "'"
    # http://blog.endpoint.com/2015/01/getting-realtime-output-using-python.html
    elif solution == 1:
        while True:
            output = child.stdout.readline()
            if output == '' and child.poll() is not None:
                break
            if output:
                print "-> '", output, "'"
    # https://gist.github.com/zhenyi2697/7229421
    elif solution == 2:
        output = ''
        while True:
            result = child.poll()
            delta = child.stdout.read(1)
            if result is not None:
                print 'terminated'
                break
            if delta != ' ':
                output = output + delta
            else:
                if '%' in output:
                    print 'percentage is:'
                    print output
                elif '/s' in output:
                    print 'speed is:'
                    print output
                print "-> '", output, "'"
                output = ''
    # http://stackoverflow.com/questions/5419888/reading-from-a-frequently-updated-file
    elif solution == 3:
        f_w.flush()
        print "-> '", f_r.read(), "'"
    # http://stackoverflow.com/questions/13603334/repeatedly-write-to-stdin-and-read-stdout-of-a-subprocess-without-closing-it
    if solution == 4:
        print "-> '", stdout_handle.readline(), "'"
    print "end iteration"
if solution == 3:
    f_w.close()
    f_r.close()
# Close the program
child.communicate()
Output:
Solutions 0, 1 (don't terminate):
Testing 'Alice'
Result:
-> ' Alice
'
Solution 2 (doesn't terminate):
Testing 'Alice'
Result:
-> ' Alice '
Solution 3 (output.txt seems to be updated only at the end, despite bufsize=0):
Testing 'Alice'
Result:
-> ' '
end iteration
Testing 'Alice'
Result:
-> ' '
end iteration
...
Testing 'Mike'
Result:
-> ' '
end iteration
Solution 4 (seems promising, but read and readline block when there is no output, hence it does not terminate):
Testing 'Alice'
Result:
-> ' Alice
'
end iteration
Testing 'Alice'
Result:
-> '
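For reference, a minimal sketch of one way around the blocking readline in solution 4 (not from the original thread): poll the pty master with select and a timeout, so that an empty response is detected instead of blocking forever. It assumes the same master fd as above; the 0.1 s timeout is an arbitrary choice.

import os
import select

def read_available(fd, timeout=0.1):
    # Collect whatever the child has written to fd so far;
    # return '' if nothing arrives within the timeout.
    chunks = []
    while select.select([fd], [], [], timeout)[0]:
        chunks.append(os.read(fd, 1024))
    return ''.join(chunks)

# Usage inside the loop, replacing stdout_handle.readline():
# print "-> '", read_available(master), "'"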

Related

Python sys.argv and input combination with a definition NOT working

I have a Python script like the one below that I was trying to run in a shell. I want to use either argv or input in order to pass two specific indexes called index1 and index2 (ACCGTCG and TTCCAGC) and a file name to process. I tried two ways (with sys.argv and with input separately) as below, but I get no output.
With sys.argv:
#!/usr/bin/python
import sys
from Bio import SeqIO

def dual_index_positions():
    script = sys.argv[0]
    index1 = sys.argv[1]
    index2 = sys.argv[2]
    input_file = sys.argv[3]
    count = 0
    with open(input_file, "r") as Fastq:
        for record in SeqIO.parse(Fastq, 'fastq'):
            if index1 in record.seq and index2 in record.seq:
                print(record.name)
                ind1_rec = record.seq.find(index1)
                ind2_rec = record.seq.find(index2)
                rp_ind2 = ind2_rec + len(index2)
                dist = (ind1_rec) - (rp_ind2)
                print('Index1 and index2 positions are ' + str(ind1_rec+1) + ' and ' + str(ind2_rec+1) + ' respectively' +
                      '; distance is: ' + str(dist))
                count += 1
    print('The total number of hits is: ' + str(count))

if __name__ == '__dual_index_positions__':
    dual_index_positions()
With input():
#!/usr/bin/python
from Bio import SeqIO

def dual_index_positions():
    input_file = input('please enter your input_file: ')
    index1 = str(input('please enter your index 1: '))
    index2 = str(input('please enter your index 2: '))
    count = 0
    with open(input_file, "r") as Fastq:
        for record in SeqIO.parse(Fastq, 'fastq'):
            if index1 in record.seq and index2 in record.seq:
                print(record.name)
                ind1_rec = record.seq.find(index1)
                ind2_rec = record.seq.find(index2)
                rp_ind2 = ind2_rec + len(index2)
                dist = (ind1_rec) - (rp_ind2)
                print('Index1 and index2 positions are ' + str(ind1_rec+1) + ' and ' + str(ind2_rec+1) + ' respectively' +
                      '; distance is: ' + str(dist))
                count += 1
    print('The total number of hits is: ' + str(count))

if __name__ == '__dual_index_positions__':
    dual_index_positions()
Can somebody help me find the bug? Thank you in advance.
Instead of this:
if __name__ == '__dual_index_positions__':
    dual_index_positions()
use this:
if __name__ == '__main__':
    dual_index_positions()
The right-hand side of __name__ == '__dual_index_positions__' is not the name of the function you have to call: __name__ holds the name of the current module, and when a script is run directly it has the value '__main__'.
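A quick illustration (demo.py is a hypothetical file, not part of the question's code):

# demo.py
print(__name__)

# $ python demo.py   -> prints __main__  (run directly)
# >>> import demo    -> prints demo      (imported as a module)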

remove similar lines in text file

I am not a Python user, but I have a script in Python.
Part of the script:
elif line.find("CONECT") > -1:
    con = line.split()
    line_value = line_value + 1
    # print line_value
    # print con[2]
    try:
        line_j = "e" + ', ' + str(line_value) + ', ' + con[2] + "\n"
        output_file.write(line_j)
        print(line_j)
        line_i = "e" + ', ' + str(line_value) + ', ' + con[3] + "\n"
        output_file.write(line_i)
        print(line_i)
        line_k = "e" + ', ' + str(line_value) + ', ' + con[4] + "\n"
        print(line_k)
        output_file.write(line_k)
    except IndexError:
        continue
which gives a .txt output in the format
e, 1, 2
e, 1, 3
e, 1, 4
e, 2, 1
e, 2, 3
etc.
I need to remove lines that repeat the same numbers regardless of their order, i.e. the line e, 2, 1 duplicates e, 1, 2.
Is it possible?
Of course, it is better to modify your code so that those lines are removed BEFORE they are written to the file. You can use a list to store already-saved values and, on each iteration, check whether the value you want to add already exists in that list. The code below isn't tested or optimized, but it illustrates the idea:
# 'added = []' should be placed somewhere before 'if'
added = []

# your part of the code
elif line.find("CONECT") > -1:
    con = line.split()
    line_value = line_value + 1
    try:
        line_j = "e, %s, %s\n" % (str(line_value), con[2])
        tmp = sorted((str(line_value), con[2]))
        if tmp not in added:
            added.append(tmp)
            output_file.write(line_j)
            print(line_j)
        line_i = "e, %s, %s\n" % (str(line_value), con[3])
        tmp = sorted((str(line_value), con[3]))
        if tmp not in added:
            added.append(tmp)
            output_file.write(line_i)
            print(line_i)
        line_k = "e, %s, %s\n" % (str(line_value), con[4])
        tmp = sorted((str(line_value), con[4]))
        if tmp not in added:
            added.append(tmp)
            print(line_k)
            output_file.write(line_k)
    except IndexError:
        continue
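The idea behind the sorted pairs: sorting makes the membership test order-insensitive, e.g.

>>> sorted(('1', '2')) == sorted(('2', '1'))
True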
Here is a comparison method for two lines of your file (note the added Counter import):

from collections import Counter

def compare(line1, line2):
    els1 = line1.strip().split(', ')
    els2 = line2.strip().split(', ')
    return Counter(els1) == Counter(els2)
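A quick usage example on lines like those in the question:

>>> compare("e, 1, 2", "e, 2, 1")
True
>>> compare("e, 1, 2", "e, 1, 3")
False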
See the documentation for the Counter class.
If the count of elements doesn't matter, you can replace the Counter class with set.
The following approach should work. First add the following line further up in your code:

seen = set()

Then replace everything inside the try with the following code:

for con_value in con[2:5]:
    entry = frozenset((line_value, con_value))
    if entry not in seen:
        seen.add(entry)  # seen is a set, so add() rather than append()
        line_j = "e" + ', ' + str(line_value) + ', ' + con_value + "\n"
        output_file.write(line_j)
        print(line_j)

Make sure this code is indented to the same level as the code it replaces.
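frozenset fits here because it is hashable (so it can be stored in the seen set) and, like any set, it compares order-insensitively:

>>> frozenset((1, '2')) == frozenset(('2', 1))
True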

separate line output by groups

My Python script checks mysqldump files, and if there are any problems the script prints one of:
Dump is old for db;
Dump is not complete for db;
Dump is empty for db;
MySQL dump does not exist for db;
The script logs these records to a file line by line.
My question: is there a way to group the output in the file like this:
Dump is old for db;
Dump is old for db;
Dump is old for db;
Dump is not complete for db;
Dump is not complete for db;
Dump is not complete for db;
Dump is empty for db;
Dump is empty for db;
Dump is empty for db;
Because now my file looks like:
Dump is old for db;
Dump is empty for db;
Dump is old for db;
MySQL dump does not exist for db;
...
etc
Here is my small script :)
#!/bin/env python
import psycopg2
import sys, os
from subprocess import Popen, PIPE
from datetime import datetime
import smtplib

con = None
today = datetime.now().strftime("%Y-%m-%d")
log_dump_fail = '/tmp/mysqldump_FAIL'
log_fail = open(log_dump_fail, 'w').close()
log_fail = open(log_dump_fail, 'a')
sender = 'PUT_SENDER_NAME_HERE'
receiver = ['receiver_name']
smtp_daemon_host = 'localhost'

def db_backup_file_does_not_exist(db_backup_file):
    if not os.path.exists(db_backup_file): return True
    else: return False

def dump_health(last_dump_row, file_name, db):
    last_row = last_dump_row.rsplit(" ")
    tms = ''.join(last_row[4:5])
    status = last_row[1:3]
    if (status) and (tms != today):
        log_fail.write("\nDB is old for " + str(db) + str(file_name) + ", \nDump finished at " + str(''.join(tms)))
        log_fail.write("\n-------------------------------------------")
    elif not (status) and (tms == None):
        log_fail.write("\nDump is not complete for " + str(db) + str(file_name) + " , end of file is not correct")
        log_fail.write("\n-------------------------------------------")

suffixes = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

def humansize(nbytes):
    if nbytes == 0: return '0 B'
    i = 0
    while nbytes >= 1024 and i < len(suffixes)-1:
        nbytes /= 1024.
        i += 1
    f = ('%.2f' % nbytes).rstrip('0').rstrip('.')
    return '%s %s' % (f, suffixes[i])

def dump_size(dump_file, file_name, db):
    size = os.path.getsize(dump_file)
    if (size < 1024):
        human_readable = humansize(size)
        log_fail.write("\nDump is empty for " + str(db) + "\n" + "\t" + str(file_name) + ", file size is " + str(human_readable))
        log_fail.write("\n-------------------------------------------")

def report_to_noc(subject, text):  # fixed: parameter was misspelled 'isubject'
    TEXT = text
    SUBJECT = subject
    message = 'Subject: %s\n\n%s' % (SUBJECT, TEXT)
    server = smtplib.SMTP(smtp_daemon_host)
    server.sendmail(sender, receiver, message)
    server.quit()

try:
    con = psycopg2.connect(database='**', user='***', password='***', host='****')
    cur = con.cursor()
    cur.execute("""\
        select ad.servicename, (select name from servers where id = ps.server_id) as servername
        from packages as p, account_data as ad, package_servers as ps
        where p.id=ad.package_id and
              p.date_deleted IS NULL and
              p.id=ps.package_id and
              p.aktuel IS NULL and
              p.pre_def_package_id = 4 and
              p.mother_package_id !=0 and
              ps.subservice_id=5 and
              p.mother_package_id NOT IN (select id from packages where date_deleted IS NOT NULL)
        ORDER BY servername;
        """)
    while (1):
        row = cur.fetchone()
        if row == None:
            break
        db = row[0]
        server_name = str(row[1])
        if (''.join(server_name) == 'SKIP_THIS') or (''.join(server_name) == 'SKIP_THIS'):
            continue
        else:
            db_backup_file = '/storage/backup/db/mysql/' + str(db) + '/current/' + str(db) + '.mysql.gz'
            db_backup_file2 = '/storage/backup/' + str(''.join(server_name.split("DB"))) + '/mysql/' + str(db) + '/current/' + str(db) + '.mysql.gz'
            db_file_does_not_exist = False
            db_file2_does_not_exist = False
            if db_backup_file_does_not_exist(db_backup_file):
                db_file_does_not_exist = True
            if db_backup_file_does_not_exist(db_backup_file2):
                db_file2_does_not_exist = True
            if db_file_does_not_exist and db_file2_does_not_exist:
                log_fail.write("\nMySQL dump does not exist for " + str(db) + "\n" + "\t" + str(db_backup_file2) + "\n" + "\t" + str(db_backup_file))
                log_fail.write("\n-------------------------------------------")
                continue
            elif (db_file_does_not_exist) and not (db_file2_does_not_exist):
                p_zcat = Popen(["zcat", db_backup_file2], stdout=PIPE)
                p_tail = Popen(["tail", "-2"], stdin=p_zcat.stdout, stdout=PIPE)
                dump_status = str(p_tail.communicate()[0])
                dump_health(dump_status, db_backup_file2, db)
                dump_size(db_backup_file2, db_backup_file2, db)
            elif (db_file2_does_not_exist) and not (db_file_does_not_exist):
                p_zcat = Popen(["zcat", db_backup_file], stdout=PIPE)
                p_tail = Popen(["tail", "-2"], stdin=p_zcat.stdout, stdout=PIPE)
                dump_status = str(p_tail.communicate()[0])
                dump_health(dump_status, db_backup_file, db)
                dump_size(db_backup_file, db_backup_file, db)
    con.close()
except psycopg2.DatabaseError, e:
    print 'Error %s' % e
    sys.exit(1)

log_fail.close()
if os.path.getsize(log_dump_fail) > 0:
    subject = "Not all MySQL dumps completed successfully. Log file backup:" + str(log_dump_fail)
    fh = open(log_dump_fail, 'r')
    text = fh.read()
    fh.close()
    report_to_noc(subject, text)
else:
    subject = "MySQL dump completed successfully for all DBs, listed in PC"
    text = "Hello! \nI am notifying you that I checked mysqldump files this morning.\nThere is nothing to worry about. :)"
    report_to_noc(subject, text)
You can process your log file after it has been written.
One option is to read your file and sort the lines:
lines = open('log.txt').readlines()
lines.sort()
open('log_sorted.txt', 'w').write("".join(lines))  # lines already end with '\n'
This won't emit an empty line between log types.
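If you do want an empty line between the groups, here is a minimal sketch using itertools.groupby on the sorted lines (same log.txt / log_sorted.txt names as above):

from itertools import groupby

lines = sorted(open('log.txt').readlines())
with open('log_sorted.txt', 'w') as out:
    for _, group in groupby(lines):
        out.writelines(group)  # all identical messages in a row
        out.write('\n')        # blank line between message types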
Another option is to use a Counter:
from collections import Counter

lines = open('log.txt').readlines()
counter = Counter()
for line in lines:
    counter[line] += 1

out_file = open('log_sorted.txt', 'w')
for line, num in counter.iteritems():
    out_file.write(line * num + "\n")
Looks like you want to group the output of the script rather than log the info as it comes while searching.
The easiest way would be to maintain four lists, one for each message type (old, empty, not complete, does not exist). In the script, append the db names to the appropriate list instead of logging, and then dump the lists one by one into the file with the appropriate prefixes ("Dump is empty for " + dbname).
For example, remove all the log_fail.write() calls from the functions, replace them with list.append() calls, and write a separate function that writes to the log file as you like.
Add the lists:
db_dump_is_old_list = []
db_dump_is_empty_list = []
db_dump_is_not_complete_list = []
db_dump_does_not_exist_list = []
Modify the functions:

def dump_health(last_dump_row, file_name, db):
    last_row = last_dump_row.rsplit(" ")
    tms = ''.join(last_row[4:5])
    status = last_row[1:3]
    if (status) and (tms != today):
        db_dump_is_old_list.append(str(db))
        #log_fail.write("\nDB is old for "+ str(db) + str(file_name) + ", \nDump finished at " + str(''.join(tms)))
        #log_fail.write("\n-------------------------------------------")
    elif not (status) and (tms == None):
        db_dump_is_not_complete_list.append(str(db))
        #log_fail.write("\nDump is not complete for "+str(db) + str(file_name) + " , end of file is not correct")
        #log_fail.write("\n-------------------------------------------")

def dump_size(dump_file, file_name, db):
    size = os.path.getsize(dump_file)
    if (size < 1024):
        human_readable = humansize(size)
        db_dump_is_empty_list.append(str(db))
        #log_fail.write("\nDump is empty for " +str(db) + "\n" +"\t" + str(file_name)+", file size is " + str(human_readable))
        #log_fail.write("\n-------------------------------------------")

And in the main loop:

if db_file_does_not_exist and db_file2_does_not_exist:
    db_dump_does_not_exist_list.append(str(db))
    #log_fail.write("\nMySQL dump does not exist for " + str(db) + "\n" + "\t" + str(db_backup_file2) + "\n" + "\t" + str(db_backup_file))
    #log_fail.write("\n-------------------------------------------")
    continue
And add a logger function:

def dump_info_to_log_file():
    log_dump_fail = '/tmp/mysqldump_FAIL'
    log_fail = open(log_dump_fail, 'w').close()
    log_fail = open(log_dump_fail, 'a')
    for dbname in db_dump_is_old_list:
        log_fail.write("Dump is Old for " + str(dbname))
    log_fail.write("\n\n")
    for dbname in db_dump_is_empty_list:
        log_fail.write("Dump is Empty for " + str(dbname))
    log_fail.write("\n\n")
    for dbname in db_dump_is_not_complete_list:
        log_fail.write("Dump is Not Complete for " + str(dbname))
    log_fail.write("\n\n")
    for dbname in db_dump_does_not_exist_list:
        log_fail.write("Dump Does Not Exist for " + str(dbname))
    log_fail.close()
Or you could simply log as you are doing, and then read in the file, sort and write back the file.
Thank you all for the interesting ideas.
I have really tried all the options :)
To my mind:
With the Counter object, the pro is fewer lines of code, but the con is the extra read/write operations. The log file is not big, but I still decided to cut down on reads and writes.
With the arrays, the con is more lines of code :) but the pro is writing to the file only once.
So I implemented the arrays.. :)
Thank you guys!!!

Python script that performs line matching over stale files generates inconsistent output

I created a Python script to parse mail (Exim) logfiles and perform pattern matching in order to get a top-100 list of the domains most sent to from my SMTP servers.
However, every time I execute the script I get a different count.
These are stale logfiles (no longer being written to), and I cannot find a functional flaw in my code.
Example output:
1:
70353 gmail.com
68337 hotmail.com
53657 yahoo.com
2:
70020 gmail.com
67741 hotmail.com
54397 yahoo.com
3:
70191 gmail.com
67917 hotmail.com
54438 yahoo.com
Code:
#!/usr/bin/env python
import os
import datetime
import re
from collections import defaultdict

class DomainCounter(object):
    def __init__(self):
        self.base_path = '/opt/mail_log'
        self.tmp = []
        self.date = datetime.date.today() - datetime.timedelta(days=14)
        self.file_out = '/var/tmp/parsed_exim_files-' + str(self.date.strftime('%Y%m%d')) + '.decompressed'

    def parse_log_files(self):
        sub_dir = os.listdir(self.base_path)
        for directory in sub_dir:
            if re.search('smtp\d+', directory):
                fileInput = self.base_path + '/' + directory + '/maillog-' + str(self.date.strftime('%Y%m%d')) + '.bz2'
                if not os.path.isfile(self.file_out):
                    os.popen('touch ' + self.file_out)
                proccessFiles = os.popen('/bin/bunzip2 -cd ' + fileInput + ' > ' + self.file_out)
                accessFileHandle = open(self.file_out, 'r')
                readFileHandle = accessFileHandle.readlines()
                print "Processing %s." % fileInput
                for line in readFileHandle:
                    if '<=' in line and ' for ' in line and '<>' not in line:
                        distinctLine = line.split(' for ')
                        recipientAddresses = distinctLine[1].strip()
                        recipientAddressList = recipientAddresses.strip().split(' ')
                        if len(recipientAddressList) > 1:
                            for emailaddress in recipientAddressList:
                                # Since syslog messages are transmitted over UDP some messages are dropped and need to be filtered out.
                                if '#' in emailaddress:
                                    (login, domein) = emailaddress.split("#")
                                    self.tmp.append(domein)
                                continue
                        else:
                            try:
                                (login, domein) = recipientAddressList[0].split("#")
                                self.tmp.append(domein)
                            except Exception as e:
                                print e, '<<No valid email address found, skipping line>>'
                accessFileHandle.close()
                os.unlink(self.file_out)
        return self.tmp

if __name__ == '__main__':
    domainCounter = DomainCounter()
    result = domainCounter.parse_log_files()
    domainCounts = defaultdict(int)
    top = 100
    for domain in result:
        domainCounts[domain] += 1
    sortedDict = dict(sorted(domainCounts.items(), key=lambda x: x[1], reverse=True)[:int(top)])
    for w in sorted(sortedDict, key=sortedDict.get, reverse=True):
        print '%-3s %s' % (sortedDict[w], w)
proccessFiles = os.popen('/bin/bunzip2 -cd ' + fileInput + ' > ' + self.file_out)

This line is non-blocking: it starts the command, but the following lines already read the file while bunzip2 may still be writing it. This is basically a concurrency issue, and it explains the varying counts. Wait for the command to complete before reading the file.
Also see: Python popen command. Wait until the command is finished. Note that os.popen has been deprecated since Python 2.6 (depending on which version you are using).
Sidenote: the same applies to the line below; the file may or may not exist yet after executing it:
os.popen('touch ' + self.file_out)
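A minimal sketch of a blocking replacement for that os.popen line inside parse_log_files (fileInput and self.file_out are the names from the question):

import subprocess

# subprocess.call blocks until bunzip2 exits, so self.file_out is
# complete before the script starts reading it; the shell redirection
# is replaced by passing the file object as stdout.
with open(self.file_out, 'w') as out:
    subprocess.call(['/bin/bunzip2', '-cd', fileInput], stdout=out)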

Python: File Writing Adding Unintentional Newlines on Linux Only

I am using Python 2.7.9. I'm working on a program that is supposed to produce the following output in a .csv file on each loop iteration:
URL,number
Here's the main loop of the code I'm using:
csvlist = open(listfile, 'w')
f = open(list, "r")

def hasQuality(item):
    for quality in qualities:
        if quality in item:
            return True
    return False

for line in f:
    line = line.split('\n')
    line = line[0]
    # print line
    itemname = urllib.unquote(line).decode('utf8')
    # print itemhash
    if hasQuality(itemname):
        try:
            looptime = time.time()
            url = baseUrl + line
            results = json.loads(urlopen(url).read())
            # status = results.status_code
            content = results
            if 'median_price' in content:
                medianstr = str(content['median_price']).replace('$', '')
                medianstr = medianstr.replace('.', '')
                median = float(medianstr)
                volume = content['volume']
                print url + '\n' + itemname
                print 'Median: $' + medianstr
                print 'Volume: ' + str(volume)
                if (median > minprice) and (volume > minvol):
                    csvlist.write(line + ',' + medianstr + '\n')
                    print '+ADDED TO LIST'
            else:
                print 'No median price given for ' + itemname + '.\nGiving up on item.'
            print "Finished loop in " + str(round(time.time() - looptime, 3)) + " seconds."
        except ValueError:
            print "we blacklisted fool?? cause we skippin beats"
    else:
        print itemname + ' is a commodity.\nGiving up on item.'

csvlist.close()
f.close()
print "Finished script in " + str(round(time.time() - runtime, 3)) + " seconds."
It should be generating a list that looks like this:
AWP%20%7C%20Asiimov%20%28Field-Tested%29,3911
M4A1-S%20%7C%20Hyper%20Beast%20%28Field-Tested%29,4202
But it's actually generating a list that looks like this:
AWP%20%7C%20Asiimov%20%28Field-Tested%29
,3911
M4A1-S%20%7C%20Hyper%20Beast%20%28Field-Tested%29
,4202
Whenever it is run on a Windows machine, I have no issue. Whenever I run it on my EC2 instance, however, it adds that extra newline. Any ideas why? Running commands on the file like
awk 'NR%2{printf $0" ";next;}1' output.csv
do not change anything. I have transferred the file to my Windows machine and it still reads the same. However, when I paste the output into Steam's chat client it concatenates the lines the way that I want.
Thanks in advance!
This is where the problem occurs:
csvlist.write(line + ',' + medianstr + '\n')
It can be fixed by stripping the whitespace from line:
csvlist.write(line.strip() + ',' + medianstr + '\n')
Problem:
The problem comes from reading raw lines from the input file. A raw line keeps its line terminator, and a file produced on Windows ends each line with '\r\n'; splitting on '\n' alone leaves the '\r' attached to the URL, which shows up as the extra line break when the file is read on Linux.
For more details, just type print(repr(line)) before writing and see the output.
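A minimal illustration of what repr() reveals (the line content here is hypothetical):

line = 'AWP%20Asiimov,3911\r\n'    # raw line from a Windows-edited file
print(repr(line))                   # 'AWP%20Asiimov,3911\r\n'
print(repr(line.split('\n')[0]))    # 'AWP%20Asiimov,3911\r'  <- stray \r
print(repr(line.strip()))           # 'AWP%20Asiimov,3911'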
