I am trying to write a program which communicates with an external program through named pipes. The Python script does not know when the external program opens the named pipe/file for reading, so the Python script needs to open the pipe in blocking mode (see open). If the Python script opens it in non-blocking mode and the external program has not opened it for reading, the open call results in an error.
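For reference, the non-blocking failure mode looks like this; a minimal sketch, assuming the FIFO pipe_test already exists and nobody has it open for reading:

import errno
import os

# Opening the write end of a FIFO non-blockingly fails with ENXIO
# as long as no process has the read end open.
try:
    fd = os.open('pipe_test', os.O_WRONLY | os.O_NONBLOCK)
except OSError, e:
    if e.errno == errno.ENXIO:
        print 'no reader has opened the FIFO yet'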
So to open in blocking mode, the Python script could open the named pipe in a separate thread, and I have tried the threading module. In the example below I just read from the named pipe in the main thread, but it produces the same error:
import threading
import os

pipe_name = 'pipe_test'

class WriterNamedPipe(threading.Thread):
    def __init__(self, filepath, input):
        '''
        Writer: thread that outputs each line of input to the pipe
        '''
        # Inherit
        threading.Thread.__init__(self, verbose = True)
        self.daemon = False
        self.filepath = filepath
        self.input = input
        self.start()

    def run(self):
        # Open blockingly
        with open(self.filepath, 'w') as f:
            f.write(self.input)

if not os.path.exists(pipe_name):
    os.mkfifo(pipe_name)

WriterNamedPipe(pipe_name, '1\n' * 100)

with open(pipe_name, 'r') as f:
    print f.read()
This results in a hang/freeze:
MainThread: <WriterNamedPipe(Thread-1, initial)>.start(): starting thread
Thread-1: <WriterNamedPipe(Thread-1, started 1078922160)>.__bootstrap(): thread started
Thread-1: <WriterNamedPipe(Thread-1, started 1078922160)>.__bootstrap(): normal return
Compilation hangup
However, a similar example from here works, but with os.fork:
import os, time, sys

pipe_name = 'pipe_test'

def child():
    pipeout = os.open(pipe_name, os.O_WRONLY)
    counter = 0
    while True:
        time.sleep(1)
        os.write(pipeout, 'Number %03d\n' % counter)
        counter = (counter + 1) % 5

def parent():
    pipein = open(pipe_name, 'r')
    while True:
        line = pipein.readline()[:-1]
        print 'Parent %d got "%s" at %s' % (os.getpid(), line, time.time())

if not os.path.exists(pipe_name):
    os.mkfifo(pipe_name)

pid = os.fork()
if pid != 0:
    parent()
else:
    child()
Why is the example with the threading module hanging?
This probably does not work due to the GIL. The open call in the thread blocks the whole program. It could be avoided by using the multiprocessing module instead.
Sure, as #hakanc pointed out, due to the GIL it will block your process. os.fork works because it actually spawns a subprocess instead of a thread, which is not blocked by the GIL.
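For example, a minimal sketch of the multiprocessing variant (the same writer logic as the threaded example above, moved into a child process):

import multiprocessing
import os

pipe_name = 'pipe_test'

def writer(filepath, data):
    # The blocking open happens in the child process, so the main
    # process is free to open the read end at the same time.
    with open(filepath, 'w') as f:
        f.write(data)

if not os.path.exists(pipe_name):
    os.mkfifo(pipe_name)

proc = multiprocessing.Process(target=writer, args=(pipe_name, '1\n' * 100))
proc.start()

with open(pipe_name, 'r') as f:
    print f.read()

proc.join()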
Related
I am trying to work with a subprocess routine that spawns an interactive child process which expects user input. This process normally hangs immediately if I try to read its stdout stream directly.
I read through many solutions using fcntl, asynchronous operations, pexpect, and redirecting output to files. Although temporary log files should work, I don't want to go that route, as I would like to keep the process interactive within the Python interface. Of all of those, threads seemed the easiest and most straightforward way (I could not get pexpect to work properly, although it seemed like a good option, too).
Indeed, when I implemented the following code (stolen from Non-blocking read on a subprocess.PIPE in python):
import os
import subprocess as sp
from threading import Thread
from queue import Queue, Empty

class App:
    def __init__(self):
        proc = sp.Popen(['app'], stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, encoding='utf8')
        out = NonBlockingStreamReader(proc.stdout)
        print(out.readline(1))

class NonBlockingStreamReader:
    def __init__(self, stream):
        self.s = stream
        self.q = Queue()

        def populateQueue(stream, queue):
            while True:
                line = stream.readline()
                if line:
                    queue.put(line)
                else:
                    raise UnexpectedEndOfStream

        self.t = Thread(target=populateQueue, args=(self.s, self.q))
        self.t.daemon = True
        self.t.start()

    def readline(self, timeout=None):
        try:
            return self.q.get(block=timeout is not None, timeout=timeout)
        except Empty:
            return None

class UnexpectedEndOfStream(Exception):
    pass
everything worked flawlessly. Well, the problem is: it worked on Linux only, even though the solution should be Windows-compatible.
When I try to run this implementation on Windows, the newly created thread hangs the moment it tries to execute stream.readline(), never gets to actually populate the queue, and thus the output of out.readline(1) read from the main thread is None.
How can I make this work on Windows?
Ok, so the situation is this: I am building a macOS GUI app using Python and wx (wxPhoenix). The user can use the GUI (say: script1) to launch a file-deletion process (contained in script2). In order to run successfully, script2 needs to run with sudo rights.
script2 will iterate over a long list of files and delete them. But I need it to communicate with the GUI contained in script1 after each round so that script1 can update the progress bar.
In its most basic form, my current working setup looks like this:
Script1:
import io
import subprocess
from threading import Thread

import wx

# a whole lot of wx GUI stuff

def get_password():
    """Retrieve user password via a GUI"""
    # A wx solution using wx.PasswordEntryDialog()
    # Store password in a variable
    return variable

class run_script_with_sudo(Thread):
    """Launch a script with administrator privileges"""

    def __init__(self, path_to_script, wx_pubsub_sendmessage):
        """Set variables to self"""
        self.path = path_to_script
        self.sender = wx_pubsub_sendmessage
        self.password = get_password()
        Thread.__init__(self)
        self.start()

    def run(self):
        """Run thread"""
        prepare_script = subprocess.Popen(["echo", self.password], stdout=subprocess.PIPE)
        prepare_script.wait()
        launch_script = subprocess.Popen(['sudo', '-S', '/usr/local/bin/python3.6', '-u', self.path], stdin=prepare_script.stdout, stdout=subprocess.PIPE)
        for line in io.TextIOWrapper(launch_script.stdout, encoding="utf-8"):
            print("Received line: ", line.rstrip())
            # Tell progressbar to add another step:
            wx.CallAfter(self.sender, "update", msg="")
Script2:
import time

# This is a test setup, just a very simple loop that produces an output.
for i in range(25):
    time.sleep(1)
    print(i)
The above setup works in that script1 receives the output of script2 in real-time and acts on it. (So in the given example: after each second script1 adds another step to the progress bar until it reaches 25 steps).
What I want to achieve: not storing the password in a variable, and using macOS's native GUI to retrieve the password.
However when I change:
prepare_script = subprocess.Popen(["echo", self.password], stdout=subprocess.PIPE)
prepare_script.wait()
launch_script = subprocess.Popen(['sudo', '-S', '/usr/local/bin/python3.6', '-u', self.path], stdin=prepare_script.stdout, stdout=subprocess.PIPE)
for line in io.TextIOWrapper(launch_script.stdout, encoding="utf-8"):
    print("Received line: ", line.rstrip())
    # Tell progressbar to add another step:
    wx.CallAfter(self.sender, "update", msg="")
Into:
# Note the closing quote of the shell-script string after self.path.
command = r"""/usr/bin/osascript -e 'do shell script "/usr/local/bin/python3.6 -u """ + self.path + r"""" with prompt "Start Deletion Process" with administrator privileges'"""
command_list = shlex.split(command)
launch_script = subprocess.Popen(command_list, stdout=subprocess.PIPE)
for line in io.TextIOWrapper(launch_script.stdout, encoding="utf-8"):
    print("Received line: ", line.rstrip())
    # Tell progressbar to add another step:
    wx.CallAfter(self.sender, "update", msg="")
It stops working because osascript apparently runs in a non-interactive shell. This means script2 doesn't send any output until it is fully finished, causing the progress bar in script1 to stall.
My question thus becomes: how can I use the macOS native GUI to ask for the sudo password, thus avoiding having to store it in a variable, while still being able to catch the stdout of the privileged script as an interactive / real-time stream?
Hope that makes sense.
Would appreciate any insights!
My question thus becomes: how can I use the macOS native GUI to ask for the sudo password, thus avoiding having to store it in a variable, while still being able to catch the stdout of the privileged script as an interactive / real-time stream?
I have found a solution myself, using a named pipe (os.mkfifo()).
That way, you can have two Python scripts communicate with each other while one of them is launched with privileged rights via osascript (meaning: you get a native GUI window that asks for the user's sudo password).
Working solution:
mainscript.py
import os
import shlex
import subprocess
import time
from threading import Thread

class LaunchDeletionProcess(Thread):

    def __init__(self):
        Thread.__init__(self)

    def run(self):
        launch_command = r"""/usr/bin/osascript -e 'do shell script "/usr/local/bin/python3.6 -u /path/to/priviliged_script.py" with prompt "Start Deletion Process" with administrator privileges'"""
        split_command = shlex.split(launch_command)
        print("Thread 1 started")
        testprogram = subprocess.Popen(split_command)
        testprogram.wait()
        print("Thread1 Finished")

class ReadStatus(Thread):

    def __init__(self):
        Thread.__init__(self)

    def run(self):
        # Wait until the privileged script has created the named pipe.
        while not os.path.exists(os.path.expanduser("~/p1")):
            time.sleep(0.1)
        print("Thread 2 started")
        self.wfPath = os.path.expanduser("~/p1")
        rp = open(self.wfPath, 'r')
        response = rp.read()
        self.try_pipe(response)

    def try_pipe(self, response):
        rp = open(self.wfPath, 'r')
        response = rp.read()
        print("Receiving response: ", response)
        rp.close()
        if response == "end":  # priviliged_script.py writes "end" as its final message
            print("Got to end")
            os.remove(os.path.expanduser("~/p1"))
        else:
            time.sleep(1)
            self.try_pipe(response)

if __name__ == "__main__":
    thread1 = LaunchDeletionProcess()
    thread2 = ReadStatus()
    thread1.start()
    thread2.start()
priviliged_script.py
import os
import random
import time

wfPath = os.path.expanduser("~/p1")

try:
    os.mkfifo(wfPath)
except OSError:
    print("error")

result = 10
nr = 0

while nr < result:
    random_nr = random.random()
    wp = open(wfPath, 'w')
    print("writing new number: ", random_nr)
    wp.write("Number: " + str(random_nr))
    wp.close()
    time.sleep(1)
    nr += 1

wp = open(wfPath, 'w')
wp.write("end")
wp.close()
I'm using a strategy based around os.dup2 (similar to examples on this site) to redirect C/fortran level output into a temporary file for capturing.
The only problem I've noticed is, if you use this code from an interactive shell in Windows (either python.exe or ipython), it has the strange side effect of enabling output buffering in the console.
Before capture, sys.stdout is some kind of file object that returns True for isatty(). Typing print('hi') causes hi to be output directly.
After capture, sys.stdout points to exactly the same file object, but print('hi') no longer shows anything until sys.stdout.flush() is called.
Below is a minimal example script "test.py"
import os, sys, tempfile

class Capture(object):
    def __init__(self):
        super(Capture, self).__init__()
        self._org = None   # Original stdout stream
        self._dup = None   # Original system stdout descriptor
        self._file = None  # Temporary file to write stdout to

    def start(self):
        self._org = sys.stdout
        sys.stdout = sys.__stdout__
        fdout = sys.stdout.fileno()
        self._file = tempfile.TemporaryFile()
        self._dup = None
        if fdout >= 0:
            self._dup = os.dup(fdout)
            os.dup2(self._file.fileno(), fdout)

    def stop(self):
        sys.stdout.flush()
        if self._dup is not None:
            os.dup2(self._dup, sys.stdout.fileno())
            os.close(self._dup)
        sys.stdout = self._org
        self._file.seek(0)
        out = self._file.readlines()
        self._file.close()
        return out

def run():
    c = Capture()
    c.start()
    os.system('echo 10')
    print('20')
    x = c.stop()
    print(x)

if __name__ == '__main__':
    run()
Opening a command prompt and running the script works fine. This produces the expected output:
python.exe test.py
Running it from a python shell does not:
python.exe
>>> import test
>>> test.run()
>>> print('hello?')
No output is shown until stdout is flushed:
>>> import sys
>>> sys.stdout.flush()
Does anybody have any idea what's going on?
Quick info:
The issue appears on Windows, not on Linux (so probably not on Mac).
Same behaviour in both Python 2.7.6 and Python 2.7.9
The script should capture C/fortran output, not just python output
It runs without errors on Windows, but afterwards print() no longer flushes
I could confirm a related problem with Python 2 in Linux, but not with Python 3
The basic problem is
>>> sys.stdout is sys.__stdout__
True
Thus you are using the original sys.stdout object all the time. And when you do the first output, in Python 2 it executes the isatty() system call once for the underlying file, and stores the result.
You should open an altogether new file and replace sys.stdout with it.
Thus the proper way to write the Capture class would be
import sys
import tempfile
import time
import os

class Capture(object):
    def __init__(self):
        super(Capture, self).__init__()

    def start(self):
        self._old_stdout = sys.stdout
        self._stdout_fd = self._old_stdout.fileno()
        self._saved_stdout_fd = os.dup(self._stdout_fd)
        self._file = sys.stdout = tempfile.TemporaryFile(mode='w+t')
        os.dup2(self._file.fileno(), self._stdout_fd)

    def stop(self):
        os.dup2(self._saved_stdout_fd, self._stdout_fd)
        os.close(self._saved_stdout_fd)
        sys.stdout = self._old_stdout
        self._file.seek(0)
        out = self._file.readlines()
        self._file.close()
        return out

def run():
    c = Capture()
    c.start()
    os.system('echo 10')
    x = c.stop()
    print(x)
    time.sleep(1)
    print("finished")

run()
With this program, in both Python 2 and Python 3, the output will be:
['10\n']
finished
with the first line appearing on the terminal instantaneously, and the second after one second delay.
This would fail for code that imports stdout from sys, however. Luckily not much code does that.
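For instance, a module written like this (a hypothetical sketch; report is my name) keeps a reference to the original stream object and bypasses the capture:

# hypothetical module that grabs the stream object once, at import time
from sys import stdout

def report(msg):
    # Writes to whatever object stdout was bound to at import time,
    # not to the temporary file later assigned to sys.stdout.
    stdout.write(msg + '\n')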
I'm writing an appindicator for Grive, a daemon for syncing Google Drive files. Since I have little programming experience, I decided to write a Python script that calls Grive as a subprocess instead of integrating it in its C++ source code.
I've adapted Stefaan Lippens' code for asynchronously reading subprocess pipes to both show a notification and change the indicator's icon when something important happens (e.g. a new file is added, or a network error occurs). Notifications work well; however, the indicator's icon changes only when the whole process has finished, which is useless because I need to change it many times while the process is running.
Here is the code I'm using:
async.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

import subprocess
import time
import threading
import Queue

class AsynchronousFileReader(threading.Thread):
    '''
    Helper class to implement asynchronous reading of a file
    in a separate thread. Pushes read lines on a queue to
    be consumed in another thread.
    '''

    def __init__(self, fd, queue):
        assert isinstance(queue, Queue.Queue)
        assert callable(fd.readline)
        threading.Thread.__init__(self)
        self._fd = fd
        self._queue = queue

    def run(self):
        '''The body of the thread: read lines and put them on the queue.'''
        for line in iter(self._fd.readline, ''):
            self._queue.put(line)

    def eof(self):
        '''Check whether there is no more content to expect.'''
        return not self.is_alive() and self._queue.empty()

def run(command, show):
    '''
    Main function to consume the output of a command.

    command = The command to be run
    show = Function that will process output
    '''
    # Launch the command as subprocess.
    process = subprocess.Popen(command, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)

    # Launch the asynchronous readers of the process' stdout and stderr.
    stdout_queue = Queue.Queue()
    stdout_reader = AsynchronousFileReader(process.stdout, stdout_queue)
    stdout_reader.start()
    stderr_queue = Queue.Queue()
    stderr_reader = AsynchronousFileReader(process.stderr, stderr_queue)
    stderr_reader.start()

    # Check the queues if we received some output (until there is nothing more to get).
    while not stdout_reader.eof() or not stderr_reader.eof():
        # Show what we received from standard output.
        while not stdout_queue.empty():
            line = stdout_queue.get()
            show(line)
        # Show what we received from standard error.
        while not stderr_queue.empty():
            line = stderr_queue.get()
            show(line)
        # Sleep a bit before asking the readers again.
        time.sleep(.1)

    # Let's be tidy and join the threads we've started.
    stdout_reader.join()
    stderr_reader.join()

    # Close subprocess' file descriptors.
    process.stdout.close()
    process.stderr.close()
    return True
grive.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

import subprocess
import time

import async

from gi.repository import Gtk
from gi.repository import Gio
from gi.repository import GObject
from gi.repository import Notify
from gi.repository import AppIndicator3 as AppIndicator

GRIVE_FOLDER = "/home/ilhuitemoc/Publike/Google Drive"

def show(line):
    line = line.replace("\n", "")
    print line
    if line.startswith("[gr::expt::MsgTag*] = "):
        line = line.replace("[gr::expt::MsgTag*] = ", "", 1)
        n = Notify.Notification.new("Error", line, icon)
        n.show()
        indicator.set_icon("ubuntuone-client-offline")
    if line.startswith("sync "):
        line = line.replace("sync ", "", 1)
        line = line.replace('"', '<b>', 1)
        line = line.replace('"', '</b>', 1)
        n = Notify.Notification.new("Sync in progress", line, icon)
        n.show()
        indicator.set_icon("ubuntuone-client-updating")
    if "Finished!" in line:
        #n = Notify.Notification.new(line, line, icon)
        #n.show()
        indicator.set_icon("ubuntuone-client-idle")

def openinfolder(obj):
    subprocess.call(["xdg-open", GRIVE_FOLDER])

def openinbrowser(obj):
    subprocess.call(["xdg-open", "http://drive.google.com/"])

if __name__ == '__main__':
    subprocess.call(["killall", "pantheon-notify"])
    time.sleep(1)

    indicator = AppIndicator.Indicator.new('grive', 'ubuntuone-client-offline', AppIndicator.IndicatorCategory.APPLICATION_STATUS)
    indicator.set_status(AppIndicator.IndicatorStatus.ACTIVE)

    menu = Gtk.Menu()

    status = Gtk.MenuItem("Connecting...")  # Not finished yet
    status.set_sensitive(False)
    menu.append(status)

    sp = Gtk.SeparatorMenuItem()
    menu.append(sp)

    mi = Gtk.MenuItem("Open Google Drive folder")
    mi.connect('activate', openinfolder)
    menu.append(mi)

    mi = Gtk.MenuItem("Go to Google Drive webpage")
    mi.connect('activate', openinbrowser)
    menu.append(mi)

    sp = Gtk.SeparatorMenuItem()
    menu.append(sp)

    mi = Gtk.ImageMenuItem("Quit")
    img = Gtk.Image.new_from_stock(Gtk.STOCK_QUIT, Gtk.IconSize.MENU)
    mi.set_image(img)
    mi.connect('activate', Gtk.main_quit)
    menu.append(mi)

    menu.show_all()
    indicator.set_menu(menu)

    Notify.init('grive')
    icon = 'google-drive'

    #async.run('cd "%s" && grive' % GRIVE_FOLDER, show)
    GObject.timeout_add(5*60000, async.run, 'cd "%s" && grive' % GRIVE_FOLDER, show)
    #GObject.timeout_add(5000, async.run, "test.sh", show)

    Gtk.main()
I think I'm doing something wrong, but it's not obvious to me. Is it right to modify the indicator using a subprocess? Or is there another way I can do this correctly?
I've been writing a small Python script that executes some shell commands using the subprocess module and a helper function:
import datetime
import subprocess as sp
import sys

def run(command, description):
    """Runs a command in a formatted manner. Returns its return code."""
    start = datetime.datetime.now()
    sys.stderr.write('%-65s' % description)
    s = sp.Popen(command, shell=True, stderr=sp.PIPE, stdout=sp.PIPE)
    out, err = s.communicate()
    end = datetime.datetime.now()
    duration = end - start
    status = 'Done' if s.returncode == 0 else 'Failed'
    print '%s (%d seconds)' % (status, duration.seconds)
    return s.returncode
The following lines read the standard output and error:
s = sp.Popen(command, shell=True, stderr=sp.PIPE, stdout=sp.PIPE)
out, err = s.communicate()
As you can see, stdout and stderr are not used. Suppose that I want to write the output and error messages to a log file, in a formatted way, e.g.:
[STDOUT: 2011-01-17 14:53:55] <message>
[STDERR: 2011-01-17 14:53:56] <message>
My question is, what's the most Pythonic way to do it? I thought of three options:
Inherit the file object and override the write method.
Use a Delegate class which implements write (sketched after this list).
Connect to the PIPE itself in some way.
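For concreteness, option 2 in its simplest form might look like the sketch below (LogDelegate is a hypothetical name, not an existing class); the answers that follow explain why Popen bypasses it:

import datetime

class LogDelegate(object):
    """Hypothetical delegate that timestamps each line before forwarding."""

    def __init__(self, stream, tag):
        self.stream = stream
        self.tag = tag

    def write(self, message):
        # Prefix every line with the [TAG: timestamp] format from above.
        stamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        for line in message.splitlines():
            self.stream.write('[%s: %s] %s\n' % (self.tag, stamp, line))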
UPDATE: reference test script
I'm checking the results with this script, saved as test.py:
#!/usr/bin/python
import sys
sys.stdout.write('OUT\n')
sys.stdout.flush()
sys.stderr.write('ERR\n')
sys.stderr.flush()
Any ideas?
1 and 2 are reasonable solutions, but overriding write() won't be enough.
The problem is that Popen needs real file handles to attach to the process, so plain Python file objects don't work; they have to be OS-level. To solve that you need a Python object that has an OS-level file handle. The only way I can think of to achieve that is to use a pipe, so you have an OS-level file handle to write to. But then you need another thread that sits and polls that pipe for things to read, so it can log them. (So this is more strictly an implementation of 2, as it delegates to logging.)
Said and done:
import io
import logging
import os
import select
import subprocess
import time
import threading

LOG_FILENAME = 'output.log'
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)

class StreamLogger(io.IOBase):
    def __init__(self, level):
        self.level = level
        self.pipe = os.pipe()
        self.thread = threading.Thread(target=self._flusher)
        self.thread.start()

    def _flusher(self):
        self._run = True
        buf = b''
        while self._run:
            for fh in select.select([self.pipe[0]], [], [], 0)[0]:
                buf += os.read(fh, 1024)
                while b'\n' in buf:
                    data, buf = buf.split(b'\n', 1)
                    self.write(data.decode())
            time.sleep(1)
        self._run = None

    def write(self, data):
        return logging.log(self.level, data)

    def fileno(self):
        return self.pipe[1]

    def close(self):
        if self._run:
            self._run = False
            while self._run is not None:
                time.sleep(1)
            os.close(self.pipe[0])
            os.close(self.pipe[1])
So this class creates an OS-level pipe that Popen can attach the subprocess's stdin/out/err to. It also starts a thread that polls the other end of that pipe once a second for things to log, which it then logs with the logging module.
Possibly this class should implement more things for completeness, but it works in this case anyway.
Example code:
with StreamLogger(logging.INFO) as out:
    with StreamLogger(logging.ERROR) as err:
        subprocess.Popen("ls", stdout=out, stderr=err, shell=True)
output.log ends up like so:
INFO:root:output.log
INFO:root:streamlogger.py
INFO:root:and
INFO:root:so
INFO:root:on
Tested with Python 2.6, 2.7 and 3.1.
I would think any implementation of 1 and 3 would need to use similar techniques. It is a bit involved, but unless you can make the Popen command log correctly itself, I don't have a better idea.
I would suggest option 3, with the logging standard library package. In this case I'd say the other 2 were overkill.
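A minimal sketch of that suggestion (run_logged is my name, not an existing helper): read both pipes with communicate() and hand each line to the logging package. Note it only logs after the command finishes, unlike the pipe-and-thread answers.

import logging
import subprocess as sp

logging.basicConfig(filename='output.log', level=logging.DEBUG,
                    format='[%(name)s: %(asctime)s] %(message)s')

def run_logged(command):
    # Simplest form of option 3: collect both pipes, then log per line.
    proc = sp.Popen(command, shell=True, stdout=sp.PIPE, stderr=sp.PIPE)
    out, err = proc.communicate()
    for line in out.splitlines():
        logging.getLogger('STDOUT').info(line)
    for line in err.splitlines():
        logging.getLogger('STDERR').error(line)
    return proc.returncode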
1 and 2 won't work. Here's an implementation of the principle:
import subprocess
import time

FileClass = open('tmptmp123123123.tmp', 'w').__class__

class WrappedFile(FileClass):
    TIMETPL = "%Y-%m-%d %H:%M:%S"
    TEMPLATE = "[%s: %s] "

    def __init__(self, name, mode='r', buffering=None, title=None):
        self.title = title or name
        if buffering is None:
            super(WrappedFile, self).__init__(name, mode)
        else:
            super(WrappedFile, self).__init__(name, mode, buffering)

    def write(self, s):
        stamp = time.strftime(self.TIMETPL)
        if not s:
            return
        # Add a line with timestamp per line to be written
        s = s.split('\n')
        spre = self.TEMPLATE % (self.title, stamp)
        s = "\n".join(["%s %s" % (spre, line) for line in s]) + "\n"
        super(WrappedFile, self).write(s)
The reason it doesn't work is that Popen never calls stdout.write. A wrapped file will work fine when we call its write method and will even be written to if passed to Popen, but the write will happen in a lower layer, skipping the write method.
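A quick way to see this with the WrappedFile above (a sketch; run.log is an arbitrary filename):

import subprocess

log = WrappedFile('run.log', 'w', title='STDOUT')
# Popen only asks the file object for its OS-level descriptor via
# fileno(), so 'hi' reaches run.log without WrappedFile.write running.
subprocess.call('echo hi', shell=True, stdout=log)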
This simple solution worked for me:
import sys
import datetime
import tempfile
import subprocess as sp

def run(command, description):
    """Runs a command in a formatted manner. Returns its return code."""
    with tempfile.SpooledTemporaryFile(8*1024) as so:
        print >> sys.stderr, '%-65s' % description
        start = datetime.datetime.now()
        retcode = sp.call(command, shell=True, stderr=sp.STDOUT, stdout=so)
        end = datetime.datetime.now()
        so.seek(0)
        for line in so.readlines():
            print >> sys.stderr, 'logging this:', line.rstrip()
        duration = end - start
        status = 'Done' if retcode == 0 else 'Failed'
        print >> sys.stderr, '%s (%d seconds)' % (status, duration.seconds)
        return retcode

REF_SCRIPT = r"""#!/usr/bin/python
import sys
sys.stdout.write('OUT\n')
sys.stdout.flush()
sys.stderr.write('ERR\n')
sys.stderr.flush()
"""

SCRIPT_NAME = 'refscript.py'

if __name__ == '__main__':
    with open(SCRIPT_NAME, 'w') as script:
        script.write(REF_SCRIPT)
    run('python ' + SCRIPT_NAME, 'Reference script')
This uses Adam Rosenfield's make_async and read_async. Whereas my original answer used select.epoll and was thus Linux-only, it now uses select.select, which should work under Unix or Windows.
This logs output from the subprocess to /tmp/test.log as it occurs:
import logging
import subprocess
import shlex
import select
import fcntl
import os
import errno

def make_async(fd):
    # https://stackoverflow.com/a/7730201/190597
    '''add the O_NONBLOCK flag to a file descriptor'''
    fcntl.fcntl(fd, fcntl.F_SETFL, fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK)

def read_async(fd):
    # https://stackoverflow.com/a/7730201/190597
    '''read some data from a file descriptor, ignoring EAGAIN errors'''
    try:
        return fd.read()
    except IOError, e:
        if e.errno != errno.EAGAIN:
            raise e
        else:
            return ''

def log_process(proc, stdout_logger, stderr_logger):
    loggers = {proc.stdout: stdout_logger, proc.stderr: stderr_logger}

    def log_fds(fds):
        for fd in fds:
            out = read_async(fd)
            if out.strip():
                loggers[fd].info(out)

    make_async(proc.stdout)
    make_async(proc.stderr)
    while True:
        # Wait for data to become available
        rlist, wlist, xlist = select.select([proc.stdout, proc.stderr], [], [])
        log_fds(rlist)
        if proc.poll() is not None:
            # Corner case: check if more output was created
            # between the last call to read_async and now
            log_fds([proc.stdout, proc.stderr])
            break

if __name__ == '__main__':
    formatter = logging.Formatter('[%(name)s: %(asctime)s] %(message)s')
    handler = logging.FileHandler('/tmp/test.log', 'w')
    handler.setFormatter(formatter)

    stdout_logger = logging.getLogger('STDOUT')
    stdout_logger.setLevel(logging.DEBUG)
    stdout_logger.addHandler(handler)

    stderr_logger = logging.getLogger('STDERR')
    stderr_logger.setLevel(logging.DEBUG)
    stderr_logger.addHandler(handler)

    proc = subprocess.Popen(shlex.split('ls -laR /tmp'),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    log_process(proc, stdout_logger, stderr_logger)