Cannot kill a subprocess created by Popen while printing process.stdout - Python

I have created a script which should run a command and kill it after 15 seconds
import logging
import subprocess
import time
import os
import sys
import signal
#cmd = "ping 192.168.1.1 -t"
cmd = "C:\\MyAPP\MyExe.exe -t 80 -I C:\MyApp\Temp -M Documents"
proc=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,shell=True)
for line in proc.stdout:
    print(line.decode("utf-8"), end='')
time.sleep(15)
os.kill(proc.pid, signal.SIGTERM)
#proc.kill() #Tried this too but no luck
This does not terminate my subprocess. However, if I comment out the part that logs to stdout, i.e.
for line in proc.stdout:
    print(line.decode("utf-8"), end='')
the subprocess is killed.
I have tried proc.kill() and CTRL_C_EVENT too but no luck.
Any help would be highly appreciated. Please treat me as a novice to Python.

To terminate the subprocess after 15 seconds while printing its output line by line:
#!/usr/bin/env python
from __future__ import print_function
from threading import Timer
from subprocess import Popen, PIPE, STDOUT

# start process
cmd = r"C:\MyAPP\MyExe.exe -t 80 -I C:\MyApp\Temp -M Documents"
process = Popen(cmd, stdout=PIPE, stderr=STDOUT,
                bufsize=1, universal_newlines=True)

# terminate process in 15 seconds (terminate() is defined below)
timer = Timer(15, terminate, args=[process])
timer.start()

# print output
for line in iter(process.stdout.readline, ''):
    print(line, end='')
process.stdout.close()
process.wait()  # wait for the child process to finish
timer.cancel()
Notice that you don't need shell=True here. You could define terminate() as:
def terminate(process):
    if process.poll() is None:
        try:
            process.terminate()
        except EnvironmentError:
            pass  # ignore
If you want to kill the whole process tree then define terminate() as:
from subprocess import call

def terminate(process):
    if process.poll() is None:
        call('taskkill /F /T /PID ' + str(process.pid))
- Use raw-string literals for Windows paths: r"..."; otherwise you have to escape all backslashes in the string literal.
- Drop shell=True. It creates an additional process for no reason here.
- universal_newlines=True enables text mode (on Python 3, bytes are decoded into Unicode text automatically using the locale's preferred encoding).
- iter(process.stdout.readline, '') is necessary for compatibility with Python 2, otherwise the data may be printed with a delay due to the read-ahead buffer bug (a Python 3-only sketch follows this list).
- Use process.terminate() instead of process.send_signal(signal.SIGTERM) or os.kill(proc.pid, signal.SIGTERM).
- taskkill allows you to kill a whole process tree on Windows.
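On Python 3 only (an assumption; the read-ahead workaround is then unnecessary), the pipe can be iterated directly. A minimal sketch with the same command and 15-second timeout as above:
from subprocess import Popen, PIPE, STDOUT
from threading import Timer

cmd = r"C:\MyAPP\MyExe.exe -t 80 -I C:\MyApp\Temp -M Documents"
process = Popen(cmd, stdout=PIPE, stderr=STDOUT, universal_newlines=True)
timer = Timer(15, process.terminate)   # or the tree-killing terminate() shown above
timer.start()
for line in process.stdout:            # yields lines as they arrive on Python 3
    print(line, end='')
process.stdout.close()
process.wait()
timer.cancel()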

The problem is that reading from stdout is blocking. You need to either read the subprocess's output or run the timer on a separate thread.
from subprocess import Popen, PIPE
from threading import Thread
from time import sleep

class ProcKiller(Thread):
    def __init__(self, proc, time_limit):
        super(ProcKiller, self).__init__()
        self.proc = proc
        self.time_limit = time_limit

    def run(self):
        sleep(self.time_limit)
        self.proc.kill()

p = Popen('while true; do echo hi; sleep 1; done', shell=True)
t = ProcKiller(p, 5)
t.start()
p.communicate()
EDITED to reflect the changes suggested in the comments:
from subprocess import Popen, PIPE
from threading import Thread
from time import sleep
from signal import SIGTERM
import os

class ProcKiller(Thread):
    def __init__(self, proc, time_limit):
        super(ProcKiller, self).__init__()
        self.proc = proc
        self.time_limit = time_limit

    def run(self):
        sleep(self.time_limit)
        os.kill(self.proc.pid, SIGTERM)

p = Popen('while true; do echo hi; sleep 1; done', shell=True)
t = ProcKiller(p, 5)
t.start()
p.communicate()

Related

How to terminate a thread subprocess call and thread exit in Python?

I am trying to run subprocess Popen in a thread in Python. The command in Popen is expected to run continuously to collect logs. But when a condition is met outside the thread, I want to stop the Popen subprocess and have the corresponding thread finish as well. Below is representative sample code:
import threading
import subprocess

class MyClass(threading.Thread):
    def __init__(self):
        super(MyClass, self).__init__()

    def run(self):
        self.proc = subprocess.Popen("while true; do foo; sleep 2; done", shell=True,
                                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = self.proc.communicate()

myclass = MyClass()
myclass.start()
myclass.proc.kill()
print("Done")
But in the above code, it gets stuck forever. What is the correct way to stop the running Popen subprocess and also to finish the thread?
All you really need to do is have the thread, i.e. the MyClass instance, do the "killing" itself: instantiate MyClass with an event that it waits on, and when that event is set it kills the process. As I do not know what is in foo, I have substituted a simple echo hello for it and set text=True on the Popen call so that the output is Unicode rather than bytes:
import threading
import subprocess
import time

class MyClass(threading.Thread):
    def __init__(self, evt):
        super(MyClass, self).__init__()
        self.evt = evt

    def run(self):
        proc = subprocess.Popen("while true; do echo hello; sleep 2; done", shell=True,
                                text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # wait for the "kill" order:
        self.evt.wait()
        proc.kill()
        stdout, stderr = proc.communicate()
        # for demo purposes let's see what has been output:
        print(stdout, end='')

evt = threading.Event()
myclass = MyClass(evt)
myclass.start()
# let the process run for 5 seconds and then give the kill order:
time.sleep(5)
print("Killing process:")
evt.set()
myclass.join()
print("Done")
Prints:
Killing process:
hello
hello
hello
Done
Add the parameter preexec_fn=os.setsid to your Popen call.
Then use os.killpg(proc.pid, signal.SIGKILL) to kill the subprocess (and its children) by pid.
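A minimal sketch of that approach (POSIX only; the shell loop and the 5-second delay are just for illustration):
import os
import signal
import subprocess
import time

# start the child in its own session so its pid equals its process-group id
proc = subprocess.Popen("while true; do echo hello; sleep 2; done",
                        shell=True, preexec_fn=os.setsid)
time.sleep(5)
os.killpg(proc.pid, signal.SIGKILL)  # kills the shell and everything it spawned
proc.wait()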

How to make subprocess run for 60 sec

I have the following Python script.
What I want is to let the subprocess run for 60 seconds, then send it the SIGINT signal and write the output to a file. If I use sleep, the subprocess doesn't run.
#!/usr/bin/python
import os
import subprocess
PIPE = subprocess.PIPE
import signal
import time

def handler(signum, frame):
    pass

signal.signal(signal.SIGALRM, handler)
signal.alarm(60)

command = "strace -c ./server"
os.chdir("/root/Desktop/")
p = subprocess.Popen(command, stdout=PIPE, stderr=PIPE)
time.sleep(60)
p.send_signal(signal.SIGINT)
signal.alarm(0)
print p.communicate()[1]
In Python 3.3 and newer, there is a simpler version of the answer (untested code):
with open('output', 'wb', 0) as output_file:
    p = subprocess.Popen("strace -c ./server".split(),
                         stdout=output_file, stderr=subprocess.STDOUT,
                         cwd="/root/Desktop/",
                         close_fds=True)
    try:
        p.wait(60)  # wait for the child to finish, or for 60 seconds, whichever comes first
    except subprocess.TimeoutExpired:
        p.send_signal(signal.SIGINT)
        p.wait()  # wait for the process to actually exit after receiving the signal
See also: https://docs.python.org/3/library/subprocess.html#subprocess.Popen.wait
There are several issues:
- command should be a list
- you should read from the p.stdout/p.stderr pipes, otherwise the child process may stall if it generates enough output
- you should use either time.sleep() or signal.alarm() here, not both
I want to run the subprocess for 60 sec and then send the SIGINT signal to the subprocess and write the output to a file.
Start the subprocess, redirect its output to a file:
with open('output', 'wb', 0) as output_file:
    p = subprocess.Popen("strace -c ./server".split(),
                         stdout=output_file, stderr=subprocess.STDOUT,
                         cwd="/root/Desktop/",
                         close_fds=True)
Send SIGINT in a minute:
class Alarm(Exception):
    pass

def alarm_handler(signum, frame):
    raise Alarm

# set signal handler
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(60)  # produce SIGALRM in a minute

try:
    p.wait()  # wait for the child process to finish
    signal.alarm(0)  # cancel alarm
except Alarm:
    p.send_signal(signal.SIGINT)
    p.wait()

Popen stdout reading pipe, deadlock using sleep

Well, I have two scripts: a.py, which prints the output of the b.py script, as follows:
#a.py
from subprocess import Popen, PIPE, STDOUT

p = Popen(['/Users/damian/Desktop/b.py'], shell=False, stdout=PIPE, stderr=STDOUT)
while p.poll() is None:
    print p.stdout.readline()

#b.py
#!/usr/bin/env python
import time

while 1:
    print 'some output'
    #time.sleep(1)
This works. But why do my scripts deadlock when I uncomment the time.sleep() line?
Your output is probably buffered. Add a .flush() call on stdout to clear it:
import sys
import time

while 1:
    print 'someoutput'
    sys.stdout.flush()
    time.sleep(1)
If you add -u to the call in a.py (making the output unbuffered), then you don't need to modify the b.py script:
import sys
from subprocess import Popen, PIPE, STDOUT

p = Popen([sys.executable, '-u', '/Users/damian/Desktop/b.py'],
          stdout=PIPE, stderr=STDOUT, close_fds=True)
for line in iter(p.stdout.readline, ''):
    print line,
p.stdout.close()
if p.wait() != 0:
    raise RuntimeError("b.py failed, exit status: %d" % p.returncode)
See more ways to get output from a subprocess.

Python subprocess in parallel

I want to run many processes in parallel with the ability to read stdout at any time. How should I do it? Do I need to run a thread for each subprocess.Popen() call, or what?
You can do it in a single thread.
Suppose you have a script that prints lines at random times:
#!/usr/bin/env python
#file: child.py
import os
import random
import sys
import time

for i in range(10):
    print("%2d %s %s" % (int(sys.argv[1]), os.getpid(), i))
    sys.stdout.flush()
    time.sleep(random.random())
If you'd like to collect the output as soon as it becomes available, you could use select on POSIX systems, as @zigg suggested:
#!/usr/bin/env python
from __future__ import print_function
from select import select
from subprocess import Popen, PIPE

# start several subprocesses
processes = [Popen(['./child.py', str(i)], stdout=PIPE,
                   bufsize=1, close_fds=True,
                   universal_newlines=True)
             for i in range(5)]

# read output
timeout = 0.1  # seconds
while processes:
    # remove finished processes from the list (O(N**2))
    for p in processes[:]:
        if p.poll() is not None:  # process ended
            print(p.stdout.read(), end='')  # read the rest
            p.stdout.close()
            processes.remove(p)

    # wait until there is something to read
    rlist = select([p.stdout for p in processes], [], [], timeout)[0]

    # read a line from each process that has output ready
    for f in rlist:
        print(f.readline(), end='')  # NOTE: it can block
A more portable solution (that should work on Windows, Linux, OSX) can use reader threads for each process; see Non-blocking read on a subprocess.PIPE in python.
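A rough sketch of that reader-thread idea, assuming the same child.py script as above (one thread per pipe pushes lines onto a shared queue and the main thread drains it; this is my sketch, not code from the linked question):
#!/usr/bin/env python
from __future__ import print_function
from subprocess import Popen, PIPE
from threading import Thread
try:
    from queue import Queue   # Python 3
except ImportError:
    from Queue import Queue   # Python 2

def reader(pipe, queue):
    # push each line onto the queue, then signal end-of-stream with None
    with pipe:
        for line in iter(pipe.readline, ''):
            queue.put(line)
    queue.put(None)

processes = [Popen(['./child.py', str(i)], stdout=PIPE,
                   bufsize=1, universal_newlines=True)
             for i in range(5)]
q = Queue()
for p in processes:
    Thread(target=reader, args=[p.stdout, q]).start()

finished = 0
while finished < len(processes):  # expect one None sentinel per process
    line = q.get()
    if line is None:
        finished += 1
    else:
        print(line, end='')

for p in processes:
    p.wait()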
Here's an os.pipe()-based solution that works on Unix and Windows:
#!/usr/bin/env python
from __future__ import print_function
import io
import os
import sys
from subprocess import Popen

ON_POSIX = 'posix' in sys.builtin_module_names

# create a pipe to get data
input_fd, output_fd = os.pipe()

# start several subprocesses
processes = [Popen([sys.executable, 'child.py', str(i)], stdout=output_fd,
                   close_fds=ON_POSIX)  # close input_fd in children
             for i in range(5)]
os.close(output_fd)  # close unused end of the pipe

# read output line by line as soon as it is available
with io.open(input_fd, 'r', buffering=1) as file:
    for line in file:
        print(line, end='')

for p in processes:
    p.wait()
You can also collect stdout from multiple subprocesses concurrently using twisted:
#!/usr/bin/env python
import sys
from twisted.internet import protocol, reactor

class ProcessProtocol(protocol.ProcessProtocol):
    def outReceived(self, data):
        print data,  # received chunk of stdout from child

    def processEnded(self, status):
        global nprocesses
        nprocesses -= 1
        if nprocesses == 0:  # all processes ended
            reactor.stop()

# start subprocesses
nprocesses = 5
for _ in xrange(nprocesses):
    reactor.spawnProcess(ProcessProtocol(), sys.executable,
                         args=[sys.executable, 'child.py'],
                         usePTY=True)  # can change how child buffers stdout
reactor.run()
See Using Processes in Twisted.
You don't need to run a thread for each process. You can peek at the stdout streams for each process without blocking on them, and only read from them if they have data available to read.
You do have to be careful not to accidentally block on them, though, if you're not intending to.
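One way to sketch that peek-then-read idea is the standard library selectors module (my choice of API here, not part of the original answer; POSIX only, and readline() can still block on a partial line). It assumes the same child.py as above:
import selectors
from subprocess import Popen, PIPE

procs = [Popen(['./child.py', str(i)], stdout=PIPE,
               bufsize=1, universal_newlines=True)
         for i in range(5)]
sel = selectors.DefaultSelector()
for p in procs:
    sel.register(p.stdout, selectors.EVENT_READ)

while procs:
    # only pipes that actually have data ready are returned here
    for key, _ in sel.select(timeout=0.1):
        print(key.fileobj.readline(), end='')
    for p in procs[:]:  # drop processes that have exited
        if p.poll() is not None:
            print(p.stdout.read(), end='')  # drain whatever is left
            sel.unregister(p.stdout)
            p.stdout.close()
            procs.remove(p)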
You can poll the process with process.poll() until it finishes, and run other stuff concurrently:
import time
import sys
from subprocess import Popen, PIPE

def ex1() -> None:
    command = 'sleep 2.1 && echo "happy friday"'
    proc = Popen(command, shell=True, stderr=PIPE, stdout=PIPE)
    while proc.poll() is None:
        # do stuff here
        print('waiting')
        time.sleep(0.05)

    out, _err = proc.communicate()
    print(out, file=sys.stderr)
    sys.stderr.flush()
    assert proc.poll() == 0

ex1()

How to replicate tee behavior in Python when using subprocess?

I'm looking for a Python solution that will allow me to save the output of a command in a file without hiding it from the console.
FYI: I'm asking about tee (as the Unix command-line utility) and not the function with the same name from the Python itertools module.
Details
- A Python solution (not calling tee; it is not available under Windows)
- I do not need to provide any input to stdin for the called process
- I have no control over the called program. All I know is that it will output something to stdout and stderr and return with an exit code.
- It must work when calling external programs (subprocess)
- It must work for both stderr and stdout
- Being able to differentiate between stdout and stderr, because I may want to display only one of them on the console, or I could try to output stderr using a different color - this means that stderr = subprocess.STDOUT will not work.
- Live output (progressive) - the process can run for a long time, and I'm not able to wait for it to finish.
- Python 3 compatible code (important)
References
Here are some incomplete solutions I found so far:
http://devlishgenius.blogspot.com/2008/10/logging-in-real-time-in-python.html (mkfifo works only on Unix)
http://blog.kagesenshi.org/2008/02/teeing-python-subprocesspopen-output.html (doesn't work at all)
Diagram http://blog.i18n.ro/wp-content/uploads/2010/06/Drawing_tee_py.png
Current code (second try)
#!/usr/bin/python
from __future__ import print_function
import sys, os, time, subprocess, io, threading

cmd = "python -E test_output.py"

from threading import Thread

class StreamThread(Thread):
    def __init__(self, buffer):
        Thread.__init__(self)
        self.buffer = buffer

    def run(self):
        while 1:
            line = self.buffer.readline()
            print(line, end="")
            sys.stdout.flush()
            if line == '':
                break

proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdoutThread = StreamThread(io.TextIOWrapper(proc.stdout))
stderrThread = StreamThread(io.TextIOWrapper(proc.stderr))
stdoutThread.start()
stderrThread.start()
proc.communicate()
stdoutThread.join()
stderrThread.join()
print("--done--")

#### test_output.py ####
#!/usr/bin/python
from __future__ import print_function
import sys, os, time

for i in range(0, 10):
    if i % 2:
        print("stderr %s" % i, file=sys.stderr)
    else:
        print("stdout %s" % i, file=sys.stdout)
    time.sleep(0.1)
Real output
stderr 1
stdout 0
stderr 3
stdout 2
stderr 5
stdout 4
stderr 7
stdout 6
stderr 9
stdout 8
--done--
The expected output was to have the lines ordered. Remark: modifying the Popen call to use only one PIPE is not allowed, because in real life I will want to do different things with stderr and stdout.
Also, even in the second case I was not able to obtain real-time-like output; in fact, all the results were received when the process finished. By default, Popen should use no buffers (bufsize=0).
I see that this is a rather old post but just in case someone is still searching for a way to do this:
proc = subprocess.Popen(["ping", "localhost"],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)

with open("logfile.txt", "w") as log_file:
    while proc.poll() is None:
        line = proc.stderr.readline()
        if line:
            print "err: " + line.strip()
            log_file.write(line)
        line = proc.stdout.readline()
        if line:
            print "out: " + line.strip()
            log_file.write(line)
If requiring Python 3.6 isn't an issue, there is now a way of doing this using asyncio. This method allows you to capture stdout and stderr separately but still have both stream to the tty without using threads. Here's a rough outline:
import asyncio
import os
import sys

class RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr

def isWindows():
    # helper assumed by the original snippet; a simple platform check
    return sys.platform == "win32"

async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break

async def _stream_subprocess(cmd, stdin=None, quiet=False, echo=False) -> RunOutput:
    if isWindows():
        platform_settings = {"env": os.environ}
    else:
        platform_settings = {"executable": "/bin/bash"}

    if echo:
        print(cmd)

    p = await asyncio.create_subprocess_shell(
        cmd,
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        **platform_settings
    )
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout)),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="ERR:")),
        ]
    )

    return RunOutput(await p.wait(), out, err)

def run(cmd, stdin=None, quiet=False, echo=False) -> RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, stdin=stdin, quiet=quiet, echo=echo)
    )
    return result
The code above was based on this blog post: https://kevinmccarthy.org/2016/07/25/streaming-subprocess-stdin-and-stdout-with-asyncio-in-python/
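A hypothetical usage of the run() helper above (the command is only an example; on POSIX it is executed through /bin/bash):
result = run('echo "to stdout"; echo "to stderr" 1>&2')
print(result.returncode)   # 0
print(result.stdout)       # ['to stdout']
print(result.stderr)       # ['to stderr']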
This is a straightforward port of tee(1) to Python.
import sys

sinks = sys.argv[1:]
sinks = [open(sink, "w") for sink in sinks]
sinks.append(sys.stderr)
while True:
    input = sys.stdin.read(1024)
    if input:
        for sink in sinks:
            sink.write(input)
    else:
        break
I'm running on Linux right now but this ought to work on most platforms.
Now for the subprocess part, I don't know how you want to 'wire' the subprocess's stdin, stdout and stderr to your stdin, stdout, stderr and file sinks, but I know you can do this:
import subprocess

callee = subprocess.Popen(
    ["python", "-i"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
Now you can access callee.stdin, callee.stdout and callee.stderr like normal files, enabling the above "solution" to work. If you want to get the callee.returncode, you'll need to make an extra call to callee.poll().
Be careful with writing to callee.stdin: if the process has exited when you do that, an error may be raised (on Linux, I get IOError: [Errno 32] Broken pipe).
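One hypothetical way to do that wiring: save the tee port above as tee.py and pipe a child's stdout into it, so the output lands both on the console (via stderr) and in out.log. The names producer.py and tee.py are placeholders:
import subprocess
import sys

# producer.py stands in for your real program; tee.py is the port shown above
child = subprocess.Popen([sys.executable, "-u", "producer.py"],
                         stdout=subprocess.PIPE)
tee = subprocess.Popen([sys.executable, "tee.py", "out.log"],
                       stdin=child.stdout)
child.stdout.close()  # let tee.py own the read end of the pipe
tee.wait()
child.wait()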
This is how it can be done:
import sys
from subprocess import Popen, PIPE

with open('log.log', 'w') as log:
    proc = Popen(["ping", "google.com"], stdout=PIPE, encoding='utf-8')
    while proc.poll() is None:
        text = proc.stdout.readline()
        log.write(text)
        sys.stdout.write(text)
If you don't want to interact with the process you can use the subprocess module just fine.
Example:
tester.py
import os
import sys

for file in os.listdir('.'):
    print file
sys.stderr.write("Oh noes, a shrubbery!")
sys.stderr.flush()
sys.stderr.close()
testing.py
import subprocess

p = subprocess.Popen(['python', 'tester.py'], stdout=subprocess.PIPE,
                     stdin=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
print stdout, stderr
In your situation you can simply write stdout/stderr to a file first. You can send arguments to your process with communicate as well, though I wasn't able to figure out how to continually interact with the subprocess.
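A minimal sketch of that "write stdout/stderr to a file first" idea, passing open file objects directly to Popen (the file names are illustrative):
import subprocess

with open("stdout.log", "w") as out, open("stderr.log", "w") as err:
    subprocess.call(["python", "tester.py"], stdout=out, stderr=err)
# read the files back afterwards if you also want the output on the console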
On Linux, if you really need something like the tee(2) syscall, you can get it like this:
import os
import ctypes

ld = ctypes.CDLL(None, use_errno=True)
SPLICE_F_NONBLOCK = 0x02

def tee(fd_in, fd_out, length, flags=SPLICE_F_NONBLOCK):
    result = ld.tee(
        ctypes.c_int(fd_in),
        ctypes.c_int(fd_out),
        ctypes.c_size_t(length),
        ctypes.c_uint(flags),
    )
    if result == -1:
        errno = ctypes.get_errno()
        raise OSError(errno, os.strerror(errno))
    return result
To use this, you probably want to use Python 3.10 and something with os.splice (or use ctypes in the same way to get splice). See the tee(2) man page for an example.
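A rough usage sketch of the tee() wrapper above (tee(2) requires both file descriptors to be pipes; the byte count 512 is arbitrary):
r1, w1 = os.pipe()        # source pipe
r2, w2 = os.pipe()        # pipe receiving the duplicate
os.write(w1, b"hello\n")
n = tee(r1, w2, 512)      # duplicate up to 512 bytes without consuming them
print(os.read(r2, n))     # b'hello\n' -- the copy
print(os.read(r1, n))     # b'hello\n' -- still readable from the source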
My solution isn't elegant, but it works.
You can use PowerShell to gain access to "tee" under Windows.
import subprocess
import sys

cmd = ['powershell', 'ping', 'google.com', '|', 'tee', '-a', 'log.txt']
if 'darwin' in sys.platform:
    cmd.remove('powershell')
p = subprocess.Popen(cmd)
p.wait()
