import serial
import string
from time import sleep
from subprocess import call, check_call, CalledProcessError
import logging
import random
import signal
import os
# Log file shared by this script's logging module AND by dfu-util's
# redirected stdout/stderr -- two writers on one file (the problem
# discussed below).
LOG_FILENAME = "/tmp/dfu.log"
LOG_FD = open(LOG_FILENAME, "w")
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
# DFU images to flash on every pass of the endless test loop.
dfu_image_list = ['/tmp/sample.dfu']
while True:
    try:
        for test_file in dfu_image_list:
            logging.info("\n==================\nbegin dfu download test")
            # NOTE(review): test_file is never used -- the command always
            # downloads the hard-coded /tmp/sample.dfu image.
            check_call('sudo dfu-util -vD /tmp/sample.dfu', shell=True,
                       stdout=LOG_FD, stderr=LOG_FD)
            logging.info("Download completed successfully!")
            sleep(5)
    except CalledProcessError as e:
        # dfu-util exited non-zero: log the failure, pause, and retry.
        msg = "dfu-util failed with return code :%s \n\nMessage:%s" % (e.returncode, e.message)
        logging.warning(msg)
        logging.warning("USB device likely needs time to re-enumerate, waiting 10 seconds before restarting")
        sleep(10)
    except OSError:
        # dfu-util binary is missing entirely -- retrying cannot help.
        logging.error("dfu-util executable not found!")
        exit(1)
Execution of the above Python script writes logs to /tmp/dfu.log.
However, the only entries that appear in the log file come from the check_call function.
Expected behavior is main threads logs like
logging.info("\n==================\nbegin dfu download test")
logs of function check_call
logging.info("Download completed successfully!").
However, only the output of check_call gets reflected, and the main thread's logs like
begin dfu download test
Download completed successfully!
are not getting reflected into log file.
Remember that the logging module does some buffering, which means writing a log using log.something() doesn't necessarily means that this log will be written to the file.
Also, you're opening a file twice and writing to it from different places. Usually that's a bad idea — even though Linux is preemptive and flushing the log could possibly make it work, it's still a bad idea.
What about you communicate() with the process instead of check_call() and then you log the stdout or stderr as you wish. For example:
for image in dfu_image_list:
    logging.info('=' * 10)
    logging.info('Begin dfu download using {}'.format(image))
    # Capture dfu-util's output ourselves instead of letting it share the
    # log file descriptor with the logging module.
    process = Popen(['sudo', 'dfu-util', '-vD', image], stdout=PIPE, stderr=PIPE)
    stdout, stderr = process.communicate()
    logging.info(stdout)
    if stderr:
        logging.warning(stderr)
    logging.info('Download completed successfully!')
By the way, your loop logic is flawed, as any error will restart the loop over all the dfu_image_list images.
I think this is more what you want to do:
from sys import exit
from time import sleep
from subprocess import Popen, PIPE, CalledProcessError
import logging
LOG_FILENAME = "/tmp/dfu.log"
ATTEMPTS = 3  # how many times to retry a failed download before giving up

logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)


def download(where):
    """Download the DFU image *where* with dfu-util, retrying on failure.

    Returns True as soon as one attempt succeeds, False when all ATTEMPTS
    fail or the dfu-util executable cannot be found.
    """
    logging.info('=' * 10)
    logging.info('Begin dfu download to {}'.format(where))
    for attempt in range(ATTEMPTS):
        logging.info('Attempt #{}...'.format(attempt + 1))
        try:
            process = Popen(
                ['sudo', 'dfu-util', '-vD', where],
                stdout=PIPE, stderr=PIPE
            )
            stdout, stderr = process.communicate()
            logging.info(stdout)
            if process.returncode != 0:
                # Popen/communicate never raise CalledProcessError on their
                # own; raise it explicitly so the retry logic below actually
                # triggers on a failed download.
                raise CalledProcessError(process.returncode, 'dfu-util',
                                         output=stderr)
            if stderr:
                logging.warning(stderr)
            logging.info('Download completed successfully!')
            return True
        except CalledProcessError as e:
            logging.warning(
                'dfu-util failed with return code {}'.format(e.returncode)
            )
            logging.warning(
                # CalledProcessError has no .message attribute on Python 3;
                # use the captured stderr stored in .output instead.
                'Message:\n{}'.format(e.output)
            )
            logging.warning(
                'USB device likely needs time to re-enumerate, '
                'waiting 10 seconds before restarting...'
            )
            sleep(10)
            continue
        except OSError:
            logging.critical('dfu-util executable not found!')
            return False
    # All attempts exhausted without success.
    return False


if __name__ == '__main__':
    if not download('/tmp/sample.dfu'):
        exit(1)
Related
I have the following snippet of code:
def terminal_command(command, timeout=5*60):
    """Execute a terminal command with a timeout.

    Returns a (stdout, stderr, reason) tuple where reason is "finished" on
    normal completion or "timeout" when the command was stopped after
    *timeout* seconds (stdout/stderr are then the string 'error').
    """
    cmd = command.split(" ")
    proc = None
    # Defaults used when the command times out.
    stdout = b'error'
    stderr = b'error'
    reason = "timeout"
    try:
        # subprocess.run() kills the child and waits for it before
        # re-raising TimeoutExpired, so no terminate() is needed here.
        # (The original called proc.terminate() in the except branch, but
        # proc is still None there -- that raised AttributeError.)
        proc = subprocess.run(cmd, timeout=timeout, capture_output=True)
    except subprocess.TimeoutExpired:
        print("Timeout")
    if proc is not None:
        # Finished!
        stdout = proc.stdout
        stderr = proc.stderr
        reason = "finished"
    return stdout.decode('utf-8').strip(), stderr.decode('utf-8').strip(), reason
I ran a command which takes significantly longer than 5 minutes. In this instance, subprocess.run raises an exception, but proc is now None so I cannot use proc.terminate(). When the code terminates, as has been well documented elsewhere, the child process continues to run. I would like to terminate it.
Is there any way to terminate a subprocess on a TimeoutExpired, whilst redirecting output? I am on a Linux system so am open to requiring Popen but ideally I would like this to be cross-platform.
Four months later: I got it.
The core issue appears to be that killing only the direct child doesn't properly terminate the whole process tree.
Modifying my code to the following works.
def custom_terminal_command(self, command, timeout=5*60, cwd=None):
    """Run *command* in its own process group, killing the group on timeout.

    Returns (stdout, stderr, reason) where reason is 'finished', 'timeout'
    or 'other'.  NOTE(review): no pipes are requested from Popen, so
    stdout/stderr come back as None here -- confirm that is intended.
    """
    # preexec_fn=os.setsid starts the child in a new session/process group,
    # so killpg() below reaches the child AND any grandchildren it spawned.
    with subprocess.Popen(command.split(" "), preexec_fn=os.setsid) as process:
        wd = os.getcwd()
        try:
            if cwd is not None:
                # Walk into the target directory one path component at a
                # time (relative chdir per component).
                for d in cwd.split("/"):
                    os.chdir(d)
            stdout, stderr = process.communicate(None, timeout=timeout)
        except subprocess.TimeoutExpired as exc:
            import signal
            # Terminate the entire process group, not just the direct child.
            os.killpg(os.getpgid(process.pid), signal.SIGTERM)
            # Detect Windows by probing for the msvcrt module.
            try:
                import msvcrt
            except ModuleNotFoundError:
                _mswindows = False
            else:
                _mswindows = True
            if _mswindows:
                # Windows accumulates the output in a single blocking
                # read() call run on child threads, with the timeout
                # being done in a join() on those threads. communicate()
                # _after_ kill() is required to collect that and add it
                # to the exception.
                exc.stdout, exc.stderr = process.communicate()
            else:
                # POSIX _communicate already populated the output so
                # far into the TimeoutExpired exception.
                process.wait()
            reason = 'timeout'
            stdout, stderr = process.communicate()
        except:  # Including KeyboardInterrupt, communicate handled that.
            process.kill()
            # We don't call process.wait() as .__exit__ does that for us.
            reason = 'other'
            stdout, stderr = process.communicate()
            raise
        else:
            reason = 'finished'
        finally:
            # Always restore the original working directory.
            os.chdir(wd)
        try:
            # bytes output -> decoded text.
            return stdout.decode('utf-8').strip(), stderr.decode('utf-8').strip(), reason
        except AttributeError:
            try:
                # Output was already str.
                return stdout.strip(), stderr.strip(), reason
            except AttributeError:
                # Output was None (no pipes were requested).
                return stdout, stderr, reason
See the following SO post for a short discussion: How to terminate a python subprocess launched with shell=True
My goal is to implement a Python 3 method that will support running a system command (using subprocess) following a few requirements:
Running long lasting commands
Live logging of both stdout and stderr
Enforcing a timeout to stop the command if it fails to complete on time
In order to support live logging, I have used 2 threads which handles both stdout and stderr outputs.
My challenge is to enforce the timeout on the threads and the subprocess process.
My attempt to implement the timeout using a signal handler, seems to freeze the interpreter as soon as the handler is called.
What's wrong with my implementation ?
Is there any other way to implement my requirements?
Here is my current implementation attempt:
def run_live_output(cmd, timeout=900, **kwargs):
    """Run *cmd*, logging stdout/stderr live; return the combined output.

    NOTE(review): the SIGALRM-based timeout below is the subject of the
    question -- the main thread appears to freeze once the handler fires.
    """
    full_output = StringIO()

    def log_popen_pipe(p, log_errors=False):
        # Drain one of the process's pipes line by line until it exits.
        while p.poll() is None:
            output = ''
            if log_errors:
                output = p.stderr.readline()
                log.warning(f"{output}")
            else:
                output = p.stdout.readline()
                log.info(f"{output}")
            full_output.write(output)
        # Non-zero exit code: log whatever is left on stderr.
        if p.poll():
            log.error(f"{cmd}\n{p.stderr.readline()}")

    class MyTimeout(Exception):
        # Raised by the SIGALRM handler to signal the timeout.
        pass

    def handler(signum, frame):
        log.info(f"Signal handler called with signal {signum}")
        raise MyTimeout

    with subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        universal_newlines=True,
        **kwargs
    ) as sp:
        with ThreadPoolExecutor(2) as pool:
            try:
                signal.signal(signal.SIGALRM, handler)
                signal.alarm(timeout)
                # One worker per pipe so stdout and stderr are drained
                # concurrently.
                r1 = pool.submit(log_popen_pipe, sp)
                r2 = pool.submit(log_popen_pipe, sp, log_errors=True)
                r1.result()
                r2.result()
            except MyTimeout:
                log.info(f"Timed out - Killing the threads and process")
                # shutdown(wait=True) blocks while the workers still loop on
                # p.poll() -- this is the freeze described in the answer.
                pool.shutdown(wait=True)
                sp.kill()
            except Exception as e:
                log.info(f"{e}")
    return full_output.getvalue()
Q-1) My attempt to implement the timeout using a signal handler, seems to freeze the interpreter as soon as the handler is called, What's wrong with my implementation ?
A-1) No, your signal handler is not freezing. There is freezing, but not in the signal handler — the signal handler is fine. Your main thread is blocked (frozen) when you call pool.shutdown(wait=True), because your subprocess is still running and you loop with `while p.poll() is None:` in the log_popen_pipe function. That's why your main thread will not continue until log_popen_pipe has finished.
To solve this issue, we need to remove pool.shutdown(wait=True) and then call sp.terminate(). I suggest using sp.terminate() instead of sp.kill(), because sp.kill() sends the SIGKILL signal, which is not preferred until you really need it. In addition, at the end of the `with ThreadPoolExecutor(2) as pool:` statement, pool.shutdown(wait=True) will be called automatically, and it will not block once the log_popen_pipe function has ended.
In your case log_popen_pipe func will finished if subprocess finished when we do sp.terminate().
Q-2) Is there any other way to implement my requirements?
A-2) Yes there is, you can use Timer class from threading library. Timer class will create 1 thread and this thread will wait for timeout seconds and end of the timeout seconds, this created thread will call sp.terminate func
Here is the code:
from io import StringIO
import signal,subprocess
from concurrent.futures import ThreadPoolExecutor
import logging as log
from threading import Timer
# Emit INFO-level records and above from the root logger.
log.root.setLevel(log.INFO)


def run_live_output(cmd, timeout=900, **kwargs):
    """Run *cmd*, logging stdout/stderr live; return the combined output.

    A threading.Timer terminates the process after *timeout* seconds,
    which in turn lets both pipe-reader threads finish naturally.
    """
    full_output = StringIO()

    def log_popen_pipe(p, log_errors=False):
        # Drain one pipe line by line until the process exits.
        while p.poll() is None:
            output = ''
            if log_errors:
                output = p.stderr.readline()
                log.warning(f"{output}")
            else:
                output = p.stdout.readline()
                log.info(f"{output}")
            full_output.write(output)
        # p.poll() is the return code once the process has finished.
        if p.poll()!=None:
            log.error(f"subprocess finished, {cmd}\n{p.stdout.readline()}")

    with subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        stdin=subprocess.PIPE,
        universal_newlines=True,
        **kwargs
    ) as sp:
        # Watchdog thread: terminate the subprocess after `timeout` seconds.
        Timer(timeout,sp.terminate).start()
        with ThreadPoolExecutor(2) as pool:
            try:
                # One worker per pipe (stdout and stderr).
                r1 = pool.submit(log_popen_pipe, sp)
                r2 = pool.submit(log_popen_pipe, sp, log_errors=True)
                r1.result()
                r2.result()
            except Exception as e:
                log.info(f"{e}")
    return full_output.getvalue()


run_live_output(["python3","...."],timeout=4)
By the way p.poll() will return the returncode of the terminated subprocess. If you want to get output of successfully terminated subprocess, you need to use if p.poll()==0 0 generally means subprocess successfully terminated
This is what I have so far...
from gpiozero import MotionSensor
import subprocess
import threading
import time
# PIR motion sensor on GPIO pin 4.
pir = MotionSensor(4)
while True:
    # Block until the sensor reports motion, then play the playlist.
    pir.wait_for_motion()
    print("Start Playing Music")
    # call() blocks until mplayer exits, so motion is ignored while playing.
    subprocess.call(['mplayer', '-vo', 'null', '-ao', 'alsa', '-playlist', 'myplaylist', '-shuffle'])
The music playing part works great, but as for the timing, I've tried threading and time, but all seem to do is pause the code for a given amount of time. I want to run the subprocess for a given amount of time, then return to wait on motion. I'm still learning. Thanks for your help.
Python 2.7 - 3.x
Create your subprocess command. I have chosen Popen.
Popen doesn't block, allowing you to interact with the process while it's running, or continue with other things in your Python program. The call to Popen returns a Popen object.
You can read the difference between subprocess.Popen and subprocess.call here
You can use shlex module to split your string command - very comfortable.
After that, you can call your command in the thread. From this moment, you can manage your task called in a thread. There is a simple example, how to do it:
Example of code:
import logging
import shlex
import subprocess
import sys
import threading
# Append all records to log.log with a timestamped format.
logging.basicConfig(filename='log.log',
                    filemode='a',
                    format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                    datefmt='%H:%M:%S',
                    level=logging.INFO)
# Module-level logger used by the functions below.
log = logging.getLogger(__name__)
def exec_cmd(command):
    """Launch *command* (watched by _thread_command) and log its output."""
    try:
        proc = subprocess.Popen(
            shlex.split(command),  # nosec
            shell=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
        # Arm the watchdog thread before collecting the output.
        _thread_command(proc)
        out, err = proc.communicate()
        if err:
            log.error(err)
        else:
            log.info(out)
    except subprocess.CalledProcessError as su_err:
        log.error('Calledprocerr: %s', su_err)
    except OSError as os_error:
        log.error('Could not execute command: %s', os_error)
def _thread_command(task, timeout=5):
    """
    Thread. If task is longer than <timeout> - kill.
    :param task: task to execute.
    """
    watcher = threading.Thread(target=task.wait)
    watcher.start()
    watcher.join(timeout)
    if not watcher.is_alive():
        return
    # Still running after <timeout> seconds: kill the task and bail out.
    task.kill()
    logging.error('Timeout! Executed time is more than: %s', timeout)
    sys.exit(1)
if __name__ == '__main__':
    # 'sleep 10' exceeds the 5-second watchdog, demonstrating the kill path.
    exec_cmd('sleep 10')  # put your string command here
Tested on Centos:
[kchojnowski#zabbix4-worker1 ~]$ cat log.log
11:31:48,348 root ERROR Timeout! Executed time is more than: 5
I have the following python script that runs.
What I want is to run the subprocess for 60 seconds, then send the SIGINT signal to the subprocess and write the output to a file. If I use sleep, the subprocess doesn't run.
#!/usr/bin/python
import os
import subprocess
PIPE = subprocess.PIPE
import signal
import time


def handler(signum, frame):
    # Swallow SIGALRM so the alarm merely interrupts blocking calls.
    pass


signal.signal(signal.SIGALRM, handler)
signal.alarm(60)
# NOTE(review): command should be a list (or use shell=True) -- see answer.
command = "strace -c ./server"
os.chdir("/root/Desktop/")
p = subprocess.Popen(command, stdout=PIPE, stderr=PIPE)
time.sleep(60)
p.send_signal(signal.SIGINT)
signal.alarm(0)
# Python 2 print statement: emit the child's stderr.
print p.communicate()[1]
In Python 3.3 and newer, there is a simpler version of the answer (untested code):
# Open the output file unbuffered so the child's writes land immediately.
with open('output', 'wb', 0) as output_file:
    p = subprocess.Popen("strace -c ./server".split(),
                         stdout=output_file, stderr=subprocess.STDOUT,
                         cwd="/root/Desktop/",
                         close_fds=True)
    try:
        p.wait(60)  # Wait for the child process to finish, or for 60 seconds, whichever comes first.
    except subprocess.TimeoutExpired:
        p.send_signal(signal.SIGINT)
        p.wait()  # Wait for the process to actually exit after receiving the signal.
See also: https://docs.python.org/3/library/subprocess.html#subprocess.Popen.wait
There are several issues:
command should be a list
you should read from p.stdout/p.stderr pipes otherwise the child process may stall if it generates enough output
you should use either time.sleep() or signal.alarm() here, not both.
I want is to run the subprocess to run for 60 sec and then send the SIGINT signal to subprocess and write the output in file.
Start the subprocess, redirect its output to a file:
# Redirect both stdout and stderr of the child into the 'output' file.
with open('output', 'wb', 0) as output_file:
    p = subprocess.Popen("strace -c ./server".split(),
                         stdout=output_file, stderr=subprocess.STDOUT,
                         cwd="/root/Desktop/",
                         close_fds=True)
Send SIGINT in a minute:
class Alarm(Exception):
    # Raised by the SIGALRM handler to break out of p.wait().
    pass


def alarm_handler(signum, frame):
    raise Alarm


# set signal handler
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(60)  # produce SIGALRM in a minute
try:
    p.wait()  # wait for the child process to finish
    signal.alarm(0)  # cancel alarm
except Alarm:
    # Timed out: interrupt the child and wait for it to exit.
    p.send_signal(signal.SIGINT)
    p.wait()
In python 2.7 in windows according to the documentation you can send a CTRL_C_EVENT
(Python 2.7 Subprocess Popen.send_signal documentation).
However when I tried it I did not receive the expected keyboard interrupt in the subprocess.
This is the sample code for for the parent process:
# FILE : parentProcess.py
import subprocess
import time
import signal
# Windows CREATE_NEW_PROCESS_GROUP creation flag (value 512) so the child
# can receive console control events.
CREATE_NEW_PROCESS_GROUP = 512
process = subprocess.Popen(['python', '-u', 'childProcess.py'],
                           stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT,
                           universal_newlines=True,
                           creationflags=CREATE_NEW_PROCESS_GROUP)
print "pid = ", process.pid
index = 0
maxLoops = 15
while index < maxLoops:
    index += 1
    # Send one message every 0.5 seconds
    time.sleep(0.5)
    # Send data to the subprocess
    process.stdin.write('Bar\n')
    # Read data from the subprocess
    temp = process.stdout.readline()
    print temp,
    if (index == 10):
        # Send Keyboard Interrupt
        process.send_signal(signal.CTRL_C_EVENT)
This is the sample code for the child proceess:
# FILE : childProcess.py
import sys
# Echo loop: prefix each line from the parent with 'Foo ' until interrupted.
while True:
    try:
        # Get data from main process
        temp = sys.stdin.readline()
        # Write data out
        print 'Foo ' + temp,
    except KeyboardInterrupt:
        print "KeyboardInterrupt"
If I run the file parentProcess.py I expect to get "Foo Bar" ten times then a "KeyboardInterrupt" followed by "Foo Bar" 4 times but I get "Foo Bar" 15 times instead.
Is there a way to get the CTRL_C_EVENT to behave as a keyboard interrupt just as SIGINT behaves in Linux?
After doing some reading I found some information that seems to contradict the Python documentation regarding CTRL_C_EVENT; in particular it says that
CTRL_C_EVENT
0 Generates a CTRL+C signal. This signal cannot be generated for process groups
The following site provides more information about creation flags:
Process Creation Flags.
This method of signal handling by subprocesses worked for me on both Linux and Windows 2008, both using Python 2.7.2, but it uses Ctrl-Break instead of Ctrl-C. See the note about process groups and Ctrl-C in http://msdn.microsoft.com/en-us/library/ms683155%28v=vs.85%29.aspx.
catcher.py:
import os
import signal
import sys
import time
def signal_handler(signal, frame):
    # Report the signal, then raise to unwind out of the sleep loop below.
    print 'catcher: signal %d received!' % signal
    raise Exception('catcher: i am done')


# On Windows register for SIGBREAK (Ctrl-Break); elsewhere use SIGTERM.
if hasattr(os.sys, 'winver'):
    signal.signal(signal.SIGBREAK, signal_handler)
else:
    signal.signal(signal.SIGTERM, signal_handler)
print 'catcher: started'
try:
    while(True):
        print 'catcher: sleeping...'
        time.sleep(1)
except Exception as ex:
    print ex
    sys.exit(0)
thrower.py:
import signal
import subprocess
import time
import os
args = [
    'python',
    'catcher.py',
]
print 'thrower: starting catcher'
# On Windows the catcher must run in its own process group so it can
# receive console control events.
if hasattr(os.sys, 'winver'):
    process = subprocess.Popen(args, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
    process = subprocess.Popen(args)
print 'thrower: waiting a couple of seconds for catcher to start...'
time.sleep(2)
print 'thrower: sending signal to catch'
if hasattr(os.sys, 'winver'):
    # Ctrl-Break (unlike Ctrl-C) can be delivered to a process group.
    os.kill(process.pid, signal.CTRL_BREAK_EVENT)
else:
    process.send_signal(signal.SIGTERM)
print 'thrower: i am done'
try with
win32api.GenerateConsoleCtrlEvent(CTRL_C_EVENT, pgroupid)
or
win32api.GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, pgroupid)
references:
http://docs.activestate.com/activepython/2.5/pywin3/win32process_CREATE_NEW_PROCESS_GROUP.html
http://msdn.microsoft.com/en-us/library/ms683155%28v=vs.85%29.aspx
read the info about dwProcessGroupId; the group id should be the same as the process id