After some trial and error I came up with the following solution (popen.py):
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import pty
import sys
from subprocess import Popen
from shlex import join
import errno
master_fd, slave_fd = pty.openpty()
cmd = join(sys.argv[1:])
print(">>", cmd)
try:
    p = Popen(
        cmd,
        shell=True,
        stdout=slave_fd,
        stderr=slave_fd,
        stdin=slave_fd,
        close_fds=True,
        universal_newlines=True,
    )
    os.close(slave_fd)
    while p.returncode is None:
        buffer = os.read(master_fd, 512)
        if buffer:
            os.write(1, buffer)
        else:
            break
except OSError as err:
    if err.errno != errno.EIO:
        raise
except KeyboardInterrupt:
    print(f"\n## Err: Terminating the PID: {p.pid}")
    p.terminate()
This works well in most cases:
> ./popen.py date
>> date
Wed 13 May 19:10:54 BST 2020
> ./popen.py date +'%F_%T'
>> date +%F_%T
2020-05-13_19:10:56
> ./popen.py bash -c 'while :; do echo -n .; sleep .5; done;'
>> bash -c 'while :; do echo -n .; sleep .5; done;'
.......^C
## Err: Terminating the PID: 840102
However, it seems that my script is not capable of reading from stdin:
> ./popen.py bash -c 'read -p "Enter something: " x; echo $x'
>> bash -c 'read -p "Enter something: " x; echo $x'
Enter something: bla bla
Come on... read it!!!
^C
## Err: Terminating the PID: 841583
> ./popen.py docker exec -it 9ab85463e3c1 sh
>> docker exec -it 9ab85463e3c1 sh
/opt/work_dir # ^[[20;19R
sfsdf
sdfsdf
^C
## Err: Terminating the PID: 847172
I've also tried skipping the os.close(slave_fd) step, but with exactly the same results :-/
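For what it's worth, the loop above only copies data from master_fd to our stdout; nothing ever copies our own stdin to master_fd, so the child never sees any input. Below is a minimal, untested sketch of a bidirectional copy (assuming the same master_fd as in popen.py above; this is roughly what pty.spawn() does internally):
import os
import select

def relay(master_fd):
    """Copy child output to our stdout and our stdin to the child."""
    while True:
        rfds, _, _ = select.select([master_fd, 0], [], [])
        if master_fd in rfds:
            data = os.read(master_fd, 512)
            if not data:        # EOF: the child closed its side of the pty
                break
            os.write(1, data)   # child output -> our stdout
        if 0 in rfds:
            data = os.read(0, 512)
            if not data:
                break
            os.write(master_fd, data)  # our stdin -> the child's terminal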
My goal is to replicate a bash script similar to the following (bash.sh):
#! /bin/bash --

ACT="${1:?## Err: Sorry, what should I do?}"
DID="${2:?## Err: Oh please.. Where's ID?}"
CMD=( docker exec -it )

case "${ACT}" in
    (run)
        shift 2
        echo ">> ${CMD[@]}" "${DID}" "$@"
        "${CMD[@]}" "${DID}" "$@"
        ;;
    (*)
        echo "## Err: Something went wrong..."
        exit 1
        ;;
esac
Example:
> ./bash.sh run 9ab85463e3c1 sh
>> docker exec -it 9ab85463e3c1 sh
/opt/work_dir # date
Wed May 13 19:08:05 UTC 2020
/opt/work_dir # ^C
> ./bash.sh run 9ab85463e3c1 date +"%F_%T"
>> docker exec -it 9ab85463e3c1 date +%F_%T
2020-05-13_19:35:09
For my use case I eventually came up with the following script (wrapper.py):
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import pty
import fire
from shlex import split
from os.path import realpath, expanduser


def proc(cmd, cd=None):
    """Spawn a process, and connect its controlling terminal with the
    current process's standard io."""
    print("## PID: {}".format(os.getpid()))

    def m_read(fd):
        """Master read function."""
        return os.read(fd, 1024)

    def i_read(fd):
        """Standard input read function."""
        return os.read(fd, 1024)

    cmd = cmd if isinstance(cmd, (list, tuple)) else split(cmd)
    try:
        cwd = os.getcwd()
        if cd is not None:
            print("## Changing directory: {}".format(realpath(cd)))
            os.chdir(expanduser(os.fsdecode(cd)))
        ex_msg = "\n## Exit Status Indicator: {}"
        print(ex_msg.format(pty.spawn(cmd, m_read, i_read)))
    except Exception as err:
        print("## Err: {}".format(str(err)))
    finally:
        os.chdir(cwd)


if __name__ == "__main__":
    fire.Fire({"run": proc})
It can now be used as a regular wrapper script:
> ./wrapper.py run -cd .. 'docker exec -it 9ab85463e3c1 sh'
## PID: 1679972
## Changing directory: /home
/opt/work_dir # date
Sat May 16 15:18:46 UTC 2020
/opt/work_dir # cat /etc/os-release
NAME="Alpine Linux"
ID=alpine
VERSION_ID=3.11.6
PRETTY_NAME="Alpine Linux v3.11"
HOME_URL="https://alpinelinux.org/"
BUG_REPORT_URL="https://bugs.alpinelinux.org/"
/opt/work_dir #
## Exit Status Indicator: 0
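Note that pty.spawn() returns the raw status value from os.waitpid(), not a plain exit code (hence "Exit Status Indicator" above). A small sketch of decoding it, assuming Python 3.9+ for os.waitstatus_to_exitcode():
import os
import pty

status = pty.spawn(["true"])
print(os.waitstatus_to_exitcode(status))  # prints 0 when the child exited cleanly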
I hope that someone will find this helpful.
Related
If I run echo a; echo b in bash, both commands are run. However, if I use subprocess, only the first command is run, with the whole of the rest of the line passed to it as arguments.
The code below echoes a; echo b instead of a b. How do I get it to run both commands?
import subprocess, shlex

def subprocess_cmd(command):
    process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
    proc_stdout = process.communicate()[0].strip()
    print(proc_stdout)

subprocess_cmd("echo a; echo b")
You have to use shell=True in subprocess and skip shlex.split:
import subprocess
command = "echo a; echo b"
ret = subprocess.run(command, capture_output=True, shell=True)
# before Python 3.7:
# ret = subprocess.run(command, stdout=subprocess.PIPE, shell=True)
print(ret.stdout.decode())
returns:
a
b
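Note that ret.stdout is bytes here; on Python 3.7+ you can also pass text=True to subprocess.run() to get str back directly (a small variation on the same call):
import subprocess

ret = subprocess.run("echo a; echo b", capture_output=True, shell=True, text=True)
print(ret.stdout)  # already decoded, no .decode() needed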
I just stumbled on a situation where I needed to run a bunch of lines of bash code (not separated by semicolons) from within Python. In this scenario the proposed solutions do not help. One approach would be to save the code to a file and then run it with Popen, but that wasn't possible in my situation.
What I ended up doing is something like:
import subprocess

commands = '''
echo "a"
echo "b"
echo "c"
echo "d"
'''

process = subprocess.Popen('/bin/bash', stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                           universal_newlines=True)  # text mode, so communicate() accepts str
out, err = process.communicate(commands)
print(out)
So I first create the child bash process and afterwards tell it what to execute. This approach removes the limitations of passing the command directly to the Popen constructor.
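On Python 3.7+ the same idea can be written more compactly with subprocess.run() and its input argument (a sketch):
import subprocess

script = '''
echo "a"
echo "b"
'''
result = subprocess.run(['/bin/bash'], input=script, capture_output=True, text=True)
print(result.stdout)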
Join commands with "&&".
os.system('echo a > outputa.txt && echo b > outputb.txt')
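The && chaining also works through subprocess.run(); the second command only runs if the first one succeeded (a sketch):
import subprocess

subprocess.run('echo a > outputa.txt && echo b > outputb.txt', shell=True, check=True)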
If you're only running the commands in one shot then you can just use the subprocess.check_output convenience function:
import subprocess

def subprocess_cmd(command):
    output = subprocess.check_output(command, shell=True)
    print(output)
>>> command = "echo a; echo b"
>>> shlex.split(command)
['echo', 'a;', 'echo', 'b']
So the problem is that the shlex module does not split on ";".
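If you want to avoid shell=True, one alternative is to split on ";" yourself and run each command separately. A simplistic sketch that ignores quoting edge cases (it breaks if a quoted argument contains ";"):
import shlex
import subprocess

command = "echo a; echo b"
for part in command.split(";"):
    part = part.strip()
    if part:
        subprocess.run(shlex.split(part), check=True)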
I got the error below when I used capture_output=True, which is only available on Python 3.7+:
TypeError: __init__() got an unexpected keyword argument 'capture_output'
After making the changes below, it works fine:
import subprocess

command = '''ls'''
result = subprocess.run(command, stdout=subprocess.PIPE, shell=True)
print(result.stdout.splitlines())
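For completeness, capture_output=True is just shorthand for setting both output pipes, so stderr can be captured the same way on older versions (a sketch):
import subprocess

result = subprocess.run('ls', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
print(result.stdout.splitlines())
print(result.stderr.splitlines())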
import subprocess

cmd = "vsish -e ls /vmkModules/lsom/disks/ | cut -d '/' -f 1 | while read diskID ; do echo $diskID; vsish -e cat /vmkModules/lsom/disks/$diskID/virstoStats | grep -iE 'Delete pending |trims currently queued' ; echo '====================' ;done ;"

def subprocess_cmd(command):
    process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    proc_stdout = process.communicate()[0].strip()
    for line in proc_stdout.decode().split('\n'):
        print(line)

subprocess_cmd(cmd)
I'm trying to run terminal commands from a web Python script.
I tried many things but none seems to work, such as: adding 'www-data' to sudoers, using the full path to the binary, running the command with the sudo word, and using three different system calls (os.spawnl and subprocess); none of that works.
Read-only commands like "ps aux" that only output information work, but a simple echo to a file doesn't. It seems like it needs permissions to do so. What more can I try?
Example from the output: Unexpected error: (<class 'subprocess.CalledProcessError'>, CalledProcessError(2, '/bin/echo hello > /var/www/html/cgi-bin/test2.htm'), <traceback object>)
In that example the /var/www/html/cgi-bin/ folder is owned by "www-data", the same user as the server config.
#!/usr/bin/python3
# coding=utf-8
import os
import sys
import subprocess
import cgi

SCRIPT_PATH = "/var/www/html/scripts/aqi3.py"
DATA_FILE = "/var/www/html/assets/aqi.json"
KILL_PROCESS = "ps aux | grep " + SCRIPT_PATH + " | grep -v \"grep\" | awk '{print $2}' | xargs kill -9"
START_PROCESS = "/usr/bin/python3 " + SCRIPT_PATH + " start > /dev/null 2>&1 &"
STOP_PROCESS = "/usr/bin/python3 " + SCRIPT_PATH + " stop > /dev/null 2>&1 &"

# Don't edit
def killProcess():
    os.spawnl(os.P_NOWAIT, KILL_PROCESS)
    try:
        os.spawnl(os.P_NOWAIT, "/bin/echo hello > /var/www/html/cgi-bin/test2.htm")
        proc = subprocess.Popen(['sudo', 'echo', 'hello > /var/www/html/cgi-bin/test3.htm'])
        print(subprocess.check_output("/bin/echo hello > /var/www/html/cgi-bin/test2.htm", shell=True, timeout=10))
    except:
        print("Unexpected error:", sys.exc_info())
    print(KILL_PROCESS)

def stopSensor():
    killProcess()
    os.spawnl(os.P_NOWAIT, STOP_PROCESS)

def restartProcess():
    killProcess()
    print(START_PROCESS)
    print(os.spawnl(os.P_NOWAIT, START_PROCESS))

def main():
    arguments = cgi.FieldStorage()
    for key in arguments.keys():
        value = arguments[key].value
        if key == 'action':
            if value == 'stop':
                stopSensor()
                print("ok")
                return
            elif value == 'start' or value == 'restart':
                restartProcess()
                print("ok")
                return
            elif value == 'resetdata':
                try:
                    with open(DATA_FILE, 'w') as outfile:
                        outfile.write('[]')
                except:
                    print("Unexpected error:", sys.exc_info())
                print("ok")
                return
    print("?")

main()
I was able to solve my problem with: http://alexanderhoughton.co.uk/blog/lighttpd-changing-default-user-raspberry-pi/
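A side note on the code above: the redirection in "/bin/echo hello > file" is shell syntax, so it only takes effect when a shell is involved; os.spawnl() treats the whole string as a literal executable path, and "sudo echo" redirects as the calling user, not as root. Two variants that should work, as a sketch:
import subprocess

# Let a shell interpret the redirection...
subprocess.run("/bin/echo hello > /var/www/html/cgi-bin/test2.htm", shell=True, check=True)

# ...or skip the shell entirely and write the file directly from Python.
with open("/var/www/html/cgi-bin/test2.htm", "w") as f:
    f.write("hello\n")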
I am trying to execute a tshark command to get some output for a validation, and I am using subprocess.Popen to get this done, but I am seeing that sometimes subprocess.Popen is not able to execute the command. Below is a small function from my code:
import subprocess
import logging
import sys

def fetch_avps(request_name, logger, tcpdump, port, session_id):
    out_list = []
    if request_name == 'CCR':
        com_sessn_filter = """tshark -r "%s" -odiameter.tcp.ports:"%s" -R 'diameter.cmd.code == 272 and diameter.flags.request==1 and !tcp.analysis.retransmission and diameter.flags.T == 0' -Tpdml -Tfields -ediameter.Session-Id -ediameter.CC-Request-Type -ediameter.User-Name -ediameter.Subscription-Id-Data -ediameter.Value-Digits | grep "%s" | cut -f 1-6 --output-delimiter=':'""" % (tcpdump, port, session_id)
    elif request_name == 'CCA':
        com_sessn_filter = """tshark -r "%s" -odiameter.tcp.ports:"%s" -R 'diameter.cmd.code == 272 and diameter.flags.request==0 and !tcp.analysis.retransmission and diameter.flags.T == 0' -Tpdml -Tfields -ediameter.Session-Id -ediameter.CC-Request-Type -ediameter.Result-Code -ediameter.Validity-Time -ediameter.Value-Digits -ediameter.Unit-Quota-Threshold | grep "%s" | cut -f 1-6 --output-delimiter=':'""" % (tcpdump, port, session_id)
    p = subprocess.Popen(com_sessn_filter, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    out = p.stdout.read()
    command_out_list = (out.strip().split("\n"))
    sys.stdout.flush()
    for i in range(len(command_out_list)):
        out_list.append(command_out_list[i].split(":"))
    if out_list[0][0] == '':
        logger.error("Failed to execute Tshark command")
        logger.debug("Failed to execute Tshark command \"%s\" for Session-Id \"%s\"" % (com_sessn_filter, session_id))
        return 0
For example, in the above code, if I have 20 sessions in a loop then subprocess.Popen might fail to execute around 12-13 times. Any help will be very useful.
Below is the stderr I am getting whenever it fails to execute.
(process:11306): GLib-ERROR **: /build/buildd/glib2.0-2.32.4/./glib/gmem.c:165: failed to allocate 4048572208 bytes Trace/breakpoint trap (core dumped)
Why does the subprocess pid (Popen.pid) have a different value from the one the ps command returns?
I've noticed this when ps is called both from inside Python (with subprocess.call()) and from another terminal.
Here's a simple Python file to test:
#!/usr/bin/python3
'''
Test subprocess termination
'''
import subprocess

command = 'cat'
# keep pipes so that cat doesn't complain
proc = subprocess.Popen(command,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        stdin=subprocess.PIPE,
                        shell=True)
print('pid = %d' % proc.pid)
subprocess.call("ps -A | grep -w %s" % command, shell=True)

proc.terminate()
proc.wait()  # make sure it's dead before exiting Python
Usually the pid reported by ps is 1 or 2 more than that reported by Popen.pid.
Because the command is run with shell=True, the pid returned by subprocess is that of the shell process used to run the command.
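To see the difference directly, compare the two invocation styles (a sketch; with an argument list and the default shell=False, Popen.pid is the pid of the command itself):
import subprocess

# shell=True: the reported pid belongs to the intermediate /bin/sh.
with_shell = subprocess.Popen('cat', shell=True, stdin=subprocess.PIPE)

# Argument list, no shell: the reported pid belongs to 'cat' itself.
no_shell = subprocess.Popen(['cat'], stdin=subprocess.PIPE)

print('shell=True pid: %d' % with_shell.pid)
print('shell=False pid: %d' % no_shell.pid)

for p in (with_shell, no_shell):
    p.terminate()
    p.wait()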
I have a script which runs inside a while loop and monitors a MySQL data source every 2 seconds. If I run it from the command line, it runs and works fine. But if I attach it to a daemon, it throws an error saying "MySQL server has gone away" or something similar. I checked and found MySQL up and running. I could even execute queries from other tools.
I badly need help. I am running Ubuntu 10.04.
Error Code
Traceback (most recent call last):
File "/home/masnun/Desktop/daemon/daemon.py", line 67, in <module>
main()
File "/home/masnun/Desktop/daemon/daemon.py", line 35, in main
USERPROG()
File "/home/masnun/Desktop/daemon/mymain.py", line 19, in main
cursor.execute("select * from hits_logs where id > '" + str(last) + "'")
File "/usr/lib/pymodules/python2.6/MySQLdb/cursors.py", line 166, in execute
self.errorhandler(self, exc, value)
File "/usr/lib/pymodules/python2.6/MySQLdb/connections.py", line 35, in defau$
raise errorclass, errorvalue
_mysql_exceptions.OperationalError: (2006, 'MySQL server has gone away')
File: daemon
#! /bin/sh
# example python daemon starter script
# based on skeleton from Debian GNU/Linux
# cliechti@gmx.net

# place the daemon scripts in a folder accessible by root. /usr/local/sbin is a good idea

PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
DAEMON=/home/masnun/Desktop/daemon/daemon.py
NAME=pydaemon
DESC="Example daemon"

test -f $DAEMON || exit 0

set -e

case "$1" in
    start)
        echo -n "Starting $DESC: "
        start-stop-daemon --start --quiet --pidfile /var/run/$NAME.pid \
            --exec $DAEMON
        echo "$NAME."
        ;;
    stop)
        echo -n "Stopping $DESC: "
        start-stop-daemon --stop --quiet --pidfile /var/run/$NAME.pid
        # \ --exec $DAEMON
        echo "$NAME."
        ;;
    #reload)
        #
        # If the daemon can reload its config files on the fly
        # for example by sending it SIGHUP, do it here.
        #
        # If the daemon responds to changes in its config file
        # directly anyway, make this a do-nothing entry.
        #
        # echo "Reloading $DESC configuration files."
        # start-stop-daemon --stop --signal 1 --quiet --pidfile \
        #     /var/run/$NAME.pid --exec $DAEMON
        #;;
    restart|force-reload)
        #
        # If the "reload" option is implemented, move the "force-reload"
        # option to the "reload" entry above. If not, "force-reload" is
        # just the same as "restart".
        #
        echo -n "Restarting $DESC: "
        start-stop-daemon --stop --quiet --pidfile \
            /var/run/$NAME.pid
        # --exec $DAEMON
        sleep 1
        start-stop-daemon --start --quiet --pidfile \
            /var/run/$NAME.pid --exec $DAEMON
        echo "$NAME."
        ;;
    *)
        N=/etc/init.d/$NAME
        # echo "Usage: $N {start|stop|restart|reload|force-reload}" >&2
        echo "Usage: $N {start|stop|restart|force-reload}" >&2
        exit 1
        ;;
esac

exit 0
File: daemon.py
#!/usr/bin/env python
###########################################################################
# configure these paths:
LOGFILE = '/var/log/pydaemon.log'
PIDFILE = '/var/run/pydaemon.pid'

# and let USERPROG be the main function of your project
import mymain
USERPROG = mymain.main
###########################################################################

import sys, os

class Log:
    """file like for writes with auto flush after each write
    to ensure that everything is logged, even during an
    unexpected exit."""
    def __init__(self, f):
        self.f = f
    def write(self, s):
        self.f.write(s)
        self.f.flush()

def main():
    # change to data directory if needed
    os.chdir("/home/masnun/Desktop/daemon")
    # redirect outputs to a logfile
    sys.stdout = sys.stderr = Log(open(LOGFILE, 'a+'))
    # ensure that the daemon runs as a normal user
    os.setegid(1000)  # set group first: "pydaemon"
    os.seteuid(1000)  # set user: "pydaemon"
    # start the user program here:
    USERPROG()

if __name__ == "__main__":
    # do the UNIX double-fork magic, see Stevens' "Advanced
    # Programming in the UNIX Environment" for details (ISBN 0201563177)
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError, e:
        print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)

    # decouple from parent environment
    os.chdir("/")  # don't prevent unmounting....
    os.setsid()
    os.umask(0)

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent, print eventual PID before
            # print "Daemon PID %d" % pid
            open(PIDFILE, 'w').write("%d" % pid)
            sys.exit(0)
    except OSError, e:
        print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)

    # start the daemon main loop
    main()
File: mymain.py
import MySQLdb
from ProxyChecker import ProxyChecker
from time import sleep

config = {"host": "localhost", "username": "root", "password": "masnun", "database": "webtracc_db1"}
connection = MySQLdb.connect(config['host'], config['username'], config['password'], config['database'])
cursor = connection.cursor()

def main():
    while True:
        f = open("last", "r")
        last = f.read().strip()
        f.close()
        if last == '':
            last = 0
        last = int(last)
        cursor.execute("select * from hits_logs where id > '" + str(last) + "'")
        row = cursor.fetchall()
        for x in row:
            pc = ProxyChecker(x[2])
            pc.start()
            last = x[0]
        f = open("last", "w")
        f.write(str(last))
        f.close()
        sleep(2)

if __name__ == "__main__":
    main()
File: ProxyChecker.py
#! /usr/bin/env python
from threading import Thread
from CheckProxy import CheckProxy

class ProxyChecker(Thread):
    def __init__(self, data):
        self.data = data
        Thread.__init__(self)

    def run(self):
        pc = CheckProxy()
        pc.check(self.data)
File: CheckProxy.py
#! /usr/bin/env python
import MySQLdb
import socket

class CheckProxy:
    def __init__(self):
        self.config = {"host": "localhost", "username": "root", "password": "masnun", "database": "webtracc_db1"}
        self.portList = [80]

    def check(self, host):
        connection = MySQLdb.connect(self.config['host'], self.config['username'], self.config['password'], self.config['database'])
        cursor = connection.cursor()
        proxy = False
        try:
            for x in self.portList:
                sock = socket.socket()
                sock.connect((host, x))
                # print "connected to: " + str(x)
                sock.close()
            cursor.execute("select count(*) from list_entries where list='1' and ip='" + host + "' ")
            data = cursor.fetchall()
            # print data[0][0]
            if data[0][0] < 1:
                print 'ok'
                proxy = True
        except socket.error, e:
            # print e
            pass
        if proxy:
            cursor.execute("insert into list_entries (ip,list) values ('" + host.strip() + "','1') ")
        else:
            cursor.execute("insert into list_entries (ip,list) values ('" + host.strip() + "','2') ")

if __name__ == "__main__":
    print "Direct access not allowed!"
I haven't worked with Python, but it almost seems you are making a database connection and then forking. The other way around should work: fork at will, then connect in the remaining process, possibly in your mymain.py main() method.
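A minimal sketch of that reordering, assuming the same MySQLdb setup as in mymain.py above (the connection moves inside main(), so it is only opened after daemon.py has finished its double fork):
import MySQLdb
from time import sleep

config = {"host": "localhost", "username": "root",
          "password": "masnun", "database": "webtracc_db1"}

def main():
    # Connect here, *after* the double fork in daemon.py, so the
    # connection is not inherited from a parent that has already exited.
    connection = MySQLdb.connect(config['host'], config['username'],
                                 config['password'], config['database'])
    cursor = connection.cursor()
    while True:
        cursor.execute("select * from hits_logs where id > %s", (0,))
        # ... process rows as before ...
        sleep(2)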