script will not save locally over ssh - python

I am having some issues getting a script to run.
This works perfectly from the command line:
ssh root@ip.add.re.ss /usr/sbin/tcpdump -i eth0 -w - | /usr/sbin/tcpdump -r - -w /home/cuckoo/cuckoo/storage/analyses/1/saveit.pcap
However, when I use this script:
#!/usr/bin/env python
import sys
import os
import subprocess

cmd = []
remote_cmd = []
local_cmd = []
connect_cmd = []
outfile = None

try:
    connect_cmd = str.split(os.environ["RTCPDUMP_CMD"], " ")
except:
    connect_cmd = str.split("ssh root@fw", " ")

remote_cmd.extend(str.split("/usr/sbin/tcpdump -w -", " "))
local_cmd.extend(str.split("/usr/sbin/tcpdump -r -", " "))

for argument in xrange(1, len(sys.argv)):
    if sys.argv[argument] == "-w":
        outfile = sys.argv[argument+1]
        sys.argv[argument] = None
        sys.argv[argument+1] = None
    if sys.argv[argument] == "-i":
        remote_cmd.append(sys.argv[argument])
        remote_cmd.append(sys.argv[argument+1])
        sys.argv[argument] = None
        sys.argv[argument+1] = None
    if not sys.argv[argument] == None:
        if " " in sys.argv[argument]:
            local_cmd.append("'" + sys.argv[argument] + "'")
            remote_cmd.append("'" + sys.argv[argument] + "'")
        else:
            local_cmd.append(sys.argv[argument])
            remote_cmd.append(sys.argv[argument])

if not outfile == None:
    local_cmd.insert(1, "-w")
    local_cmd.insert(2, outfile)

cmd.extend(connect_cmd)
cmd.extend(remote_cmd)
cmd.append("|")
cmd.extend(local_cmd)

try:
    subprocess.call(cmd)
except KeyboardInterrupt:
    exit(0)
It spawns both tcpdump processes on the remote host, and the second tcpdump fails to save because the output path does not exist there. I added a print cmd at the end, and the ssh command being passed to the prompt is exactly the same (when running the script itself, cuckoo passes a ton of options when it calls the script; it also puts the -w - before the -i eth0, but I tested that and it works from the command line as well).
So I am thoroughly stumped: why does the pipe to the local host work from the prompt but not in the script?
Oh, and credit for the script belongs to Michael Boman
http://blog.michaelboman.org/2013/02/making-cuckoo-sniff-remotely.html

So I am thoroughly stumped: why does the pipe to the local host work from the prompt but not in the script?
Because pipes are handled by the shell, and you're not running a shell.
If you look at the docs, under Replacing Older Functions with the subprocess Module, it explains how to do the same thing shell pipelines do. Here's the example:
output=`dmesg | grep hda`
# becomes
p1 = Popen(["dmesg"], stdout=PIPE)
p2 = Popen(["grep", "hda"], stdin=p1.stdout, stdout=PIPE)
p1.stdout.close() # Allow p1 to receive a SIGPIPE if p2 exits.
output = p2.communicate()[0]
So, in your terms:
cmd.extend(connect_cmd)
cmd.extend(remote_cmd)
try:
    remote = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    local = subprocess.Popen(local_cmd, stdin=remote.stdout)
    remote.stdout.close()  # Allow remote to receive a SIGPIPE if local exits.
    local.communicate()
except KeyboardInterrupt:
    exit(0)
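Alternatively, if you prefer to keep a single call, you can hand the whole pipeline to a shell yourself. This is only a sketch built from the variables above (it assumes the collected arguments contain nothing that needs extra shell quoting): with shell=True the "|" is interpreted by /bin/sh instead of being passed to tcpdump as a literal argument.
# Sketch: build one command string and let a local shell interpret the pipe.
# Assumes the arguments gathered above need no additional shell quoting.
pipeline = " ".join(connect_cmd + remote_cmd) + " | " + " ".join(local_cmd)
try:
    subprocess.call(pipeline, shell=True)
except KeyboardInterrupt:
    exit(0)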

Related

Python script to run multiple processes using subprocess.call and get their pids

I have created a script that reads the paths of other scripts from a config file and then runs those scripts sequentially using the subprocess module.
My script is as follows:
#! /usr/bin/python3
import configparser
import argparse as ap
import subprocess
import sys
import shlex
import os
import signal

def file_reader():
    config_dict = {}
    configParser = configparser.ConfigParser()
    configParser.read('config.ini')
    for section_name in configParser.sections():
        for (each_key, each_value) in configParser.items(section_name):
            config_dict[each_key] = each_value
        config_dict = dict(configParser.items(section_name))
        reversed_dictionary = dict(map(reversed, configParser.items(section_name)))
        list_vals = list(config_dict.values())
        list_keys = list(config_dict.keys())
    return config_dict, reversed_dictionary

def main():
    config_dict, reversed_dictionary = file_reader()
    for key, val in config_dict.items():
        print(key)
        print(config_dict[key])
    parser = ap.ArgumentParser()
    parser.add_argument('-s', '--start', help='start script', action='store_true')
    parser.add_argument('-q', '--stop', help='stop script', action='store_true')
    args = parser.parse_args()
    if args.start and args.stop:
        print("error")
    elif args.start:
        for k, v in reversed_dictionary.items():
            print(k, "=", v)
            if reversed_dictionary[k] == v:
                proc = subprocess.call([k], shell=True, close_fds=True)
                if proc != 0:
                    if proc < 0:
                        print("\nKilled by signal!", -proc)
                    else:
                        print("\nReturn code is:", proc)
                else:
                    print("\nSuccess")
        sys.exit(0)
    elif args.stop:
        kill_process()

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        exit()
My config file is as follows:
[my-config]
path1 = /usr/local/src/sipp-3.3/sipp -i 10.171.0.202 -mi 10.171.0.202 -sf HA_demo.xml -inf HA_demo.csv 10.171.0.231:5060 -p 5060 -r 1 -rp 1s -l 1 -m 1 -watchdog_minor_threshold 1500 -trace_err -aa -d 350s -oocsn ooc_default -t u1 -trace_screen -skip_rlimit
path2 = /home/asad.javed/script.py
path3 = /home/asad.javed/stop-services.sh
There are problems that I have not been able to resolve yet.
I want to run the child processes individually, in a sequential manner, from subprocess.call and get the pid of each process. The child processes in this case are the scripts whose paths I have given in the config file.
I want to get the pid of each child process and do additional work with it, such as killing the process by its pid if it gets stuck during execution.
The other problem I am experiencing is that while executing the script I always get a return code of 1, even though the script has executed successfully, in which case the exit status should be 0.
I am new to the Python world and help will be very much appreciated if someone clarifies the above issues.
Run child processes sequentially and get the pid
import subprocess

commands = ["date", "myscript.sh"]
child_processes = []
for cmd in commands:
    proc = subprocess.Popen(cmd)
    child_processes.append(proc)
    print(proc.pid)
Do additional work
for proc in child_processes:
    try:
        outs, errs = proc.communicate(timeout=15)
    except subprocess.TimeoutExpired:  # TimeoutExpired lives in the subprocess module
        proc.kill()
        outs, errs = proc.communicate()
Notes
See subprocess.Popen. The Popen object has methods to poll or communicate with the subprocess, and it also has a pid attribute.
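If the scripts really must run one after another (the loop above launches them all before waiting), a minimal sketch along the same lines waits for each child before starting the next; the 60-second timeout and the example command list are placeholders, not values from the question.
import subprocess

commands = ["date", "myscript.sh"]             # placeholder commands, as above
for cmd in commands:
    proc = subprocess.Popen(cmd, shell=True)   # shell=True if the entry contains arguments
    print("started pid:", proc.pid)
    try:
        rc = proc.wait(timeout=60)             # block until this child finishes
    except subprocess.TimeoutExpired:
        proc.kill()                            # kill a stuck child via its Popen handle
        rc = proc.wait()
    print("return code:", rc)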

Write command stdout to console and file (live) -- Python 3.5 + subprocess.run()

Platform: Python 3.5 on rhel6 (64-bit)
Scenario: Execute a Bash command which runs a job. This job returns several lines of output to stdout, every few seconds.
Command: ./run_job --name 'myjob' --config_file ./myconfig.conf
Goal: Using Python's subprocess.run(), I am trying to run the above command and capture the stdout of the process, print it to the console and also save it to a file. I need the stdout to be printed as it becomes available (live).
What I've Tried: I have searched extensively for this, and every solution I found was using subprocess.Popen(). This method somewhat worked, but implementing it resulted in breaking the return logic I currently have. Reading through the Python documentation, the subprocess.run() method is the recommended way as of Python 3.5, so that's why I am going this route.
My Setup: So far, I have one common file containing the logging setup and the shell-running function below.
# imports assumed from the rest of the common file
import logging
from os import path, environ
from subprocess import run, PIPE, STDOUT
from sys import exit

def setup_logging(log_lvl="INFO"):
    script_name = path.splitext(path.basename(__file__))[0]
    log_path = environ["HOME"] + "/logs/" + script_name + ".log"
    logging.basicConfig(
        level=getattr(logging, log_lvl.upper()),
        format="%(asctime)s: [%(levelname)s] %(message)s",
        handlers=[
            logging.FileHandler(filename=log_path, mode="w", encoding="utf-8"),
            logging.StreamHandler()
        ]
    )

def run_shell(cmd_str, print_stdout=True, fail_msg=""):
    logger = logging.getLogger()
    result = run(cmd_str, universal_newlines=True, shell=True, stderr=STDOUT, stdout=PIPE)
    cmd_stdout = result.stdout.strip()
    cmd_code = result.returncode
    if print_stdout and cmd_stdout != "":
        logger.info("[OUT] " + cmd_stdout)
    if cmd_code != 0 and fail_msg != "":
        logger.error(fail_msg)
        exit(cmd_code)
    return cmd_code, cmd_stdout
So I would use the following code to run my script:
run_shell("./run_job --name 'myjob' --config_file ./myconfig.conf", fail_msg="Job failed.")
This partially works, but the full stdout is printed only when the process has completed. So the terminal will hang until that happens. I need to print the stdout line by line, in a live manner, so that it can be written by the logger.
Is there any way to do this without overhauling my existing code? Any help would be appreciated.
After trying for some time using threads and polling, I couldn't quite get it to work. However, from that same thread linked by Todd, I found another way to achieve what I need by using Popen:
def run_shell(cmd_str, print_stdout=True, fail_msg=""):
    """Run a Linux shell command and return the result code."""
    p = Popen(cmd_str,
              shell=True,
              stderr=STDOUT,
              stdout=PIPE,
              bufsize=1,
              universal_newlines=True)
    logger = logging.getLogger()
    output_lines = list()
    while True:
        output = p.stdout.readline().strip()
        if len(output) == 0 and p.poll() is not None:
            break
        output_lines.append(output)
        if print_stdout:
            logger.info("[cmd] " + output)
    cmd_code = p.returncode
    cmd_stdout = "\n".join(output_lines)
    if cmd_code != 0 and fail_msg != "":
        logger.error(fail_msg)
        exit(-1)
    return cmd_code, cmd_stdout
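Note that bufsize=1 only gives line buffering because universal_newlines=True puts the pipe in text mode, which is what lets readline() return output as it is produced. The call site stays exactly the same as before, for example:
rc, out = run_shell("./run_job --name 'myjob' --config_file ./myconfig.conf",
                    fail_msg="Job failed.")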

Lighttpd cgi python fail to run system processes

I'm trying to run terminal commands from a web python script.
I tried many things but none seem to work, such as: adding 'www-data' to sudoers, using the full path to the binary, running the command with sudo, and using 3 different system calls (os.spawnl and subprocess), and none of that works.
Read-only commands like "ps aux" that only output information work, but a simple echo to a file doesn't. It seems like it needs permissions to do so. What more can I try?
Example from the output: Unexpected error: (<class 'subprocess.CalledProcessError'>, CalledProcessError(2, '/bin/echo hello > /var/www/html/cgi-bin/test2.htm'), <traceback object>)
In that example the /var/www/html/cgi-bin/ folder is owned by "www-data", the same user the server runs as.
#!/usr/bin/python3
# coding=utf-8
import os
import sys
import subprocess
import cgi

SCRIPT_PATH = "/var/www/html/scripts/aqi3.py"
DATA_FILE = "/var/www/html/assets/aqi.json"
KILL_PROCESS = "ps aux | grep " + SCRIPT_PATH + " | grep -v \"grep\" | awk '{print $2}' | xargs kill -9"
START_PROCESS = "/usr/bin/python3 " + SCRIPT_PATH + " start > /dev/null 2>&1 &"
STOP_PROCESS = "/usr/bin/python3 " + SCRIPT_PATH + " stop > /dev/null 2>&1 &"

# Don't edit
def killProcess():
    os.spawnl(os.P_NOWAIT, KILL_PROCESS)
    try:
        os.spawnl(os.P_NOWAIT, "/bin/echo hello > /var/www/html/cgi-bin/test2.htm")
        proc = subprocess.Popen(['sudo', 'echo', 'hello > /var/www/html/cgi-bin/test3.htm'])
        print(subprocess.check_output("/bin/echo hello > /var/www/html/cgi-bin/test2.htm", shell=True, timeout=10))
    except:
        print("Unexpected error:", sys.exc_info())
    print(KILL_PROCESS)

def stopSensor():
    killProcess()
    os.spawnl(os.P_NOWAIT, STOP_PROCESS)

def restartProcess():
    killProcess()
    print(START_PROCESS)
    print(os.spawnl(os.P_NOWAIT, START_PROCESS))

def main():
    arguments = cgi.FieldStorage()
    for key in arguments.keys():
        value = arguments[key].value
        if key == 'action':
            if value == 'stop':
                stopSensor()
                print("ok")
                return
            elif value == 'start' or value == 'restart':
                restartProcess()
                print("ok")
                return
            elif value == 'resetdata':
                try:
                    with open(DATA_FILE, 'w') as outfile:
                        outfile.write('[]')
                except:
                    print("Unexpected error:", sys.exc_info())
                print("ok")
                return
    print("?")

main()
I was able to solve my problem with: http://alexanderhoughton.co.uk/blog/lighttpd-changing-default-user-raspberry-pi/
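Separately from the permissions fix, note that a redirection such as "> file" is shell syntax: os.spawnl treats the whole string as the path of one executable, and a Popen argument list passes the redirection to echo as a literal argument, so neither of those attempts can create the file. A small sketch (reusing the test path from above, and assuming the CGI user may write to that directory) that avoids the shell entirely:
import subprocess

# Redirect the command's stdout to a file from Python instead of using "> file".
with open("/var/www/html/cgi-bin/test2.htm", "w") as out:
    subprocess.call(["/bin/echo", "hello"], stdout=out)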

How can I start a process and put it to background in python?

I am currently writing my first Python program (in Python 2.6.6). The program facilitates starting and stopping different applications running on a server by giving the user common commands (like starting and stopping system services on a Linux server).
I am starting the applications' startup scripts by
p = subprocess.Popen(startCommand, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate()
print(output)
The problem is that the startup script of one application stays in the foreground, so p.communicate() waits forever. I have already tried putting "nohup startCommand &" in front of the startCommand, but that did not work as expected.
As a workaround I now use the following bash script to call the application's start script:
#!/bin/bash
LOGFILE="/opt/scripts/bin/logs/SomeServerApplicationStart.log"
nohup /opt/someDir/startSomeServerApplication.sh >${LOGFILE} 2>&1 &
STARTUPOK=$(tail -1 ${LOGFILE} | grep "Server started in RUNNING mode" | wc -l)
COUNTER=0
while [ $STARTUPOK -ne 1 ] && [ $COUNTER -lt 100 ]; do
    STARTUPOK=$(tail -1 logs/SomeServerApplicationStart.log | grep "Server started in RUNNING mode" | wc -l)
    if (( STARTUPOK )); then
        echo "STARTUP OK"
        exit 0
    fi
    sleep 1
    COUNTER=$(( $COUNTER + 1 ))
done
echo "STARTUP FAILED"
The bash script is called from my Python code. This workaround works perfectly, but I would prefer to do it all in Python...
Is subprocess.Popen the wrong way? How could I accomplish my task in Python only?
First, it is easy not to block the Python script in communicate()... by not calling communicate()! Just read from the command's output or error output until you find the correct message, and then just forget about the command.
# to avoid waiting for an EOF on a pipe ...
def getlines(fd):
    line = bytearray()
    c = None
    while True:
        c = fd.read(1)
        if not c:  # EOF: read() returns an empty string here, not None
            return
        line += c
        if c == '\n':
            yield str(line)
            del line[:]

p = subprocess.Popen(startCommand, shell=True, stdout=subprocess.PIPE,
                     stderr=subprocess.STDOUT)  # send stderr to stdout, same as 2>&1 for bash
for line in getlines(p.stdout):
    if "Server started in RUNNING mode" in line:
        print("STARTUP OK")
        break
else:  # end of input without getting startup message
    print("STARTUP FAILED")
p.poll()  # get status from child to avoid a zombie
# other error processing
The problem with the above is that the server is still a child of the Python process and could get unwanted signals such as SIGHUP. If you want to make it a daemon, you must first start a subprocess that in turn starts your server. That way, when the first child ends, it can be waited on by the caller and the server will get a PPID of 1 (adopted by the init process). You can use the multiprocessing module to ease that part.
Code could be like:
import multiprocessing
import subprocess

# to avoid waiting for an EOF on a pipe ...
def getlines(fd):
    line = bytearray()
    c = None
    while True:
        c = fd.read(1)
        if not c:  # EOF: read() returns an empty string here, not None
            return
        line += c
        if c == '\n':
            yield str(line)
            del line[:]

def start_child(cmd):
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                         shell=True)
    for line in getlines(p.stdout):
        print line
        if "Server started in RUNNING mode" in line:
            print "STARTUP OK"
            break
    else:
        print "STARTUP FAILED"

def main():
    # other stuff in program
    p = multiprocessing.Process(target=start_child, args=(server_program,))
    p.start()
    p.join()
    print "DONE"
    # other stuff in program

# protect program startup for multiprocessing module
if __name__ == '__main__':
    main()
One could wonder why the getlines generator is needed when a file object is itself an iterator that returns one line at a time. The problem is that the file iterator internally calls read(), which reads until EOF when the file is not connected to a terminal. As it is connected to a pipe here, you would not get anything until the server ends... which is not what is expected.
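As an aside (not part of the original answer), a common shorthand for the same line-by-line reading is to wrap readline in iter(), which stops at the empty string that readline returns at EOF (here p is the Popen object inside start_child):
# Equivalent to getlines(): readline() is called until it returns '' at EOF.
for line in iter(p.stdout.readline, ''):
    if "Server started in RUNNING mode" in line:
        print "STARTUP OK"
        break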

python subprocess communicate freezes

I have the following python code that hangs :
cmd = ["ssh", "-tt", "-vvv"] + self.common_args
cmd += [self.host]
cmd += ["cat > %s" % (out_path)]
p = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(in_string)
It is supposed to save a string (in_string) into a remote file over ssh.
The file is correctly saved but then the process hangs. If I use
cmd += ["echo"] instead of
cmd += ["cat > %s" % (out_path)]
the process does not hang, so I am pretty sure that I misunderstand something about how communicate decides that the process has exited.
Do you know how I should write the command so that the "cat > file" does not make communicate hang?
The -tt option allocates a tty, which prevents the child process from exiting when .communicate() closes p.stdin (the EOF is ignored). This works:
import pipes
from subprocess import Popen, PIPE
cmd = ["ssh", self.host, "cat > " + pipes.quote(out_path)] # no '-tt'
p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate(in_string)
You could use paramiko, a pure-Python ssh library, to write data to a remote file via ssh:
#!/usr/bin/env python
import os
import posixpath
import sys
from contextlib import closing
from paramiko import SSHConfig, SSHClient
hostname, out_path, in_string = sys.argv[1:] # get from command-line
# load parameters to setup ssh connection
config = SSHConfig()
with open(os.path.expanduser('~/.ssh/config')) as config_file:
    config.parse(config_file)
d = config.lookup(hostname)

# connect
with closing(SSHClient()) as ssh:
    ssh.load_system_host_keys()
    ssh.connect(d['hostname'], username=d.get('user'))
    with closing(ssh.open_sftp()) as sftp:
        makedirs_exists_ok(sftp, posixpath.dirname(out_path))
        with sftp.open(out_path, 'wb') as remote_file:
            remote_file.write(in_string)
where the makedirs_exists_ok() function mimics os.makedirs():
from functools import partial
from stat import S_ISDIR

def isdir(ftp, path):
    try:
        return S_ISDIR(ftp.stat(path).st_mode)
    except EnvironmentError:
        return None

def makedirs_exists_ok(ftp, path):
    def exists_ok(mkdir, name):
        """Don't raise an error if name is already a directory."""
        try:
            mkdir(name)
        except EnvironmentError:
            if not isdir(ftp, name):
                raise

    # from os.makedirs()
    head, tail = posixpath.split(path)
    if not tail:
        assert path.endswith(posixpath.sep)
        head, tail = posixpath.split(head)

    if head and tail and not isdir(ftp, head):
        exists_ok(partial(makedirs_exists_ok, ftp), head)  # recursive call

    # do create directory
    assert isdir(ftp, head)
    exists_ok(ftp.mkdir, path)
It makes sense that the cat command hangs. It is waiting for an EOF. I tried sending an EOF in the string but couldn't get it to work. Upon researching this question, I found a great module for streamlining the use of SSH for command-line tasks like your cat example. It might not be exactly what you need for your use case, but it does do what your question asks.
Install fabric with
pip install fabric
Inside a file called fabfile.py put
from fabric.api import run

def write_file(in_string, path):
    run('echo {} > {}'.format(in_string, path))
And then run this from the command prompt with:
fab -H username@host write_file:in_string=test,path=/path/to/file
