Python subprocess.Popen() is not starting the subprocess properly

I have a Python test that starts a TCP server as a subprocess.
def test_client(self):
    if sys.platform.startswith('linux'):
        proc = subprocess.Popen([f'{self.bin_output_path}/CaptureUnitHalServer'], stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    else:
        proc = subprocess.Popen([f'{self.bin_output_path}/CaptureUnitHalServer'], stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
    hal_access = HalAccess(port=12309)
    hal = CaptureUnitHal(hal_access)
    response = hal.test_scratch(TestScratchReq([1]))
    assert TestScratchCnf(verdict=True, return_value=5) == response
    print(f"\nRESPONSE: {response}\n")
    # The first proc.communicate() waits for the subprocess to finish. As the server runs
    # forever, a TimeoutExpired error is thrown. The second proc.communicate() in the
    # exception handler gets stdout and stderr from the server.
    try:
        outs, errs = proc.communicate(timeout=2)
        print(f'{outs.decode()}\n')
    except subprocess.TimeoutExpired:
        proc.kill()
        outs, errs = proc.communicate()
        print("CaptureUnitHalServer stdout:")
        print(f'{outs.decode()}\n')
        print("CaptureUnitHalServer stderr:")
        print(f'{errs.decode()}\n')
The hal is a simple TCP client that sends a test request (TestScratchReq) to the server and receives the response.
On Linux this works perfectly fine, but when I run it on Windows, the code blocks forever at the line response = hal.test_scratch(TestScratchReq([1])).
This line calls
def send_and_receive(self, request: str) -> str:
    self._send(request)
    return self._receive()
and blocks in the socket.recv() call in self._receive()
data = self._socket.recv(1024).decode(encoding=self.MESSAGE_ENCODING) # blocking
So it seems like the server is not started properly as a subprocess on Windows when calling subprocess.Popen().
The following command shows the port as listening however:
Get-NetTCPConnection -State Listen | grep 12309
0.0.0.0 12309 0.0.0.0 0 Listen
I have a second implementation that also works on Windows:
def test_client(self):
    daemon = Thread(target=self.server_thread, daemon=True, name='HalServer')
    daemon.start()
    hal_access = HalAccess(port=12309)
    hal = CaptureUnitHal(hal_access)
    response = hal.test_scratch(TestScratchReq([1]))
    print(response)
    assert TestScratchCnf(verdict=True, return_value=5) == response
    daemon.join()  # wait for daemon timeout

def server_thread(self):
    if sys.platform.startswith('linux'):
        result = subprocess.run([f'{self.bin_output_path}/CaptureUnitHalServer'], timeout=5, stdout=subprocess.PIPE)
    else:
        result = subprocess.run([f'{self.bin_output_path}/CaptureUnitHalServer'], timeout=5, creationflags=subprocess.CREATE_NEW_CONSOLE)
    print(result.stdout.decode())
    print(result.stderr.decode())
But it seems overcomplicated to me to have another thread just to start the blocking subprocess.run call, which throws a TimeoutExpired error after 5 seconds. Another problem with this solution is that I don't get the stdout and stderr of the subprocess: the following two lines never print anything, because the thread is killed by the exception before it reaches them.
print(result.stdout.decode())
print(result.stderr.decode())
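As a side note (a sketch of mine, not from the original post): subprocess.run() attaches whatever output it has captured so far to the TimeoutExpired exception, so the server's stdout and stderr can be recovered in the except block, provided both streams are redirected to PIPE. run_server_with_timeout is a hypothetical helper standing in for the test body:

import subprocess

def run_server_with_timeout(bin_output_path, timeout=5):
    try:
        result = subprocess.run([f'{bin_output_path}/CaptureUnitHalServer'],
                                timeout=timeout,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        print(result.stdout.decode())
        print(result.stderr.decode())
    except subprocess.TimeoutExpired as exc:
        # run() kills the child on timeout; exc.stdout and exc.stderr hold
        # the output captured so far (None if the streams were not piped)
        if exc.stdout:
            print(exc.stdout.decode())
        if exc.stderr:
            print(exc.stderr.decode())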
EDIT:
My question is: Why does the first version of the code block on Windows? How does subprocess.Popen() differ between Linux and Windows? Is the subprocess not started properly in the Windows case?
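Not an answer from the thread, but one thing worth ruling out (a sketch under that assumption): Popen() returns as soon as the process is spawned, before the server has necessarily bound the port and started accepting connections. A small readiness poll between Popen() and HalAccess(port=12309) distinguishes "server never started" from "client connected too early"; wait_for_port is a hypothetical helper:

import socket
import time

def wait_for_port(port, host='localhost', timeout=5.0):
    # retry until the server actually accepts a connection, instead of
    # assuming it is ready the moment Popen() returns
    deadline = time.monotonic() + timeout
    while True:
        try:
            with socket.create_connection((host, port), timeout=0.5):
                return
        except OSError:
            if time.monotonic() > deadline:
                raise TimeoutError(f'port {port} not accepting connections')
            time.sleep(0.1)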

Related

Python subprocess timeout does not terminate process

I have a problem with terminating processes on timeout. Basically, I am running one Linux command in a for loop (the same command for a list of files):
for target in targets:
    try:
        result = subprocess.run(['filerunner', 'work', '-target=' + target],
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=600)
        logging.info(result.stdout.decode('utf-8'))  # logging out to log file
        logging.info(result.stderr.decode('utf-8'))  # logging err to log file
    except subprocess.TimeoutExpired:
        logging.info('Operation failed due to process timeout (10 minutes).')
        # result.kill() tried with this to kill process manually
        time.sleep(1)
        pass
I tried a few things, but the processes are not being killed after the timeout expires. How can I do it?
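A common approach (a sketch, not from the thread, assuming filerunner spawns children that outlive the timeout): start the command in its own session with Popen, and on timeout signal the whole process group with os.killpg instead of only the direct child:

import os
import signal
import subprocess

def run_with_group_kill(cmd, timeout=600):
    # start_new_session=True puts the child in its own process group,
    # so the whole tree can be signalled at once (POSIX only)
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            start_new_session=True)
    try:
        return proc.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        os.killpg(os.getpgid(proc.pid), signal.SIGKILL)  # kill the whole group
        return proc.communicate()  # reap the child and collect remaining output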

Subprocess process.stdout.readline() freezes the program

I am trying to make a Python program communicate with a Minecraft server I am hosting, but the program freezes once the server stops producing console output. I am using Popen and PIPE from subprocess, and whenever I use process.stdout.readline() it prints all the lines, and when it is done it freezes the program, so I am not able to execute any more commands.
def start(cmd):
    try:
        process = Popen(cmd, stdin=PIPE, stdout=PIPE)
    except Exception as e:
        print(e)
    return process

start('java -Xmx1024M -Xms1024M -jar server.jar nogui')  # runs the minecraft server with 1GB of RAM

while True:
    print(process.stdout.readline())  # prints out every line in the server console
    readmail()  # the program checks email for commands, and enters the command into the console using stdin
I have tried this:
def start(cmd):
    try:
        process = Popen(['python', '-u', cmd], stdin=PIPE, stdout=PIPE)
    except Exception as e:
        print(e)
    return process
It fixes the issue with process.stdout.readline(), but it gives me an error because the command is meant to be run in cmd and not with python. Does anyone know how to fix the issue with readline()?
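A common pattern for this (a sketch of mine, not from the thread): move the blocking readline() into a background thread that feeds a queue, then poll the queue with a timeout so the main loop never hangs:

from queue import Queue, Empty
from subprocess import Popen, PIPE
from threading import Thread

def reader(pipe, q):
    # the blocking readline() is harmless here because it runs in its own thread
    for line in iter(pipe.readline, b''):
        q.put(line)
    pipe.close()

process = Popen(['java', '-Xmx1024M', '-Xms1024M', '-jar', 'server.jar', 'nogui'],
                stdin=PIPE, stdout=PIPE)
q = Queue()
Thread(target=reader, args=(process.stdout, q), daemon=True).start()

while True:
    try:
        line = q.get(timeout=0.1)  # waits at most 100 ms instead of freezing
        print(line.decode(), end='')
    except Empty:
        pass  # no console output right now
    # readmail()  # hypothetical: check email and write commands to process.stdin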

Unable to kill Python subprocess using process.kill() or process.terminate() or os.kill() or using psutil

Using Python, I am starting two subprocesses in parallel. One is an HTTP server (CustomSimpleHTTPServer.py), while the other executes another program (navigationScript.py, a Python script generated by the Selenium IDE plugin to open Firefox, navigate to a website and do some interactions). I want to stop execution of the first subprocess (the HTTP server) when the second subprocess is finished executing.
The logic of my code is that the selenium script will open a website. The website will automatically make a few GET calls to my HTTP Server. After the selenium script is finished executing, the HTTP Server is supposed to be closed so that it can log all the captured requests in a file.
Here is my main Python code:
class Myclass(object):
    HTTPSERVERPROCESS = ""

    def startHTTPServer(self):
        print "********HTTP Server started*********"
        try:
            self.HTTPSERVERPROCESS = subprocess.Popen('python CustomSimpleHTTPServer.py',
                shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        except Exception as e:
            print "Exception captured while starting HTTP Server process: %s\n" % e

    def startNavigatingFromBrowser(self):
        print "********Opening firefox to start navigation*********"
        try:
            process = subprocess.Popen('python navigationScript.py',
                shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            process.communicate()
            process.wait()
        except Exception as e:
            print "Exception captured starting Browser Navigation process : %s\n" % e
        try:
            if process.returncode == 0:
                print "HTTPSERVEPROCESS value: %s" % self.HTTPSERVERPROCESS.returncode
                print self.HTTPSERVERPROCESS
                #self.HTTPSERVERPROCESS.kill()
                #self.HTTPSERVERPROCESS.terminate()
                #self.kill(self.HTTPSERVERPROCESS.pid)
        except Exception as e:
            print "Exception captured while killing HTTP Server process : %s\n" % e

    def kill(self, proc_pid):
        process = psutil.Process(proc_pid)
        for proc in process.get_children(recursive=True):
            proc.kill()
        process.kill()

    def startCapture(self):
        print "********Starting Parallel execution of Server initiation and firefox navigation script*********"
        t1 = threading.Thread(target=self.startHTTPServer())
        t2 = threading.Thread(target=self.startNavigatingFromBrowser())
        t1.start()
        t2.start()
        t2.join()
Note: Execution starts by calling startCapture()
Here is the code for CustomSimpleHTTPServer.py, which is supposed to write the captured requests to logfile.txt upon termination:
import SimpleHTTPServer
import SocketServer

PORT = 5555

class MyHTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    log_file = open('logfile.txt', 'w')

    def log_message(self, format, *args):
        self.log_file.write("%s - - [%s] %s\n" %
                            (self.client_address[0],
                             self.log_date_time_string(),
                             format % args))

Handler = MyHTTPHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
httpd.serve_forever()
When I use self.HTTPSERVERPROCESS.kill() or self.HTTPSERVERPROCESS.terminate() or os.kill(), I get the following in my terminal upon running the main Python code:
********Starting Parallel execution of Server initiation and firefox navigation script*********
********SimpleHTTPServer started*********
********Opening firefox to start navigation*********
HTTPSERVEPROCESS value: <subprocess.Popen object at 0x1080f8410>
2459
Exception captured while killing HTTP Server process : [Errno 3] No such process
Process finished with exit code 0
And when I use self.kill(self.HTTPSERVERPROCESS.pid), I get the following in my terminal upon running the main Python code:
********Starting Parallel execution of Server initiation and firefox navigation script*********
********SimpleHTTPServer started*********
********Opening firefox to start navigation*********
HTTPSERVEPROCESS value: <subprocess.Popen object at 0x1080f8410>
2459
Exception captured while killing HTTP Server process : 'Process' object has no attribute 'get_children'
Process finished with exit code 0
None of the following three calls is able to kill the HTTPServer process:
self.HTTPSERVERPROCESS.kill()
self.HTTPSERVERPROCESS.terminate()
self.kill(self.HTTPSERVERPROCESS.pid)
I know that CustomSimpleHTTPServer.py is correct because when I run it separately, manually browse to the website, and then manually terminate the CustomSimpleHTTPServer.py script by hitting Ctrl-C in the terminal, the logs are populated in logfile.txt.
What changes do I make to my code so that it works properly and logs are populated?
You should just use os.kill() to signal processes:
import os
import signal
...
os.kill(the_pid, signal.SIGTERM) # usually kills processes
os.kill(the_pid, signal.SIGKILL) # should always kill a process
Also, killing the parent process usually kills the children too.
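One caveat worth adding (my note, not part of the answer): with shell=True, as used in startHTTPServer above, proc.pid is the pid of the shell, not of the Python server it launches, which is a common reason kill() and terminate() appear to do nothing. A sketch of killing the whole process group instead (POSIX only):

import os
import signal
import subprocess

# setsid puts the shell and everything it spawns into a new process group
proc = subprocess.Popen('python CustomSimpleHTTPServer.py', shell=True,
                        preexec_fn=os.setsid)
...
os.killpg(os.getpgid(proc.pid), signal.SIGTERM)  # signals the shell and the server together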
Update:
I made two small changes to the Server program:
1) Add a call to self.log_file.flush() to make sure log entries are flushed out to the log file.
2) Override allow_reuse_address so that you can reuse the same address shortly after terminating the server. (See this SO question)
File Server:
#!/usr/bin/env python
import SimpleHTTPServer
import SocketServer

PORT = 5555

class MyServer(SocketServer.TCPServer):
    allow_reuse_address = True

class MyHTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    log_file = open('logfile.txt', 'w')

    def log_message(self, format, *args):
        self.log_file.write("%s - - [%s] %s\n" %
                            (self.client_address[0],
                             self.log_date_time_string(),
                             format % args))
        self.log_file.flush()

Handler = MyHTTPHandler
httpd = MyServer(("", PORT), Handler)
httpd.serve_forever()
Here is a simple navigation program (file Navigate):
#!/usr/bin/env python
import requests
import time

URL = "http://localhost:5555/"

def main():
    for x in range(5):
        print "x =", x
        r = requests.get(URL + "asd")
        time.sleep(1)
    print "Quitting"

main()
And here is the main program:
#!/usr/bin/env python
import os
import subprocess
import signal

def main():
    # start web server
    web = subprocess.Popen(["./Server"])
    print "web server pid:", web.pid

    # start navigator
    nav = subprocess.Popen(["./Navigate"])
    print "nav pid: ", nav.pid

    # wait for nav to exit
    nav.wait()
    print "done waiting for nav"

    print "killing web server"
    os.kill(web.pid, signal.SIGTERM)
    web.wait()
    print "server terminated"

main()

Python sending command over a socket

I'm having a bit of trouble. I want to create a simple program that connects to a server and executes a command using subprocess, then returns the result to the client. It's simple, but I can't get it to work. Right now this is what I have:
client:
import sys, socket, subprocess

conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = sys.argv[1]
port = int(sys.argv[2])
socksize = 1024
conn.connect((host, port))

while True:
    shell = raw_input("$ ")
    conn.send(shell)
    data = conn.recv(socksize)
    #msglen = len(data)
    output = data
    iotype = subprocess.PIPE
    cmd = ['/bin/sh', '-c', shell]
    proc = subprocess.Popen(cmd, stdout=iotype).wait()
    stdout,stderr = proc.communicate()
    conn.send(stdout)
    print(output)
    if proc.returncode != 0:
        print("Error")
server:
import sys, socket, subprocess

host = ''
port = 50106
socksize = 1024

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, port))
print("Server started on port: %s" % port)
s.listen(1)
print("Now listening...\n")
conn, addr = s.accept()

while True:
    print 'New connection from %s:%d' % (addr[0], addr[1])
    data = conn.recv(socksize)
    cmd = ['/bin/sh', '-c', data]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE).wait()
    stdout,stderr = cmd.communicate()
    if not data:
        break
    elif data == 'killsrv':
        sys.exit()
Danger, Will Robinson!!!
Do you really want to send commands in clear text without authentication over the network? It is very, very dangerous.
Do it over SSH with paramiko.
Alright, I've heard this answer too many times. I don't want to use SSH; I'm just building it to learn more about sockets. I'm not going to actually use this if I want to send commands to a system. – AustinM
There is no way I could infer this noble quest from your question. :-)
The sockets module is a thin layer over the POSIX library; plain sockets are tedious and hard to get right. As of today (2014), asynchronous I/O and concurrency are not among Python's strongest traits - 3.4 is starting to change that, but libraries will lag behind for a while. My advice is to spend your time learning some higher-level API like Twisted (twistedmatrix.com/trac). If you are really interested in the low-level stuff, dive into the project source.
Alright. Any idea on how I could use twisted for this type of thing? – AustinM
Look at twistedmatrix.com/documents/current/core/examples/#auto2
Well, I can understand your frustration, Austin; I was in the same boat. However, trial and error at last worked it out. Hopefully you were looking for this:
print "Command is:",command
op = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
if op:
output=str(op.stdout.read())
print "Output:",output
conn.sendall(output)
else:
error=str(op.stderr.read())
print "Error:",error
conn.sendall(error)
It's unclear why you are using subprocess.Popen() for the same command in both the client and the server. Here's an outline of what I would try to do (pseudocode):
client:

while True:
    read command from user
    send command to server
    wait for and then read response from server
    print response to user

server:

while True:
    wait for and then read command from client
    if command is "killsrv", exit
    execute command and capture output
    send output to client
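A minimal runnable sketch of that outline (my own, in Python 3; the port and buffer sizes are arbitrary):

# server.py
import socket
import subprocess

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind(('', 50106))
srv.listen(1)
conn, addr = srv.accept()
while True:
    command = conn.recv(1024).decode()
    if not command or command == 'killsrv':
        break
    # capture stdout and stderr together so the client sees errors too
    result = subprocess.run(['/bin/sh', '-c', command],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    conn.sendall(result.stdout or b'(no output)\n')
conn.close()

# client.py
import socket

conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
conn.connect(('localhost', 50106))
while True:
    command = input('$ ')
    conn.send(command.encode())
    if command == 'killsrv':
        break
    print(conn.recv(4096).decode())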
The problem with your code is these lines (in both the client and the server):
proc = subprocess.Popen(cmd, stdout=iotype).wait()
stdout,stderr = proc.communicate()
You are calling wait on the Popen object, which means that the variable proc is getting an int (returned by wait) instead of a Popen object. You can just get rid of the wait -- since communicate waits for the process to end before returning, and you aren't checking the exit code anyway, you don't need to call it.
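In other words (my restatement of the fix), drop the .wait() so that proc stays a Popen object:

proc = subprocess.Popen(cmd, stdout=iotype)
stdout, stderr = proc.communicate()  # communicate() itself waits for the process to end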
Then, in your client, I don't think you even need the subprocess calls, unless you're running some command that the server is sending back.

Long-running ssh commands in python paramiko module (and how to end them)

I want to run a tail -f logfile command on a remote machine using Python's paramiko module. I've been attempting it so far in the following fashion:
interface = paramiko.SSHClient()
#snip the connection setup portion
stdin, stdout, stderr = interface.exec_command("tail -f logfile")
#snip into threaded loop
print stdout.readline()
I'd like the command to run as long as necessary, but I have 2 problems:
1) How do I stop this cleanly? I thought of making a Channel and then using the shutdown() command on the channel when I'm through with it - but that seems messy. Is it possible to do something like send Ctrl-C to the channel's stdin?
2) readline() blocks, and I could avoid threads if I had a non-blocking method of getting output - any thoughts?
Instead of calling exec_command on the client, get hold of the transport and generate your own channel. The channel can be used to execute a command, and you can use it in a select statement to find out when data can be read:
#!/usr/bin/env python
import paramiko
import select

client = paramiko.SSHClient()
client.load_system_host_keys()
client.connect('host.example.com')
transport = client.get_transport()
channel = transport.open_session()
channel.exec_command("tail -f /var/log/everything/current")
while True:
    rl, wl, xl = select.select([channel], [], [], 0.0)
    if len(rl) > 0:
        # Must be stdout
        print channel.recv(1024)
The channel object can be read from and written to, connecting with stdout and stdin of the remote command. You can get at stderr by calling channel.makefile_stderr(...).
I've set the timeout to 0.0 seconds because a non-blocking solution was requested. Depending on your needs, you might want to block with a non-zero timeout.
1) You can just close the client if you wish. The server on the other end will kill the tail process.
2) If you need to do this in a non-blocking way, you will have to use the channel object directly. You can then watch for both stdout and stderr with channel.recv_ready() and channel.recv_stderr_ready(), or use select.select.
Just a small update to the solution by Andrew Aylett. The following code actually breaks the loop and quits when the external process finishes:
import paramiko
import select

client = paramiko.SSHClient()
client.load_system_host_keys()
client.connect('host.example.com')
channel = client.get_transport().open_session()
channel.exec_command("tail -f /var/log/everything/current")
while True:
    if channel.exit_status_ready():
        break
    rl, wl, xl = select.select([channel], [], [], 0.0)
    if len(rl) > 0:
        print channel.recv(1024)
To close the process simply run:
interface.close()
In terms of non-blocking, you can't get a non-blocking read. The best you would be able to do would be to parse the output one "block" at a time; "stdout.read(1)" will only block when there are no characters left in the buffer.
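As a sketch of that byte-at-a-time approach (my own, assuming stdout is the file object returned by exec_command and yields bytes):

# assemble lines ourselves; read(1) blocks only when the buffer is completely empty
line = b''
while True:
    ch = stdout.read(1)
    if not ch:
        break  # stream closed, command finished
    line += ch
    if ch == b'\n':
        print(line.decode(), end='')
        line = b''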
Just for information, there is a solution to do this using channel.get_pty(). For more details have a look at: https://stackoverflow.com/a/11190727/1480181
The way I've solved this is with a context manager. This will make sure my long-running commands are aborted. The key logic is to wrap the call so that it mimics SSHClient.exec_command, but to capture the created channel and use a Timer that will close that channel if the command runs for too long.
import paramiko
import threading

class TimeoutChannel:
    def __init__(self, client: paramiko.SSHClient, timeout):
        self.expired = False
        self._channel: paramiko.channel = None
        self.client = client
        self.timeout = timeout

    def __enter__(self):
        self.timer = threading.Timer(self.timeout, self.kill_client)
        self.timer.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        print("Exited Timeout. Timed out:", self.expired)
        self.timer.cancel()
        if exc_val:
            return False  # Make sure the exceptions are re-raised
        if self.expired:
            raise TimeoutError("Command timed out")

    def kill_client(self):
        self.expired = True
        print("Should kill client")
        if self._channel:
            print("We have a channel")
            self._channel.close()

    def exec(self, command, bufsize=-1, timeout=None, get_pty=False, environment=None):
        self._channel = self.client.get_transport().open_session(timeout=timeout)
        if get_pty:
            self._channel.get_pty()
        self._channel.settimeout(timeout)
        if environment:
            self._channel.update_environment(environment)
        self._channel.exec_command(command)
        stdin = self._channel.makefile_stdin("wb", bufsize)
        stdout = self._channel.makefile("r", bufsize)
        stderr = self._channel.makefile_stderr("r", bufsize)
        return stdin, stdout, stderr
Using the code is pretty simple now; the first example will throw a TimeoutError:
ssh = paramiko.SSHClient()
ssh.connect('hostname', username='user', password='pass')

with TimeoutChannel(ssh, 3) as c:
    ssh_stdin, ssh_stdout, ssh_stderr = c.exec("cat")    # non-blocking
    exit_status = ssh_stdout.channel.recv_exit_status()  # block til done, will never complete because cat wants input
This code will work fine (unless the host is under insane load!)
ssh = paramiko.SSHClient()
ssh.connect('hostname', username='user', password='pass')

with TimeoutChannel(ssh, 3) as c:
    ssh_stdin, ssh_stdout, ssh_stderr = c.exec("uptime")  # non-blocking
    exit_status = ssh_stdout.channel.recv_exit_status()   # block til done, will complete quickly
    print(ssh_stdout.read().decode("utf8"))               # Show results
