When my class connects to the server it should immediately send a sign-in string; when the session is over it should send a sign-out string and clean up the sockets. Below is my code.
import trio


class test:
    _buffer = 8192
    _max_retry = 4

    def __init__(self, host='127.0.0.1', port=12345, usr='user', pwd='secret'):
        self.host = str(host)
        self.port = int(port)
        self.usr = str(usr)
        self.pwd = str(pwd)
        self._nl = b'\r\n'
        self._attempt = 0
        self._queue = trio.Queue(30)
        self._connected = trio.Event()
        self._end_session = trio.Event()

    @property
    def connected(self):
        return self._connected.is_set()

    async def _sender(self, client_stream, nursery):
        print('## sender: started!')
        q = self._queue
        while True:
            cmd = await q.get()
            print('## sending to the server:\n{!r}\n'.format(cmd))
            if self._end_session.is_set():
                nursery.cancel_scope.shield = True
                with trio.move_on_after(1):
                    await client_stream.send_all(cmd)
                nursery.cancel_scope.shield = False
            await client_stream.send_all(cmd)

    async def _receiver(self, client_stream, nursery):
        print('## receiver: started!')
        buff = self._buffer
        while True:
            data = await client_stream.receive_some(buff)
            if not data:
                print('## receiver: connection closed')
                self._end_session.set()
                break
            print('## got data from the server:\n{!r}'.format(data))

    async def _watchdog(self, nursery):
        await self._end_session.wait()
        await self._queue.put(self._logoff)
        self._connected.clear()
        nursery.cancel_scope.cancel()

    @property
    def _login(self, *a, **kw):
        nl = self._nl
        usr, pwd = self.usr, self.pwd
        return nl.join(x.encode() for x in ['Login', usr, pwd]) + 2 * nl

    @property
    def _logoff(self, *a, **kw):
        nl = self._nl
        return nl.join(x.encode() for x in ['Logoff']) + 2 * nl

    async def _connect(self):
        host, port = self.host, self.port
        print('## connecting to {}:{}'.format(host, port))
        try:
            client_stream = await trio.open_tcp_stream(host, port)
        except OSError as err:
            print('##', err)
        else:
            async with client_stream:
                self._end_session.clear()
                self._connected.set()
                self._attempt = 0
                # Sign in as soon as connected
                await self._queue.put(self._login)
                async with trio.open_nursery() as nursery:
                    print("## spawning watchdog...")
                    nursery.start_soon(self._watchdog, nursery)
                    print("## spawning sender...")
                    nursery.start_soon(self._sender, client_stream, nursery)
                    print("## spawning receiver...")
                    nursery.start_soon(self._receiver, client_stream, nursery)

    def connect(self):
        while self._attempt <= self._max_retry:
            try:
                trio.run(self._connect)
                trio.run(trio.sleep, 1)
                self._attempt += 1
            except KeyboardInterrupt:
                self._end_session.set()
                print('Bye bye...')
                break


tst = test()
tst.connect()
My logic doesn't quite work. Well, it works if I kill the netcat listener, in which case my session looks like the following:
## connecting to 127.0.0.1:12345
## spawning watchdog...
## spawning sender...
## spawning receiver...
## receiver: started!
## sender: started!
## sending to the server:
b'Login\r\nuser\r\nsecret\r\n\r\n'
## receiver: connection closed
## sending to the server:
b'Logoff\r\n\r\n'
Note that the Logoff string has been sent out, although it doesn't make sense here since the connection is already broken by that time.
However, my goal is to send Logoff when the user hits KeyboardInterrupt. In that case my session looks similar to this:
## connecting to 127.0.0.1:12345
## spawning watchdog...
## spawning sender...
## spawning receiver...
## receiver: started!
## sender: started!
## sending to the server:
b'Login\r\nuser\r\nsecret\r\n\r\n'
Bye bye...
Note that Logoff hasn't been sent.
Any ideas?
Here your call tree looks something like:
connect
|
+- _connect*
   |
   +- _watchdog*
   |
   +- _sender*
   |
   +- _receiver*
The *s indicate the 4 trio tasks. The _connect task is sitting at the end of the nursery block, waiting for the child tasks to complete. The _watchdog task is blocked in await self._end_session.wait(), the _sender task is blocked in await q.get(), and the _receiver task is blocked in await client_stream.receive_some(...).
When you hit control-C, then the standard Python semantics are that whatever bit of Python code is running suddenly raises KeyboardInterrupt. In this case, you have 4 different tasks running, so one of those blocked operations gets picked at random [1], and raises a KeyboardInterrupt. This means a few different things might happen:
If _watchdog's wait call raises KeyboardInterrupt, then the _watchdog method immediately exits, so it never even tries to send the logoff. Then, as part of unwinding the stack, trio cancels all the other tasks, and once they've exited the KeyboardInterrupt keeps propagating up until it reaches your except KeyboardInterrupt block in connect. At this point you try to notify the watchdog task using self._end_session.set(), but it's not running anymore, so it doesn't notice.
If _sender's q.get() call raises KeyboardInterrupt, then the _sender method immediately exits, so even if the _watchdog did ask it to send a logoff message, it won't be there to notice. And in any case, trio then proceeds to cancel the watchdog and receiver tasks anyway, and things proceed as above.
If _receiver's receive_some call raises KeyboardInterrupt... same thing happens.
Minor subtlety: _connect can also receive the KeyboardInterrupt, which does the same thing: cancels all the children, and then waits for them to stop before allowing the KeyboardInterrupt to keep propagating.
If you want to reliably catch control-C and then do something with it, then this business of it being raised at some random point is quite a nuisance. The simplest way to do this is to use Trio's support for catching signals to catch the signal.SIGINT signal, which is the thing that Python normally converts into a KeyboardInterrupt. (The "INT" stands for "interrupt".) Something like:
async def _control_c_watcher(self):
    # This API is currently a little cumbersome, sorry, see
    # https://github.com/python-trio/trio/issues/354
    with trio.catch_signals({signal.SIGINT}) as batched_signal_aiter:
        async for _ in batched_signal_aiter:
            self._end_session.set()
            # We exit the loop, restoring the normal behavior of
            # control-C. This way hitting control-C once will try to
            # do a polite shutdown, but if that gets stuck the user
            # can hit control-C again to raise KeyboardInterrupt and
            # force things to exit.
            break
and then start this running alongside your other tasks.
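For example, inside _connect you could spawn it in the same nursery as the others; a minimal sketch against the code above (it assumes an import signal at the top of the module):

async with trio.open_nursery() as nursery:
    nursery.start_soon(self._control_c_watcher)
    nursery.start_soon(self._watchdog, nursery)
    nursery.start_soon(self._sender, client_stream, nursery)
    nursery.start_soon(self._receiver, client_stream, nursery)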
You also have the problem that in your _watchdog method, it puts the logoff request into the queue – thus scheduling a message to be sent later, by the _sender task – and then immediately cancels all the tasks, so that the _sender task probably won't get a chance to see the message and react to it! In general, I find my code works nicer when I use tasks only when necessary. Instead of having a sender task and then putting messages in a queue when you want to send them, why not have the code that wants to send a message call stream.send_all directly? The one thing you have to watch out for is if you have multiple tasks that might send things simultaneously, you might want to use a trio.Lock() to make sure they don't bump into each other by calling send_all at the same time:
async def send_all(self, data):
    async with self.send_lock:
        await self.send_stream.send_all(data)

async def do_logoff(self):
    # First send the message
    await self.send_all(b"Logoff\r\n\r\n")
    # And then, *after* the message has been sent, cancel the tasks
    self.nursery.cancel_scope.cancel()
If you do it this way, you might be able to get rid of the watchdog task and the _end_session event entirely.
A few other notes about your code while I'm here:
Calling trio.run multiple times like this is unusual. The normal style is to call it once at the top of your program, and put all your real code inside it. Once you exit trio.run, all of trio's state is lost, you're definitely not running any concurrent tasks (so there's no way anything could possibly be listening and notice your call to _end_session.set()!). And in general, almost all Trio functions assume that you're already inside a call to trio.run. It turns out that right now you can call trio.Queue() before starting trio without getting an exception, but that's basically just a coincidence.
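A rough sketch of that restructuring, keeping your retry loop but moving it inside a single trio.run call (names follow your class above; this is only a sketch, not a drop-in fix):

async def _main(self):
    while self._attempt <= self._max_retry:
        await self._connect()
        await trio.sleep(1)
        self._attempt += 1

def connect(self):
    # One trio.run for the whole program; everything concurrent lives inside it.
    trio.run(self._main)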
The use of shielding inside _sender looks odd to me. Shielding is generally an advanced feature that you almost never want to use, and I don't think this is an exception.
Hope that helps! And if you want to talk more about style/design issues like this but are worried they might be too vague for stack overflow ("is this program designed well?"), then feel free to drop by the trio chat channel.
[1] Well, actually trio probably picks the main task for various reasons, but that's not guaranteed and in any case it doesn't make a difference here.
Related
I am writing a pyModbus server with asyncio, based on this example.
Alongside the server I've got a serial device which I'm communicating with and a server updating task.
One task should check the status of the serial device every 500ms.
The server updating task should check if there are any changes in the status of the serial device and update the info on the server. Moreover, if there is a request waiting on the server it should call another task which will send necessary info to the serial device.
My three questions are:
1. How should I stop the server politely? For now the app is running only in the console, so it is stopped by Ctrl+C - how can I stop the server without causing an avalanche of errors?
2. How can I implement tasks that execute cyclically (let's say I want to refresh the server data every 500ms)? I've found the aiocron module, but as far as I can tell its functionality is a bit limited, as it is intended just for calling functions at intervals.
3. How can I politely cancel all the tasks (the infinitely, cyclically running ones) before stopping the server when closing the app?
Thanks!
EDIT:
Speaking of running cyclical tasks and cancelling them - is this a proper way to do that? It doesn't raise any errors, but does it clean everything up correctly? (I created this sketch by compiling a dozen questions from Stack Overflow, so I am not sure if it makes sense.)
import asyncio


async def periodic():
    try:
        while True:
            print('periodic')
            await asyncio.sleep(1)
    except asyncio.CancelledError as ex:
        print('task1', type(ex))
        raise


async def periodic2():
    try:
        while True:
            print('periodic2')
            await asyncio.sleep(0.5)
    except asyncio.CancelledError as ex:
        print('task2', type(ex))
        raise


async def main():
    tasks = []
    task = asyncio.create_task(periodic())
    tasks.append(task)
    task2 = asyncio.create_task(periodic2())
    tasks.append(task2)
    for task in tasks:
        await task


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        pass
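For comparison, a sketch that makes the cancellation explicit instead of relying on KeyboardInterrupt alone (it reuses periodic and periodic2 from above; note that add_signal_handler is only available on Unix event loops):

import asyncio
import signal

async def main():
    tasks = [asyncio.create_task(periodic()),
             asyncio.create_task(periodic2())]

    stop = asyncio.Event()
    # On Ctrl+C, set the stop event instead of raising KeyboardInterrupt.
    asyncio.get_running_loop().add_signal_handler(signal.SIGINT, stop.set)
    await stop.wait()

    # Politely cancel the cyclic tasks and wait for their cleanup to finish.
    for task in tasks:
        task.cancel()
    await asyncio.gather(*tasks, return_exceptions=True)

if __name__ == "__main__":
    asyncio.run(main())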
I'm using python with pika, and have the following two similar use cases:
1. Connect to RabbitMQ server A and server B (at different IP addrs with different credentials), listen on exchange A1 on server A; when a message arrives, process it and send to an exchange on server B
2. Open an HTTP listener and connect to RabbitMQ server B; when a specific HTTP request arrives, process it and send to an exchange on server B
Alas, in both these cases using my usual techniques, by the time I get to sending to server B the connection throws ConnectionClosed or ChannelClosed.
I assume this is the cause: while waiting on the incoming messages, the connection to server B (its "driver") is starved of CPU cycles, and it never gets a chance to service its connection socket; thus it can't respond to heartbeats from server B, and so the server shuts down the connection.
But I can't noodle out the fix. My current work around is lame: I catch the ConnectionClosed, reopen a connection to server B, and retry sending my message.
But what is the "right" way to do this? I've considered these, but don't really feel I have all the parts to solve this:
1. Don't just sit forever in server A's basic_consume (my usual pattern), but rather use a timeout, and when I catch the timeout, somehow "service" heartbeats on server B's driver before returning to a "consume with timeout"... but how do I do that? How do I "let server B's connection driver service its heartbeats"?
2. I know the socket library's select() call can wait for messages on several sockets at once, then service the socket that has packets waiting. So maybe this is what pika's SelectConnection is for? a) I'm not sure, this is just a hunch. b) Even if right, while I can find examples of how to create this connection, I can't find examples of how to use it to solve my multi-connection case.
3. Set up the two server connections in different processes, and use Python interprocess queues to get the processed message from one process to the next. The concept is "two different RabbitMQ connections in two different processes should be able to independently service their heartbeats". Except... I think this has a fatal flaw: the process with "server B" is instead going to be "stuck" waiting on the interprocess queue, and the same "starvation" is going to happen.
I've checked StackOverflow and Googled this for an hour last night: I can't for the life of me find a blog post or sample code for this.
Any input? Thanks a million!
I managed to work it out, basing my solution on the documentation and an answer in the pika-python Google group.
First of all, your assumption is correct: the client process that's connected to server B, responsible for publishing, cannot reply to heartbeats if it's already blocking on something else, like waiting for a message from server A or blocking on an internal communication queue.
The crux of the solution is that the publisher should run as a separate thread and use BlockingConnection.process_data_events to service heartbeats and such. It looks like that method is supposed to be called in a loop that checks if the publisher still needs to run:
def run(self):
    while self.is_running:
        # Block at most 1 second before returning and re-checking
        self.connection.process_data_events(time_limit=1)
Proof of concept
Since proving the full solution requires having two separate RabbitMQ instances running, I have put together a Git repo with an appropriate docker-compose.yml, the application code and comments to test this solution.
https://github.com/karls/rabbitmq-two-connections
Solution outline
Below is a sketch of the solution, minus imports and such. Some notable things:
Publisher runs as a separate thread
The only "work" that the publisher does is servicing heartbeats and such, via Connection.process_data_events
The publisher registers a callback whenever the consumer wants to publish a message, using Connection.add_callback_threadsafe
The consumer takes the publisher as a constructor argument so it can publish the messages it receives, but it can work via any other mechanism as long as you have a reference to an instance of Publisher
The code is taken from the linked Git repo, which is why certain details are hardcoded, e.g. the queue name. It will work with any RabbitMQ setup needed (direct-to-queue, topic exchange, fanout, etc.).
class Publisher(threading.Thread):
    def __init__(
        self,
        connection_params: ConnectionParameters,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.daemon = True
        self.is_running = True
        self.name = "Publisher"
        self.queue = "downstream_queue"

        self.connection = BlockingConnection(connection_params)
        self.channel = self.connection.channel()
        self.channel.queue_declare(queue=self.queue, auto_delete=True)
        self.channel.confirm_delivery()

    def run(self):
        while self.is_running:
            self.connection.process_data_events(time_limit=1)

    def _publish(self, message):
        logger.info("Calling '_publish'")
        self.channel.basic_publish("", self.queue, body=message.encode())

    def publish(self, message):
        logger.info("Calling 'publish'")
        self.connection.add_callback_threadsafe(lambda: self._publish(message))

    def stop(self):
        logger.info("Stopping...")
        self.is_running = False
        # Call .process_data_events one more time to block
        # and allow the while-loop in .run() to break.
        # Otherwise the connection might be closed too early.
        #
        self.connection.process_data_events(time_limit=1)
        if self.connection.is_open:
            self.connection.close()
            logger.info("Connection closed")
        logger.info("Stopped")


class Consumer:
    def __init__(
        self,
        connection_params: ConnectionParameters,
        publisher: Optional["Publisher"] = None,
    ):
        self.publisher = publisher
        self.queue = "upstream_queue"

        self.connection = BlockingConnection(connection_params)
        self.channel = self.connection.channel()
        self.channel.queue_declare(queue=self.queue, auto_delete=True)
        self.channel.basic_qos(prefetch_count=1)

    def start(self):
        self.channel.basic_consume(
            queue=self.queue, on_message_callback=self.on_message
        )
        try:
            self.channel.start_consuming()
        except KeyboardInterrupt:
            logger.info("Warm shutdown requested...")
        except Exception:
            traceback.print_exception(*sys.exc_info())
        finally:
            self.stop()

    def on_message(self, _channel: Channel, m, _properties, body):
        try:
            message = body.decode()
            logger.info(f"Got: {message!r}")
            if self.publisher:
                self.publisher.publish(message)
            else:
                logger.info(f"No publisher provided, printing message: {message!r}")
            self.channel.basic_ack(delivery_tag=m.delivery_tag)
        except Exception:
            traceback.print_exception(*sys.exc_info())
            self.channel.basic_nack(delivery_tag=m.delivery_tag, requeue=False)

    def stop(self):
        logger.info("Stopping consuming...")
        if self.connection.is_open:
            logger.info("Closing connection...")
            self.connection.close()
        if self.publisher:
            self.publisher.stop()
        logger.info("Stopped")
I have an app, similar to a chat room, written in Python that intends to do the following things:
1. Prompt the user to input a websocket server address.
2. Create a websocket client that connects to the server and sends/receives messages. Disable the ability to create another websocket client.
3. After receiving "close" from the server (NOT a close frame), the client should drop the connection and re-enable the app to create a client. Go back to 1.
4. If the user exits the app, exit the websocket client if one is running.
My approach for this is to use a main thread to deal with user input. When the user hits enter, a thread is created for the WebSocket client using Autobahn's Twisted module, and a Queue is passed to it. That thread checks whether the reactor is running and starts it if it isn't.
The onMessage method is overridden to put a closing flag into the Queue when "close" is received. The main thread busy-checks the Queue until it receives the flag, then goes back to the start. The code looks like the following.
Main thread.
def main_thread():
    while True:
        text = raw_input("Input server url or exit")
        if text == "exit":
            if myreactor:
                myreactor.stop()
            break
        msgq = Queue.Queue()
        threading.Thread(target=wsthread, args=(text, msgq)).start()
        is_close = False
        while True:
            if msgq.empty() is False:
                msg = msgq.get()
                if msg == "close":
                    is_close = True
                else:
                    print msg
            if is_close:
                break
        print 'Websocket client closed!'
Factory and Protocol.
class MyProtocol(WebSocketClientProtocol):
    def onMessage(self, payload, isBinary):
        msg = payload.decode('utf-8')
        self.factory.queue.put(msg)
        if msg == 'close':
            self.dropConnection(abort=True)


class WebSocketClientFactoryWithQ(WebSocketClientFactory):
    def __init__(self, *args, **kwargs):
        self.queue = kwargs.pop('queue', None)
        WebSocketClientFactory.__init__(self, *args, **kwargs)
Client thread.
def wsthread(url, q):
    factory = WebSocketClientFactoryWithQ(url=url, queue=q)
    factory.protocol = MyProtocol
    connectWS(factory)
    if myreactor is None:
        myreactor = reactor
        reactor.run()
    print 'Done'
Now I have a problem. It seems that my client thread never stops. Even after I receive "close", it seems to still be running, and every time I try to create a new client, it creates a new thread. I understand the first thread won't stop, since reactor.run() will run forever, but from the 2nd thread on it should be non-blocking since I'm not starting the reactor anymore. How can I change that?
EDIT:
I ended up solving it with:
Adding stopFactory() after disconnecting.
Making protocol calls via reactor.callFromThread().
Starting the reactor in the first thread, putting clients in other threads, and using reactor.callInThread() to create them.
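The callFromThread / callInThread part of that looks roughly like this (a sketch, not the full app; some_protocol stands in for the connected WebSocket protocol instance). Twisted APIs must be driven from the reactor thread, so other threads hand calls to it via reactor.callFromThread, while blocking work such as raw_input lives in worker threads started with reactor.callInThread:

from twisted.internet import reactor

def close_connection(proto):
    # Runs in the reactor thread, so calling Twisted APIs here is safe.
    proto.dropConnection(abort=True)

# From any non-reactor thread, schedule work onto the reactor thread:
reactor.callFromThread(close_connection, some_protocol)

# Conversely, push blocking code (e.g. the raw_input loop) into a worker thread:
reactor.callInThread(main_thread)
reactor.run()  # the reactor itself runs in the main/first thread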
Your main_thread creates new threads running wsthread. wsthread uses Twisted APIs. The first wsthread becomes the reactor thread. All subsequent threads are different and it is undefined what happens if you use a Twisted API from them.
You should almost certainly remove the use of threads from your application. For dealing with console input in a Twisted-based application, take a look at twisted.conch.stdio (not the best documented part of Twisted, alas, but just what you want).
I'm new to Stack Overflow (although have been a long-term "stalker"!) so please be gentle with me!
I'm trying to learn Python, in particular Asyncio using websockets.
Having scoured the web for examples/tutorials I've put together the following tiny chat application, and could use some advice before it gets bulkier (more commands etc) and becomes difficult to refactor.
My main question, is why (when sending the DISCONNECT command) does it need the asyncio.sleep(0) in order to send the disconnection verification message BEFORE closing the connection?
Other than that, am I on the right tracks with the structure here?
I feel that there's too much async/await but I can't quite wrap my head around why.
Staring at tutorials and S/O posts for hours on end doesn't seem to be helping at this point so I thought I'd get some expert advice directly!
Here we go: a simple WS server that responds to the "nick", "msg", "test" & "disconnect" commands. No prefix required, e.g. "nick Rachel".
import asyncio
import websockets
import sys


class ChatServer:
    def __init__(self):
        print("Chat Server Starting..")
        self.Clients = set()
        if sys.platform == 'win32':
            self.loop = asyncio.ProactorEventLoop()
            asyncio.set_event_loop(self.loop)
        else:
            self.loop = asyncio.get_event_loop()

    def run(self):
        start_server = websockets.serve(self.listen, '0.0.0.0', 8080)
        try:
            self.loop.run_until_complete(start_server)
            print("Chat Server Running!")
            self.loop.run_forever()
        except:
            print("Chat Server Error!")

    async def listen(self, websocket, path):
        client = Client(websocket=websocket)
        sender_task = asyncio.ensure_future(self.handle_outgoing_queue(client))
        self.Clients.add(client)
        print("+ connection: " + str(len(self.Clients)))
        while True:
            try:
                msg = await websocket.recv()
                if msg is None:
                    break
                await self.handle_message(client, msg)
            except websockets.exceptions.ConnectionClosed:
                break
        self.Clients.remove(client)
        print("- connection: " + str(len(self.Clients)))

    async def handle_outgoing_queue(self, client):
        while client.websocket.open:
            msg = await client.outbox.get()
            await client.websocket.send(msg)

    async def handle_message(self, client, data):
        param = data.split(" ")
        _cmd = param[0].lower()
        try:
            # Check to see if the command exists. Otherwise, AttributeError is thrown.
            func = getattr(self, "cmd_" + _cmd)
            try:
                await func(client, param, data)
            except IndexError:
                await client.send("Not enough parameters!")
        except AttributeError:
            await client.send("Command '%s' does not exist!" % (_cmd))

    # SERVER COMMANDS
    async def cmd_nick(self, client, param, strdata):
        # This command needs a parameter (with at least one character). If not supplied, IndexError is raised
        # Is there a cleaner way of doing this? Otherwise it'll need to reside within all functions that require a param
        test = param[1][0]
        # If we've reached this point there's definitely a parameter supplied
        client.Nick = param[1]
        await client.send("Your nickname is now %s" % (client.Nick))

    async def cmd_msg(self, client, param, strdata):
        # This command needs a parameter (with at least one character). If not supplied, IndexError is raised
        # Is there a cleaner way of doing this? Otherwise it'll need to reside within all functions that require a param
        test = param[1][0]
        # If we've reached this point there's definitely a parameter supplied
        message = strdata.split(" ", 1)[1]
        # Before we proceed, do we have a nickname?
        if client.Nick == None:
            await client.send("You must choose a nickname before sending messages!")
            return
        for each in self.Clients:
            await each.send("%s says: %s" % (client.Nick, message))

    async def cmd_test(self, client, param, strdata):
        # This command doesn't need a parameter, so simply let the client know they issued this command successfully.
        await client.send("Test command reply!")

    async def cmd_disconnect(self, client, param, strdata):
        # This command doesn't need a parameter, so simply let the client know they issued this command successfully.
        await client.send("DISCONNECTING")
        await asyncio.sleep(0)  # If this isn't here we don't receive the "disconnecting" message - just an exception in "handle_outgoing_queue" ?
        await client.websocket.close()


class Client():
    def __init__(self, websocket=None):
        self.websocket = websocket
        self.IPAddress = websocket.remote_address[0]
        self.Port = websocket.remote_address[1]
        self.Nick = None
        self.outbox = asyncio.Queue()

    async def send(self, data):
        await self.outbox.put(data)


chat = ChatServer()
chat.run()
Your code uses infinite size Queues, which means .put() calls .put_nowait() and returns immediately. (If you do want to keep these queues in your code, consider using 'None' in the queue as a signal to close a connection and move client.websocket.close() to handle_outgoing_queue()).
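A sketch of that sentinel idea against the classes from the question (only the two affected methods shown, under the assumption that nothing else puts None into the outbox):

async def handle_outgoing_queue(self, client):
    while client.websocket.open:
        msg = await client.outbox.get()
        if msg is None:
            # Sentinel: the outbox has been flushed, now it's safe to close.
            await client.websocket.close()
            break
        await client.websocket.send(msg)

async def cmd_disconnect(self, client, param, strdata):
    await client.send("DISCONNECTING")
    await client.send(None)  # queue the close instead of closing directly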
Another issue: Consider replacing for x in seq: await co(x) with await asyncio.wait([co(x) for x in seq]). Try it with asyncio.sleep(1) to experience a dramatic difference.
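A toy illustration of the difference (standalone, not part of the chat server; newer Python versions want tasks rather than bare coroutines passed to asyncio.wait):

import asyncio

async def co(x):
    await asyncio.sleep(1)
    print("done", x)

async def sequential():
    for x in range(5):                 # ~5 seconds: one co() at a time
        await co(x)

async def concurrent():                # ~1 second: all five run together
    await asyncio.wait([asyncio.ensure_future(co(x)) for x in range(5)])

asyncio.run(concurrent())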
I believe a better option would be dropping all the outbox Queues and just relying on the built-in asyncio queue and ensure_future. The websockets package already includes Queues in its implementation.
I want to point out that the author of websockets indicated in a post on July 17 of 2017 that websockets used to return None when the connection was closed, but that was changed at some point. Instead he suggests that you use a try and deal with the exception. The OP's code shows both a check for None AND a try/except. The None check is needlessly verbose and apparently not even accurate, since with the current version websocket.recv() raises an exception rather than returning None when the client closes.
Addressing the "main" question, it looks like a race condition of sorts. Remember that asyncio does it's work by going around and touching all the awaited elements in order to nudge them along. If your 'close connection' command is processed at some point ahead of when your queue is cleared, the client will never get that last message in the queue. Adding the async.sleep adds an extra step to the round robin and probably puts your queue emptying task ahead of your 'close connection'.
Addressing the amount of awaits, it's all about how many asynchronous things you need to have happen to accomplish the goal. If you block at any point you'll stop all the other tasks that you want to keep going.
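To make the scheduling point concrete, here is a tiny standalone illustration (not part of the chat server) of how await asyncio.sleep(0) hands control back to the event loop so another pending task can run first:

import asyncio

async def worker():
    print("worker ran")

async def main():
    asyncio.ensure_future(worker())
    # Without this yield point, main() finishes before worker() ever runs
    # and the pending task is cancelled on shutdown.
    await asyncio.sleep(0)
    print("main done")

asyncio.run(main())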
I am writing a simple client-server program in python. In the client program, I am creating two threads (using Python's threading module), one for receiving, one for sending. The receiving thread continuously receives strings from the server side; while the sending thread continuously listens to the user input (using raw_input()) and send it to the server side. The two threads communicate using a Queue (which is natively synchronized, LIKE!).
The basic logic is like following:
Receiving thread:
global queue = Queue.Queue(0)

def run(self):
    while 1:
        receive a string from the server side
        if the string is QUIT signal:
            sys.exit()
        else:
            put it into the global queue
Sending thread:
def run(self):
    while 1:
        str = raw_input()
        send str to the server side
        fetch an element from the global queue
        deal with the element
As you can see, in the receiving thread, I have a if condition to test whether the server has sent a "QUIT signal" to the client. If it has, then I want the whole program to stop.
The problem here is that for most of its time, the sending thread is blocked by "raw_input()" and waiting for the user input. When it is blocked, calling "sys.exit()" from the other thread (receiving thread) will not terminate the sending thread immediately. The sending thread has to wait for the user to type something and hit the enter button.
Could anybody suggest how to get around this? I do not mind using alternatives to raw_input(). Actually, I do not even mind changing the whole structure.
-------------EDIT-------------
I am running this on a linux machine, and my Python version is 2.7.5
You could just make the sending thread daemonic:
send_thread = SendThread() # Assuming this inherits from threading.Thread
send_thread.daemon = True # This must be called before you call start()
The Python interpreter won't be blocked from exiting if the only threads left running are daemons. So, if the only thread left is send_thread, your program will exit, even if you're blocked on raw_input.
Note that this will terminate the sending thread abruptly, no matter what it's doing. This could be dangerous if it accesses external resources that need to be cleaned up properly or shouldn't be interrupted (like writing to a file, for example). If you're doing anything like that, protect it with a threading.Lock, and only call sys.exit() from the receiving thread if you can acquire that same Lock.
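A minimal sketch of that locking idea (send_to_server here is a hypothetical stand-in for whatever non-interruptible work the sending thread does):

import sys
import threading

exit_lock = threading.Lock()

def sending_loop():               # runs in the daemonic sending thread
    while True:
        line = raw_input()
        with exit_lock:           # don't let the program exit mid-send
            send_to_server(line)  # hypothetical: socket/file write, etc.

def handle_quit_signal():         # called from the receiving thread
    with exit_lock:               # wait until the sender isn't mid-send
        sys.exit()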
The short answer is you can't. input(), like a lot of such input commands, is blocking, and it stays blocked regardless of what has happened to the rest of the thread. You can sometimes call sys.exit() and get it to work depending on the OS, but it's not going to be consistent. Sometimes you can kill the program by deferring out to the local OS. But then you're not going to be widely cross-platform.
What you might want to consider instead is funnelling the functionality through sockets, because unlike input() they support timeouts and threads and can be killed rather easily. It also gives you the ability to handle multiple connections and maybe accept connections more broadly.
import socket
import time
from threading import Thread


def process(command, connection):
    print("Command Entered: %s" % command)
    # Any responses are written to connection.
    connection.send(bytes('>', 'utf-8'))


class ConsoleSocket:
    def __init__(self):
        self.keep_running_the_listening_thread = True
        self.data_buffer = ''
        Thread(target=self.tcp_listen_handle).start()

    def stop(self):
        self.keep_running_the_listening_thread = False

    def handle_tcp_connection_in_another_thread(self, connection, addr):
        def handle():
            while self.keep_running_the_listening_thread:
                try:
                    data_from_socket = connection.recv(1024)
                    if len(data_from_socket) != 0:
                        self.data_buffer += data_from_socket.decode('utf-8')
                    else:
                        break
                    while '\n' in self.data_buffer:
                        pos = self.data_buffer.find('\n')
                        command = self.data_buffer[0:pos].strip('\r')
                        self.data_buffer = self.data_buffer[pos + 1:]
                        process(command, connection)
                except socket.timeout:
                    continue
                except socket.error:
                    if connection is not None:
                        connection.close()
                    break
        Thread(target=handle).start()
        connection.send(bytes('>', 'utf-8'))

    def tcp_listen_handle(self, port=23, connects=5, timeout=2):
        """This is running in its own thread."""
        sock = socket.socket()
        sock.settimeout(timeout)
        sock.bind(('', port))
        sock.listen(connects)  # We accept more than one connection.
        while self.keep_running_the_listening_thread:
            connection = None
            try:
                connection, addr = sock.accept()
                address, port = addr
                if address != '127.0.0.1':  # Only permit localhost.
                    connection.close()
                    continue
                # makes a thread deals with that stuff. We only do listening.
                connection.settimeout(timeout)
                self.handle_tcp_connection_in_another_thread(connection, addr)
            except socket.timeout:
                pass
            except OSError:
                # Some other error.
                if connection is not None:
                    connection.close()
        sock.close()


c = ConsoleSocket()


def killsocket():
    time.sleep(20)
    c.stop()


Thread(target=killsocket).start()
This launches a listener thread for connections on port 23 (telnet); when you connect, it passes that connection off to another thread. It also starts a killsocket thread that, after 20 seconds, disables the various threads and lets them die peacefully (for demonstration purposes). The code itself doesn't connect to localhost as a client, because you'd need input() to know what to send to the server, which recreates the problem.
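If you want to try it out without input(), a throwaway client script (or plain telnet localhost 23) works; a minimal sketch, assuming the ConsoleSocket above is already listening on port 23:

import socket

s = socket.create_connection(('127.0.0.1', 23))
print(s.recv(16))        # the '>' prompt sent on connect
s.sendall(b'hello\n')    # process() on the server prints "Command Entered: hello"
print(s.recv(16))        # the '>' prompt sent after the command
s.close()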