How to make a Twisted Python client with readline functionality - python

I'm trying to write a client for a simple TCP server using Python Twisted. Of course I'm pretty new to Python and just started looking at Twisted, so I could be doing it all wrong.
The server is simple and you're intended to use nc or telnet. There is no authentication. You just connect and get a simple console. I'd like to write a client that adds some readline functionality (history and emacs-like ctrl-a/ctrl-e are what I'm after).
Below is code I've written that works just as well as using netcat from the command line like this: nc localhost 4118
from twisted.internet import reactor, protocol, stdio
from twisted.protocols import basic
from sys import stdout

host='localhost'
port=4118
console_delimiter='\n'

class MyConsoleClient(protocol.Protocol):
    def dataReceived(self, data):
        stdout.write(data)
        stdout.flush()

    def sendData(self,data):
        self.transport.write(data+console_delimiter)

class MyConsoleClientFactory(protocol.ClientFactory):
    def startedConnecting(self,connector):
        print 'Starting connection to console.'

    def buildProtocol(self, addr):
        print 'Connected to console!'
        self.client = MyConsoleClient()
        self.client.name = 'console'
        return self.client

    def clientConnectionFailed(self, connector, reason):
        print 'Connection failed with reason:', reason

class Console(basic.LineReceiver):
    factory = None
    delimiter = console_delimiter

    def __init__(self,factory):
        self.factory = factory

    def lineReceived(self,line):
        if line == 'quit':
            self.quit()
        else:
            self.factory.client.sendData(line)

    def quit(self):
        reactor.stop()

def main():
    factory = MyConsoleClientFactory()
    stdio.StandardIO(Console(factory))
    reactor.connectTCP(host,port,factory)
    reactor.run()

if __name__ == '__main__':
    main()
The output:
$ python ./console-console-client.py
Starting connection to console.
Connected to console!
console> version
d305dfcd8fc23dc6674a1d18567a3b4e8383d70e
console> number-events
338
console> quit
I've looked at
Python Twisted integration with Cmd module
This really didn't work out for me. The example code works great, but when I introduced networking I seemed to have race conditions with stdio. This older link seems to advocate a similar approach (running readline in a separate thread) but I didn't get far with it.
I've also looked into twisted conch insults but I haven't had any luck getting anything to work other than the demo examples.
What's the best way to make a terminal based client that would provide readline support?
http://twistedmatrix.com/documents/current/api/twisted.conch.stdio.html
looks promising but I'm confused how to use it.
http://twistedmatrix.com/documents/current/api/twisted.conch.recvline.HistoricRecvLine.html
also seems to provide support for handling the up and down arrows, for instance, but I couldn't get Console to work after switching it to inherit from HistoricRecvLine instead of LineReceiver. A sketch of what I was attempting is below.
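For reference, this is roughly the direction I was attempting with HistoricRecvLine, pieced together from the conch demo examples (an untested sketch, so I may well be holding it wrong; the class and message names are mine):
import sys, tty, termios

from twisted.internet import reactor, stdio
from twisted.conch.recvline import HistoricRecvLine
from twisted.conch.insults import insults

class HistoryConsole(HistoricRecvLine):
    def lineReceived(self, line):
        # HistoricRecvLine keeps the up/down-arrow history;
        # a real client would hand the line to the network protocol here
        self.terminal.write('sending: ' + line)
        self.terminal.nextLine()
        self.drawInputLine()

def main():
    fd = sys.__stdin__.fileno()
    oldSettings = termios.tcgetattr(fd)
    tty.setraw(fd)  # raw mode so the arrow-key escape sequences reach insults
    try:
        stdio.StandardIO(insults.ServerProtocol(HistoryConsole))
        reactor.run()
    finally:
        termios.tcsetattr(fd, termios.TCSANOW, oldSettings)

if __name__ == '__main__':
    main()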
Maybe Twisted is the wrong framework to be using, or I should be using all conch classes. I just liked the event-driven style of it. Is there a better/easier approach to having readline or readline-like support in a Twisted client?

I ended up solving this without the Twisted framework. It's a great framework, but I think it was the wrong tool for the job. Instead I used the telnetlib, cmd and readline modules.
My server is asynchronous, but that didn't mean my client needed to be, so I used telnetlib for my communication with the server. This made it easy to create a ConsoleClient class that subclasses cmd.Cmd and gets history and emacs-like shortcuts.
#! /usr/bin/env python
import telnetlib
import readline
import os
import sys
import atexit
import cmd
import string

HOST='127.0.0.1'
PORT='4118'
CONSOLE_PROMPT='console> '

class ConsoleClient(cmd.Cmd):
    """Simple Console Client in Python. This allows for readline functionality."""

    def connect_to_console(self):
        """Can throw an IOError if telnet connection fails."""
        self.console = telnetlib.Telnet(HOST,PORT)
        sys.stdout.write(self.read_from_console())
        sys.stdout.flush()

    def read_from_console(self):
        """Read from console until prompt is found (no more data to read)

        Will throw EOFError if the console is closed.
        """
        read_data = self.console.read_until(CONSOLE_PROMPT)
        return self.strip_console_prompt(read_data)

    def strip_console_prompt(self,data_received):
        """Strip out the console prompt if present"""
        if data_received.startswith(CONSOLE_PROMPT):
            return data_received.partition(CONSOLE_PROMPT)[2]
        else:
            #The banner case when you first connect
            if data_received.endswith(CONSOLE_PROMPT):
                return data_received.partition(CONSOLE_PROMPT)[0]
            else:
                return data_received

    def run_console_command(self,line):
        self.write_to_console(line + '\n')
        data_recved = self.read_from_console()
        sys.stdout.write(self.strip_console_prompt(data_recved))
        sys.stdout.flush()

    def write_to_console(self,line):
        """Write data to the console"""
        self.console.write(line)
        sys.stdout.flush()

    def do_EOF(self, line):
        try:
            self.console.write("quit\n")
            self.console.close()
        except IOError:
            pass
        return True
    def do_help(self,line):
        """The server already has its own help command. Use that."""
        self.run_console_command("help")  # run_console_command adds the trailing newline

    def do_quit(self, line):
        return self.do_EOF(line)

    def default(self, line):
        """Allow a command to be sent to the console."""
        self.run_console_command(line)

    def emptyline(self):
        """Don't send anything to console on empty line."""
        pass

def main():
    histfile = os.path.join(os.environ['HOME'], '.consolehistory')
    try:
        readline.read_history_file(histfile)
    except IOError:
        pass
    atexit.register(readline.write_history_file, histfile)
    try:
        console_client = ConsoleClient()
        console_client.prompt = CONSOLE_PROMPT
        console_client.connect_to_console()
        doQuit = False
        while not doQuit:
            try:
                console_client.cmdloop()
                doQuit = True
            except KeyboardInterrupt:
                #Allow for ^C (Ctrl-c)
                sys.stdout.write('\n')
    except IOError as e:
        print "I/O error({0}): {1}".format(e.errno, e.strerror)
    except EOFError:
        pass

if __name__ == '__main__':
    main()
One change I made was to strip the prompt returned by the server and use Cmd.prompt to display it to the user. The reason was to make Ctrl-C act more like it does in a shell.

Related

Shutdown for socketserver based Python 3 server hangs

I am working on a "simple" server using a threaded SocketServer in Python 3.
I am going through a lot of trouble implementing shutdown for this. I found the code below on the internet; shutdown works initially, but it stops working after I send a few commands from the client via telnet. Some investigation tells me it hangs in threading._shutdown... threading._wait_for_tstate_lock, but so far this does not ring a bell.
My research tells me that there are ~42 different solutions, frameworks, etc. on how to do this in different Python versions. So far I could not find a working approach for Python 3. E.g. I love telnetsrv (https://pypi.python.org/pypi/telnetsrv/0.4) for Python 2.7 (it uses greenlets from gevent), but that one does not work for Python 3. So if there is a more pythonic, std lib approach or something that works reliably, I would love to hear about it!
My bet currently is on socketserver, but I have not yet figured out how to deal with the hanging server. I removed all the log statements and most functionality so I can post this minimal server which exposes the issue:
# -*- coding: utf-8 -*-
import socketserver
import threading

SERVER = None

def shutdown_cmd(request):
    global SERVER
    request.send(bytes('server shutdown requested\n', 'utf-8'))
    request.close()
    SERVER.shutdown()
    print('after shutdown!!')
    #SERVER.server_close()

class service(socketserver.BaseRequestHandler):
    def handle(self):
        while True:
            try:
                msg = str(self.request.recv(1024).strip(), 'utf-8')
                if msg == 'shutdown':
                    shutdown_cmd(self.request)  # shutdown_cmd takes the request socket as its only argument
                else:
                    self.request.send(bytes("You said '{}'\n".format(msg), "utf-8"))
            except Exception as e:
                pass

class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    pass

def run():
    global SERVER
    SERVER = ThreadedTCPServer(('', 1520), service)
    server_thread = threading.Thread(target=SERVER.serve_forever)
    server_thread.daemon = True
    server_thread.start()
    input("Press enter to shutdown")
    SERVER.shutdown()

if __name__ == '__main__':
    run()
It would be great to be able to stop the server from the handler, too (see shutdown_cmd).
shutdown() works as expected: the server stops accepting new connections, but Python is still waiting for live threads to terminate.
By default, socketserver.ThreadingMixIn creates a new thread to handle each incoming connection, and by default those are non-daemon threads, so Python will wait for all live non-daemon threads to terminate.
Of course, you could make the server spawn daemon threads; then Python will not wait:
The ThreadingMixIn class defines an attribute daemon_threads, which indicates whether or not the server should wait for thread termination. You should set the flag explicitly if you would like threads to behave autonomously; the default is False, meaning that Python will not exit until all threads created by ThreadingMixIn have exited.
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    daemon_threads = True
But that is not the ideal solution; you should check why the threads never terminate. Usually, the server should stop processing a connection when no new data is available or the client has shut down the connection:
import socketserver
import threading

shutdown_evt = threading.Event()

class service(socketserver.BaseRequestHandler):
    def handle(self):
        # non-blocking so recv() never hangs forever; when no data is ready it raises,
        # which also ends the handler via the except below
        self.request.setblocking(False)
        while True:
            try:
                msg = self.request.recv(1024)
                if msg == b'shutdown':
                    shutdown_evt.set()
                    break
                elif msg:
                    self.request.send(b'you said: ' + msg)
                if shutdown_evt.wait(0.1):
                    break
            except Exception as e:
                break

class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    pass

def run():
    SERVER = ThreadedTCPServer(('127.0.0.1', 10000), service)
    server_thread = threading.Thread(target=SERVER.serve_forever)
    server_thread.daemon = True
    server_thread.start()
    input("Press enter to shutdown")
    shutdown_evt.set()
    SERVER.shutdown()

if __name__ == '__main__':
    run()
I tried two solutions to implement a tcp server which runs on Python 3 on both Linux and Windows (I tried Windows 7):
using socketserver (my question) - shutdown is not working
using asyncio (posted an answer for that) - does not work on Windows
Both solutions were based on search results from the web. In the end I had to give up on the idea of finding a proven solution because I could not find one. Consequently I implemented my own solution (based on gevent). I post it here because I hope it will help others avoid struggling the way I did.
# -*- coding: utf-8 -*-
from gevent.server import StreamServer
from gevent.pool import Pool

class EchoServer(StreamServer):
    def __init__(self, listener, handle=None, spawn='default'):
        StreamServer.__init__(self, listener, handle=handle, spawn=spawn)

    def handle(self, socket, address):
        print('New connection from %s:%s' % address[:2])
        socket.sendall(b'Welcome to the echo server! Type quit to exit.\r\n')
        # using a makefile because we want to use readline()
        rfileobj = socket.makefile(mode='rb')
        while True:
            line = rfileobj.readline()
            if not line:
                print("client disconnected")
                break
            if line.strip().lower() == b'quit':
                print("client quit")
                break
            if line.strip().lower() == b'shutdown':
                print("client initiated server shutdown")
                self.stop()
                break
            socket.sendall(line)
            print("echoed %r" % line.decode().strip())
        rfileobj.close()

srv = EchoServer(('', 1520), spawn=Pool(20))
srv.serve_forever()
After more research I found a sample that works using asyncio:
# -*- coding: utf-8 -*-
import asyncio

# after further research I found this relevant europython talk:
# https://www.youtube.com/watch?v=pi49aiLBas8
# * protocols and transport are useful if you do not have tons of socket based code
# * event loop pushes data in
# * transport used to push data back to the client
# found decent sample in book by wrox "professional python"

class ServerProtocol(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        self.write('Welcome')

    def connection_lost(self, exc):
        self.transport = None

    def data_received(self, data):
        if not data or data == '':
            return
        message = data.decode('ascii')
        command = message.strip().split(' ')[0].lower()
        args = message.strip().split(' ')[1:]
        # sanity check
        if not hasattr(self, 'command_%s' % command):
            self.write('Invalid command: %s' % command)
            return
        # run command
        try:
            return getattr(self, 'command_%s' % command)(*args)
        except Exception as ex:
            self.write('Error: %s' % str(ex))

    def write(self, msg):
        self.transport.write((msg + '\n').encode('ascii', 'ignore'))

    def command_shutdown(self):
        self.write('Okay. shutting down')
        raise KeyboardInterrupt

    def command_bye(self):
        self.write('bye then!')
        self.transport.close()
        self.transport = None

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    coro = loop.create_server(ServerProtocol, '127.0.0.1', 8023)
    asyncio.ensure_future(coro)  # was asyncio.async(coro); async is a reserved word in Python 3.7+
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
I understand that this is the most useful way to do this kind of network programming. If necessary the performance could be improved using the same code with uvloop (https://magic.io/blog/uvloop-blazing-fast-python-networking/).
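For example, as far as I understand uvloop's documentation, the only change needed is to install its event loop policy before the event loop is created; the rest of the asyncio server above stays the same (untested sketch):
# untested: swap asyncio's default event loop for uvloop's
import asyncio
import uvloop

asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

loop = asyncio.get_event_loop()  # now returns a uvloop-backed event loop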
Another way to shut down the server is by creating a process/thread for the serve_forever call.
After serve_forever is started, simply wait for a custom flag to trigger, call server_close on the server, and terminate the process.
from multiprocessing import Process, Value

# StreamingServer/StreamingHandler are the server and handler classes used elsewhere in
# this program; FLAG_KEEP_ALIVE is presumably a shared multiprocessing.Value that some
# other part of the code clears to trigger the shutdown
FLAG_KEEP_ALIVE = Value('b', True)

streaming_server = StreamingServer(('', 8000), StreamingHandler)
FLAG_KEEP_ALIVE.value = True
process_serve_forever = Process(target=streaming_server.serve_forever)
process_serve_forever.start()
while FLAG_KEEP_ALIVE.value:
    pass
streaming_server.server_close()
process_serve_forever.terminate()

Python socket closed before all data have been consumed by remote

I am writing a Python module which communicates with a Go program through Unix sockets. The client (the Python module) writes data to the socket and the server consumes it.
# Simplified version of the code used
outputStream = socket.socket(socketfamily, sockettype, protocol)
outputStream.connect(socketaddress)
outputStream.setblocking(True)
outputStream.sendall(message)
....
outputStream.close()
My issue is that the Python client tends to finish and close the socket before the data has actually been read by the server, which leads to a "broken pipe, connection reset by peer" error on the server side. Whatever I do, as far as the Python code is concerned everything has been sent, so the calls to send(), sendall() and select() are all successful...
Thanks in advance
EDIT: I can't use shutdown because of mac OS
EDIT2: I also tried to remove the timeout and call setblocking(True) but it doesn't change anything
EDIT3: After reading this issue http://bugs.python.org/issue6774 it seems that the documentation is unnecessarily scary, so I restored the shutdown, but I still have the same issue:
# Simplified version of the code used
outputStream = socket.socket(socketfamily, sockettype, protocol)
outputStream.connect(socketaddress)
outputStream.settimeout(5)
outputStream.sendall(message)
....
outputStream.shutdown(socket.SHUT_WR)
outputStream.close()
IMHO this is best done with an asynchronous I/O library/framework. Here's such a solution using circuits:
The server echoes what it receives to stdout, and the client opens a file and sends it to the server, waiting for the send to complete before closing the socket and terminating. This is done with a mixture of async I/O and coroutines.
server.py:
from circuits import Component
from circuits.net.sockets import UNIXServer

class Server(Component):
    def init(self, path):
        UNIXServer(path).register(self)

    def read(self, sock, data):
        print(data)

Server("/tmp/server.sock").run()
client.py:
import sys

from circuits import Component, Event
from circuits.net.sockets import UNIXClient
from circuits.net.events import connect, close, write

class done(Event):
    """done Event"""

class sendfile(Event):
    """sendfile Event"""

class Client(Component):
    def init(self, path, filename, bufsize=8192):
        self.path = path
        self.filename = filename
        self.bufsize = bufsize
        UNIXClient().register(self)

    def ready(self, *args):
        self.fire(connect(self.path))

    def connected(self, *args):
        self.fire(sendfile(self.filename, bufsize=self.bufsize))

    def done(self):
        raise SystemExit(0)

    def sendfile(self, filename, bufsize=8192):
        with open(filename, "r") as f:
            while True:
                try:
                    yield self.call(write(f.read(bufsize)))
                except EOFError:
                    break
                finally:
                    self.fire(close())
                    self.fire(done())

Client(*sys.argv[1:]).run()
In my testing of this it behaves exactly as I expect it to, with no errors, and the server gets the complete file before the client closes the socket and shuts down.
After a discussion with a colleague familiar with C sockets (in CPython the socket module is a wrapper around the C socket API), he pointed me to this: http://ia600609.us.archive.org/22/items/TheUltimateSo_lingerPageOrWhyIsMyTcpNotReliable/the-ultimate-so_linger-page-or-why-is-my-tcp-not-reliable.html (for the record, that's how it is done in PHP internals).
TL;DR: shutdown + quick poll + close, or ioctl(SIOCOUTQ) on Linux.
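Translated to my Python client, that pattern would look roughly like this (a sketch only: the helper name is made up and the timeout is arbitrary):
import socket

def close_when_drained(sock, timeout=5.0):
    """Signal EOF to the peer, then wait for it to finish reading before closing."""
    sock.shutdown(socket.SHUT_WR)   # send FIN: we will not write any more data
    sock.settimeout(timeout)
    try:
        # keep reading until the peer closes its side; recv() returning ''
        # means the server has consumed everything and closed
        while sock.recv(4096):
            pass
    except socket.timeout:
        pass  # the peer never closed in time; give up and close anyway
    finally:
        sock.close()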

Pymodbus/Twisted Asynchronous Client Reconnecting

I have written test code which reads some coils/registers from a PLC's Modbus server. When I make a single request the code works: if I unplug the cable, Twisted calls the clientConnectionLost function, so my client reconnects when I plug the cable back in. If I make multiple requests, like in the code below, the handling breaks and nothing happens. I don't know what causes the problem.
#!/usr/bin/env python
from PyQt4 import QtCore, QtGui
from twisted.internet import reactor, protocol,defer
from pymodbus.constants import Defaults
from pymodbus.client.async import ModbusClientProtocol
from time import sleep

def logger():
    import logging
    logging.basicConfig()
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)
logger()

class MyModbusClientProtocol(ModbusClientProtocol):
    def connectionMade(self):
        ModbusClientProtocol.connectionMade(self)
        print 'Connected'
        self.read()

    def read(self):
        deferred = self.read_coils(0,1999)
        deferred.addCallbacks(self.requestFetched,self.requestNotFetched)
        deferred = self.read_holding_registers(0,124)
        deferred.addCallbacks(self.requestFetched,self.requestNotFetched)

    def requestNotFetched(self,error):
        print error
        sleep(0.5)

    def requestFetched(self,response):
        try:
            print ("Fetched %d" % response.getRegister(1))
        except:
            print ("Fetched %d" % response.getBit(1))
        self.factory.counter += 1
        if self.factory.counter == 2:
            self.factory.counter = 0
            reactor.callLater(0,self.read)

class MyModbusClientFactory(protocol.ClientFactory):
    """A factory.

    A new protocol instance will be created each time we connect to the server.
    """

    def __init__(self):
        self.counter = 0

    def buildProtocol(self, addr):
        p = MyModbusClientProtocol()
        p.factory = self
        return p

    def clientConnectionLost(self, connector, reason):
        print "connection lost:", reason
        connector.connect()

    def clientConnectionFailed(self, connector, reason):
        print "connection failed:", reason
        connector.connect()

if __name__ == "__main__":
    factoryinstance = MyModbusClientFactory()
    reactor.connectTCP("192.168.2.69", 502, factoryinstance)
    reactor.run()
I have tested your code and believe you saw a timing-related red herring when your code appeared to work after commenting out one of your requests. The behavior you are seeing, where clientConnectionLost is not called, is covered in the Twisted FAQ: Why isn't my connectionLost method called?
What you need to do is create your own protocol-specific timeout, as you can't always rely on TCP's timeouts to work in your favor. A simple way to fix your code would be to add this to the end of your read method:
self.timeout = reactor.callLater(5, self.transport.abortConnection)
Which will abort the connection after 5 seconds wait. You also need to cancel this timeout when your requests have completed successfully with:
self.timeout.cancel()
in your requestFetched method before you call your read again.
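Putting both pieces together, the two methods of your protocol that change would look roughly like this (untested sketch based on your code; connectionMade and requestNotFetched stay the same):
from pymodbus.client.async import ModbusClientProtocol
from twisted.internet import reactor

class MyModbusClientProtocol(ModbusClientProtocol):
    def read(self):
        deferred = self.read_coils(0,1999)
        deferred.addCallbacks(self.requestFetched,self.requestNotFetched)
        deferred = self.read_holding_registers(0,124)
        deferred.addCallbacks(self.requestFetched,self.requestNotFetched)
        # protocol-level watchdog: drop the connection if the replies never arrive
        self.timeout = reactor.callLater(5, self.transport.abortConnection)

    def requestFetched(self,response):
        try:
            print ("Fetched %d" % response.getRegister(1))
        except:
            print ("Fetched %d" % response.getBit(1))
        self.factory.counter += 1
        if self.factory.counter == 2:
            self.factory.counter = 0
            self.timeout.cancel()  # both replies arrived in time; cancel the watchdog
            reactor.callLater(0,self.read)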

Gevent.monkey.patch_all breaks code that relies on socket.shutdown()

I'm currently working on adding support for gevent-socketio to an existing Django project. I'm finding that the gevent.monkey.patch_all() call breaks the cancellation mechanism of a thread which is responsible for receiving data from a socket; we'll call the class SocketReadThread for now.
SocketReadThread is pretty simple: it calls recv() on a blocking socket. When it receives data it processes it and calls recv() again. The thread stops when an exception occurs or when recv() returns 0 bytes, as happens when socket.shutdown(SHUT_RDWR) is called in SocketReadThread.stop_reading().
The problem occurs when the gevent.monkey.patch_all() replaces the default socket implementation. Instead of shutting down nicely I get the following exception:
error: [Errno 9] File descriptor was closed in another greenlet
I'm assuming this is occurring because gevent makes my socket non-blocking in order to work its magic. This means that when I call socket.shutdown(socket.SHUT_RDWR) the greenlet that was doing the work for the monkey patched socket.recv call tried to read from the closed file descriptor.
I coded an example to isolate this issue:
from gevent import monkey
monkey.patch_all()

import socket
import sys
import threading
import time

class SocketReadThread(threading.Thread):
    def __init__(self, socket):
        super(SocketReadThread, self).__init__()
        self._socket = socket

    def run(self):
        connected = True
        while connected:
            try:
                print "calling socket.recv"
                data = self._socket.recv(1024)
                if (len(data) < 1):
                    print "received nothing, assuming socket shutdown"
                    connected = False
                else:
                    print "Received something: {}".format(data)
            except socket.timeout as e:
                print "Socket timeout: {}".format(e)
                connected = False
            except:
                ex = sys.exc_info()[1]
                print "Unexpected exception occurred: {}".format(str(ex))
                raise ex

    def stop_reading(self):
        self._socket.shutdown(socket.SHUT_RDWR)
        self._socket.close()

if __name__ == '__main__':
    sock = socket.socket()
    sock.connect(('127.0.0.1', 4242))
    st = SocketReadThread(sock)
    st.start()
    time.sleep(3)
    st.stop_reading()
    st.join()
If you open a terminal and run nc -lp 4242 & (to give this program something to connect to) and then run this program, you will see the exception mentioned above. If you remove the call to monkey.patch_all() you will see that it works just fine.
My question is: how can I support cancellation of the SocketReadThread in a way that works with or without gevent monkey patching and doesn't require an arbitrary timeout that would make cancellation slow (i.e. calling recv() with a timeout and checking a conditional)?
I found that there were two different workarounds for this. The first was to simply catch and suppress the exception (sketched below). This appears to work fine, since it is common practice for one thread to close a socket in order to cause another thread to exit from a blocking read. I don't know why greenlets would complain about this other than as a debugging aid; it is really just an annoyance.
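A sketch of that first workaround, applied to the run() method of SocketReadThread above (I'm assuming the error surfaces as a socket.error with errno 9/EBADF, which is what the traceback shows; __init__ and stop_reading stay as in the original):
import errno
import socket
import threading

class SocketReadThread(threading.Thread):
    def run(self):
        connected = True
        while connected:
            try:
                data = self._socket.recv(1024)
                if len(data) < 1:
                    connected = False
            except socket.error as e:
                # errno 9 (EBADF) is the "File descriptor was closed in another
                # greenlet" case: another thread shut the socket down on purpose
                if e.errno == errno.EBADF:
                    connected = False
                else:
                    raise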
The second option was to use the self-pipe trick (a quick search yields many explanations) as a mechanism to wake up a blocked thread. Essentially we create a second file descriptor (a socket is a kind of file descriptor to the OS) for signaling cancellation. We then use select as our blocking call to wait for either incoming data on the socket or a cancellation request on the cancellation file descriptor. See the example code below.
from gevent import monkey
monkey.patch_all()

import os
import select
import socket
import sys
import threading
import time

class SocketReadThread(threading.Thread):
    def __init__(self, socket):
        super(SocketReadThread, self).__init__()
        self._socket = socket
        self._socket.setblocking(0)
        r, w = os.pipe()
        self._cancelpipe_r = os.fdopen(r, 'r')
        self._cancelpipe_w = os.fdopen(w, 'w')

    def run(self):
        connected = True
        read_fds = [self._socket, self._cancelpipe_r]
        while connected:
            print "Calling select"
            read_list, write_list, x_list = select.select(read_fds, [], [])
            print "Select returned"
            if self._cancelpipe_r in read_list:
                print "exiting"
                self._cleanup()
                connected = False
            elif self._socket in read_list:
                print "calling socket.recv"
                data = self._socket.recv(1024)
                if (len(data) < 1):
                    print "received nothing, assuming socket shutdown"
                    connected = False
                    self._cleanup()
                else:
                    print "Received something: {}".format(data)

    def stop_reading(self):
        print "writing to pipe"
        self._cancelpipe_w.write("\n")
        self._cancelpipe_w.flush()
        print "joining"
        self.join()
        print "joined"

    def _cleanup(self):
        self._cancelpipe_r.close()
        self._cancelpipe_w.close()
        self._socket.shutdown(socket.SHUT_RDWR)
        self._socket.close()

if __name__ == '__main__':
    sock = socket.socket()
    sock.connect(('127.0.0.1', 4242))
    st = SocketReadThread(sock)
    st.start()
    time.sleep(3)
    st.stop_reading()
Again, before running the above program run netcat -lp 4242 & to give it a listening socket to connect to.

Simple Twisted Echo Client

I'm trying to write a simple echo client in Twisted that sends keyboard input to the server and is terminated by the user entering 'q' on its own. In short, I'm just trying to modify the simple echo client (and variants) found on this page. Nothing sexy at all, just the basics.
I'm struggling with the very basic event loop. It looks like I can't start/stop the reactor within the loop as a stopped reactor cannot be restarted. If I don't stop the reactor, then I'll never get to the next line that gets keyboard input.
Any help in getting my echo client working would be much appreciated.
from twisted.internet.protocol import ClientFactory
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor

class EchoClient(LineReceiver):
    end="Bye-bye!"

    def connectionMade(self):
        #only write and end transmission if the message isn't empty
        if len(self.factory.message) > 0:
            self.sendLine(self.factory.message)
            self.sendLine(self.end)
        else:
            #Else just terminate the connection
            self.transport.loseConnection()

    def lineReceived(self, line):
        print "receive:", line
        if line==self.end:
            self.transport.loseConnection()

class EchoClientFactory(ClientFactory):
    message = ""

    def buildProtocol(self, address):
        p = EchoClient()
        p.factory = self
        return p

    def clientConnectionFailed(self, connector, reason):
        reactor.stop()

    def clientConnectionLost(self, connector, reason):
        reactor.stop()

def main():
    s = raw_input('Text to send (''q'' to terminate): ')
    while s != 'q':
        factory = EchoClientFactory()
        factory.message = s
        reactor.connectTCP('localhost', 8000, factory)
        #This is bad because reactor cannot be restarted once it's been stopped
        reactor.run()
        s = raw_input('Text to send (''q'' to terminate): ')

if __name__ == '__main__':
    main()
As a rule of thumb, there are very rare instances where you would want to restart or stop the reactor, unless you are terminating your program altogether. If you encounter a piece of code that blocks, e.g. database access, a long computation, or in your case raw_input, you have to either find a Twisted alternative (twisted.enterprise.adbapi in the case of databases) or make it Twisted-compatible.
The easiest way to 'deblock' your code is to move the blocking bits into a thread by utilizing deferToThread from twisted.internet.threads.
Consider this example:
from twisted.internet.threads import deferToThread
from twisted.internet import reactor

def mmprint(s):
    print(s)

class TwistedRAWInput(object):
    def start(self,callable,terminator):
        self.callable=callable
        self.terminator=terminator
        self.startReceiving()

    def startReceiving(self,s=''):
        if s!=self.terminator:
            self.callable(s)
            deferToThread(raw_input,':').addCallback(self.startReceiving)

tri = TwistedRAWInput()
reactor.callWhenRunning(tri.start,mmprint,'q')
reactor.run()
You would never have to stop the reactor, as raw_input happens in an outside thread, firing the deferred's callback on every new line.
