I need your help please.
This code only works once; a second wget times out. The command I run is:
wget http://localhost:9090
#!/usr/bin/env python
import trollius as asyncio
from trollius import From
import os


class Client(asyncio.Protocol):
    def connection_made(self, transport):
        self.connected = True
        # save the transport
        self.transport = transport

    def data_received(self, data):
        # forward data to the server
        self.server_transport.write(data)

    def connection_lost(self, *args):
        self.connected = False


class Server(asyncio.Protocol):
    clients = {}

    def connection_made(self, transport):
        # save the transport
        self.transport = transport

    @asyncio.coroutine
    def send_data(self, data):
        # get a client by its peername
        peername, port = self.transport.get_extra_info('peername')
        client = self.clients.get(peername)

        # create a client if peername is not known or the client disconnected
        if client is None or not client.connected:
            protocol, client = yield From(loop.create_connection(Client, 'google.com', 80))
            client.server_transport = self.transport
            self.clients[peername] = client

        # forward data to the client
        client.transport.write(data)

    def data_received(self, data):
        # use a task so this is executed async
        asyncio.Task(self.send_data(data))


@asyncio.coroutine
def initialize(loop):
    # use a coroutine to use yield from and get the async result of
    # create_server
    server = yield From(loop.create_server(Server, '127.0.0.1', 9090))


loop = asyncio.get_event_loop()
# main task to initialize everything
asyncio.Task(initialize(loop))
# run
loop.run_forever()
Does anyone know the reason?
Thanks!
You need a real 'loop' in servers when you are writing socket servers in asyncio. Note that despite the 'sync' style of the code, infinite loops do not block execution here. You need an infinite while loop within your server. There are many samples; I recommend the websockets library samples!
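For illustration, here is a minimal sketch of the kind of per-client loop described above, written with asyncio streams rather than the question's trollius/Protocol code; the plain echo behaviour and the port are assumptions, not the answerer's code.

import asyncio

async def handle_client(reader, writer):
    # keep serving this client until it disconnects
    while True:
        data = await reader.read(1024)
        if not data:
            break
        writer.write(data)
        await writer.drain()
    writer.close()

async def main():
    server = await asyncio.start_server(handle_client, '127.0.0.1', 9090)
    async with server:
        await server.serve_forever()

asyncio.run(main())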
I would like to forward data captured on one serial port to a multi-client TCP server. In short, I need a serial-to-TCP/IP bridge.
import sys
import socket
from threading import Thread

import serial
import serial.threaded


class SerialToNet(serial.threaded.Protocol):
    """serial->socket"""

    def __init__(self):
        self.sockets: list[socket.socket] = []

    def __call__(self):
        return self

    def data_received(self, data):
        """Forward data from Serial to IP client Sockets"""
        for socket in self.sockets:
            socket.sendall(data)


class NetToSerial(Thread):
    """socket->serial"""

    serial_worker: serial.threaded.ReaderThread

    def __init__(self, client_socket):
        Thread.__init__(self)
        self._socket = client_socket

    def run(self):
        try:
            while True:
                data = self._socket.recv(1024)
                serial_worker.write(data)
        except (ConnectionAbortedError, ConnectionResetError):
            print("NetToSerial client disconnection")
            return


if __name__ == "__main__":
    # Serial connection
    SERIAL_COM_PORT = 'COM9'
    try:
        ser = serial.Serial(SERIAL_COM_PORT, 115200, timeout=2)
    except serial.SerialException:
        sys.exit(f"Serial port {SERIAL_COM_PORT} is not available")

    serial_to_net = SerialToNet()
    serial_worker = serial.threaded.ReaderThread(ser, serial_to_net)
    serial_worker.start()

    # TCP server
    # :todo Use socketserver.TCPServer
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(('', 3490))

    mythreads = []
    try:
        # Wait for new IP clients
        while True:
            server_socket.listen()
            print("Server: waiting TCP client connection")
            (client_socket, _) = server_socket.accept()
            # New client
            net_to_serial_thread = NetToSerial(client_socket)
            net_to_serial_thread.serial_worker = serial_worker
            serial_to_net.sockets.append(client_socket)
            net_to_serial_thread.start()
            mythreads.append(net_to_serial_thread)
    except KeyboardInterrupt:
        pass

    for t in mythreads:
        t.join()
This implementation mostly works, but I don't know how to update the sockets list in the SerialToNet class when a TCP client disconnects.
You need to implement some logic for when a network client disconnects.
You know a client has disconnected because you receive an empty response (b'') from the socket. You're receiving data from network clients in NetToSerial, here:
def run(self):
    try:
        while True:
            data = self._socket.recv(1024)
            serial_worker.write(data)
    except (ConnectionAbortedError, ConnectionResetError):
        print("NetToSerial client disconnection")
        return
You need to check the value of data, and if it's empty implement your disconnect logic:
Close the associated socket.
Exit the thread.
That might look like:
class NetToSerial(Thread):
    """socket->serial"""

    serial_worker: serial.threaded.ReaderThread

    def __init__(self, client_socket):
        Thread.__init__(self)
        self._socket = client_socket

    def run(self):
        try:
            while True:
                data = self._socket.recv(1024)
                if not data:
                    break
                serial_worker.write(data)
        except (ConnectionAbortedError, ConnectionResetError):
            print("NetToSerial client disconnection")
            return
        finally:
            self._socket.close()
But that's only half the solution, because you're writing to this socket in your SerialToNet class. You need to remove the socket from the SerialToNet sockets list. You can have the class remove the socket in response to an exception when writing, like this:
class SerialToNet(serial.threaded.Protocol):
    """serial->socket"""

    def __init__(self):
        self.sockets: list[socket.socket] = []

    def __call__(self):
        return self

    def data_received(self, data):
        """Forward data from Serial to IP client Sockets"""
        for socket in self.sockets[:]:
            try:
                socket.sendall(data)
            except OSError:
                self.sockets.remove(socket)
Note that because it isn't safe to remove items from a list you're currently iterating over, the code above iterates over a copy of self.sockets. This means we're free to remove sockets from self.sockets from inside the loop.
With the above changes I believe your code will operate as you intend.
Not directly related to your question, but I'd like to make a comment about your code: as written, it allows multiple network clients to write to the serial port at the same time. That seems like a recipe for disaster and I cannot think of any situation in which that would make sense. You may want to reconsider that aspect of your code.
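If you do keep multiple network clients, one simple mitigation (my suggestion, not part of the question's design) is to serialize access to the serial port with a lock in NetToSerial.run(), so at least individual writes don't interleave:

import threading

serial_write_lock = threading.Lock()

class NetToSerial(Thread):
    """socket->serial"""

    def __init__(self, client_socket):
        Thread.__init__(self)
        self._socket = client_socket

    def run(self):
        try:
            while True:
                data = self._socket.recv(1024)
                if not data:
                    break
                # only one client thread writes to the serial port at a time
                with serial_write_lock:
                    serial_worker.write(data)
        except (ConnectionAbortedError, ConnectionResetError):
            print("NetToSerial client disconnection")
        finally:
            self._socket.close()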
I have a module which makes blocking network requests to a TCP server and receives responses. I need to integrate it into an asyncio application. My module looks like this:
import socket


# class providing transport facilities
# can be implemented in any manner
# for example it can manage an asyncio connection
class CustomTransport:
    def __init__(self, host, port):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.host = host
        self.port = port

    def write(self, data):
        left = len(data)
        while left:
            written = self.sock.send(data.encode('utf-8'))
            left = left - written
            data = data[written:]

    def read(self, sz):
        return self.sock.recv(sz)

    def open(self):
        self.sock.connect((self.host, self.port))

    def close(self):
        self.sock.shutdown(2)


# generated. shouldn't be modified
# however any transport can be passed
class HelloNetClient:
    def __init__(self, transport):
        self.transport = transport

    def say_hello_net(self):
        self.transport.write('hello')
        response = self.transport.read(5)
        return response


# can be modified
class HelloService:
    def __init__(self):
        # create transport for connection to echo TCP server
        self.transport = CustomTransport('127.0.0.1', 6789)
        self.hello_client = HelloNetClient(self.transport)

    def say_hello(self):
        print('Saying hello...')
        return self.hello_client.say_hello_net()

    def __enter__(self):
        self.transport.open()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.transport.close()
Usage:
def start_conversation():
    with HelloService() as hs:
        answer = hs.say_hello()
        print(answer.decode('utf-8'))


if __name__ == "__main__":
    start_conversation()
Now I see that the only way to make my module compatible with asyncio is to convert everything to coroutines and replace the regular socket with an asyncio-provided transport. But I don't want to touch the generated code (HelloNetClient). Is that possible?
P.S. I want it to be used like this:
async def start_conversation():
    async with HelloService() as hs:
        answer = await hs.say_hello()
        print(answer.decode('utf-8'))


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(start_conversation())
HelloService will probably need to use run_in_executor (which manages a thread pool) to run HelloNetClient methods in the background. For example:
async def say_hello(self):
    print('Saying hello...')
    loop = asyncio.get_event_loop()
    return await loop.run_in_executor(None, self.hello_client.say_hello_net)
This is not an idiomatic use of asyncio, and you're missing out on some of its features - for example, you won't be able to create thousands of clients that all work in parallel, and you won't get reliable cancellation (ability to cancel() any task you wish). Nonetheless, simple usage will work just fine.
Unfortunately the ability to provide custom transports doesn't help here, because the middle tier, HelloNetClient, expects synchronous behavior. Even if you were to write a custom transport that hooked into asyncio, methods like say_hello_net would still block for as long as it takes for the response to arrive, so HelloService would have to schedule them in a separate thread. For this reason your best bet is to keep the default transport and bridge the synchronous client to asyncio in the service, as shown above.
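For the async with usage shown in the question to work, HelloService would also need async context-manager methods. Here is a minimal sketch along the same run_in_executor lines; it assumes the CustomTransport and HelloNetClient classes from the question and is only one way to wire this up:

import asyncio

class HelloService:
    def __init__(self):
        # create transport for connection to echo TCP server
        self.transport = CustomTransport('127.0.0.1', 6789)
        self.hello_client = HelloNetClient(self.transport)

    async def say_hello(self):
        print('Saying hello...')
        loop = asyncio.get_event_loop()
        # run the blocking client call in the default thread pool
        return await loop.run_in_executor(None, self.hello_client.say_hello_net)

    async def __aenter__(self):
        loop = asyncio.get_event_loop()
        # open() blocks on connect(), so run it in the executor too
        await loop.run_in_executor(None, self.transport.open)
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        loop = asyncio.get_event_loop()
        await loop.run_in_executor(None, self.transport.close)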
I have a simple multithreaded server, but it creates a new thread for each socket, and I don't want to create a lot of threads. My idea is to receive the messages in another way: when a user sends a message, it is added to a queue of messages, and the server handles these requests with a thread pool.
The simple multithreaded server:
import socket
import threading


class ThreadedServer(object):
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))

    def listen(self):
        self.sock.listen(5)
        while True:
            client, address = self.sock.accept()
            client.settimeout(60)
            threading.Thread(target=self.listenToClient, args=(client, address)).start()

    def listenToClient(self, client, address):
        size = 1024
        while True:
            try:
                data = client.recv(size)
                if data:
                    # Set the response to echo back the received data
                    response = data
                    client.send(response)
                else:
                    raise Exception('Client disconnected')
            except Exception:
                client.close()
                return False


if __name__ == "__main__":
    port_num = int(input("Port? "))
    ThreadedServer('', port_num).listen()
How can I implement my idea, or is there a better way to do it?
The question is pretty old, but I stumbled upon the same issue while working on a socket server, so here is code you can use to make a threaded socket server that doesn't spawn a new thread for every request.
The gist is that socketserver.ThreadingMixIn is overridden to use a thread pool.
import socket
import socketserver
import threading
from queue import Queue


class ThreadPoolMixIn(socketserver.ThreadingMixIn):
    '''
    use a thread pool instead of a new thread on every request
    '''
    # numThreads = 50
    allow_reuse_address = True  # seems to fix socket.error on server restart

    def serve_forever(self):
        '''
        Handle one request at a time until doomsday.
        '''
        print('[X] Server is Running with No of Threads :- {}'.format(self.numThreads))

        # set up the threadpool
        self.requests = Queue(self.numThreads)
        for x in range(self.numThreads):
            t = threading.Thread(target=self.process_request_thread)
            t.setDaemon(1)
            t.start()

        # server main loop
        while True:
            self.handle_request()
        self.server_close()

    def process_request_thread(self):
        '''
        obtain request from queue instead of directly from server socket
        '''
        while True:
            socketserver.ThreadingMixIn.process_request_thread(self, *self.requests.get())

    def handle_request(self):
        '''
        simply collect requests and put them on the queue for the workers.
        '''
        try:
            request, client_address = self.get_request()
        except socket.error:
            return
        if self.verify_request(request, client_address):
            self.requests.put((request, client_address))
Then a ThreadedTCPServer uses this mixin and overrides the numThreads parameter:
class ThreadedTCPServer(ThreadPoolMixIn, socketserver.TCPServer):
    # Extend the base class and override the thread parameter to control the number of threads.
    def __init__(self, no_of_threads, server_address, ThreadedTCPRequestHandler):
        self.numThreads = no_of_threads
        super().__init__(server_address, ThreadedTCPRequestHandler)
Finally, create the server, which serves forever:
def create_multi_threaded_socket(CONFIG, HandlerClass=ThreadedTCPRequestHandler,
                                 ServerClass=ThreadedTCPServer,
                                 protocol="HTTP/1.0"):
    server_address = ('', CONFIG.port)
    HandlerClass.protocol_version = protocol
    # httpd = ServerClass(server_address, HandlerClass)
    server = ThreadedTCPServer(CONFIG.no_of_threads, server_address, ThreadedTCPRequestHandler)
    sa = server.socket.getsockname()
    print("Serving HTTP on {} port : {}".format(sa[0], sa[1]))
    server.serve_forever()
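Note that ThreadedTCPRequestHandler is referenced above but not shown. A minimal echo handler along these lines would do; this sketch is my own assumption, not part of the original recipe:

import socketserver

class ThreadedTCPRequestHandler(socketserver.BaseRequestHandler):
    def handle(self):
        # echo a single request back to the client
        data = self.request.recv(1024)
        if data:
            self.request.sendall(data)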
I got the sample code from http://code.activestate.com/recipes/574454-thread-pool-mixin-class-for-use-with-socketservert/ and modified it a bit for my needs.
Hope this helps :)
I'm trying to write a Server-Sent Events (SSE) server which I can connect to with telnet and have the telnet content pushed to a browser. The idea behind using Python and asyncio is to use as little CPU as possible, as this will be running on a Raspberry Pi.
So far I have the following, which uses the asyncio-based library found here: https://pypi.python.org/pypi/asyncio-sse/0.1
And I have also copied a telnet server which uses asyncio as well.
Both work separately, but I have no idea how to tie both together. As I understand it, I need to call send() in the SSEHandler class from inside Telnet.data_received, but I don't know how to access it. Both of these 'servers' need to be running in a loop to accept new connections, or push data.
Can anyone help, or point me in another direction?
import asyncio
import sse

# Get an instance of the asyncio event loop
loop = asyncio.get_event_loop()

# Setup SSE address and port
sse_host, sse_port = '192.168.2.25', 8888


class Telnet(asyncio.Protocol):
    def connection_made(self, transport):
        print("Connection received!")
        self.transport = transport

    def data_received(self, data):
        print(data)
        self.transport.write(b'echo:')
        self.transport.write(data)
        # This is where I want to send data via SSE
        # SSEHandler.send(data)
        # Things I've tried :(
        # loop.call_soon_threadsafe(SSEHandler.handle_request());
        # loop.call_soon_threadsafe(sse_server.send("PAH!"));

    def connection_lost(self, esc):
        print("Connection lost!")
        telnet_server.close()


class SSEHandler(sse.Handler):
    @asyncio.coroutine
    def handle_request(self):
        self.send('Working')


# SSE server
sse_server = sse.serve(SSEHandler, sse_host, sse_port)

# Telnet server
telnet_server = loop.run_until_complete(loop.create_server(Telnet, '192.168.2.25', 7777))

# telnet_server.something = sse_server;
loop.run_until_complete(sse_server)
loop.run_until_complete(telnet_server.wait_closed())
Server-sent events are a sort of HTTP protocol, and you may have any number of concurrent HTTP requests in flight at any given moment: zero if nobody is connected, or dozens. This nuance is all wrapped up in the two constructs sse.serve and sse.Handler; the former represents a single listening port, which dispatches each separate client request to the latter.
Additionally, sse.Handler.handle_request() is called once for each client, and the client is disconnected once that coroutine terminates. In your code, that coroutine terminates immediately, so the client sees a single "Working" event. We need to wait, more or less forever, and we can do that by yield from-ing an asyncio.Future().
The second issue is that we'll need to get hold of all of the separate SSEHandler() instances and call the send() method on each of them. We can have each one self-register in its handle_request() method, by adding itself to a dict that maps each handler instance to the future it is waiting on.
class SSEHandler(sse.Handler):
    _instances = {}

    @asyncio.coroutine
    def handle_request(self):
        self.send('Working')
        my_future = asyncio.Future()
        SSEHandler._instances[self] = my_future
        yield from my_future
Now, to send an event to every listener, we just visit all of the SSEHandler instances registered in the dict we created and call send() on each one.
class SSEHandler(sse.Handler):
    # ...

    @classmethod
    def broadcast(cls, message):
        for instance, future in cls._instances.items():
            instance.send(message)


class Telnet(asyncio.Protocol):
    # ...

    def data_received(self, data):
        # ...
        SSEHandler.broadcast(data.decode('ascii'))
Lastly, your code exits when the telnet connection closes. That's fine, but we should clean up at that time, too. Fortunately, that's just a matter of setting a result on each handler's future.
class SSEHandler(sse.Handler):
    # ...

    @classmethod
    def abort(cls):
        for instance, future in cls._instances.items():
            future.set_result(None)
        cls._instances = {}


class Telnet(asyncio.Protocol):
    # ...

    def connection_lost(self, esc):
        print("Connection lost!")
        SSEHandler.abort()
        telnet_server.close()
Here's a full, working dump in case my illustration is not obvious:
import asyncio
import sse

loop = asyncio.get_event_loop()
sse_host, sse_port = '0.0.0.0', 8888


class Telnet(asyncio.Protocol):
    def connection_made(self, transport):
        print("Connection received!")
        self.transport = transport

    def data_received(self, data):
        SSEHandler.broadcast(data.decode('ascii'))

    def connection_lost(self, esc):
        print("Connection lost!")
        SSEHandler.abort()
        telnet_server.close()


class SSEHandler(sse.Handler):
    _instances = {}

    @classmethod
    def broadcast(cls, message):
        for instance, future in cls._instances.items():
            instance.send(message)

    @classmethod
    def abort(cls):
        for instance, future in cls._instances.items():
            future.set_result(None)
        cls._instances = {}

    @asyncio.coroutine
    def handle_request(self):
        self.send('Working')
        my_future = asyncio.Future()
        SSEHandler._instances[self] = my_future
        yield from my_future


sse_server = sse.serve(SSEHandler, sse_host, sse_port)
telnet_server = loop.run_until_complete(loop.create_server(Telnet, '0.0.0.0', 7777))
loop.run_until_complete(sse_server)
loop.run_until_complete(telnet_server.wait_closed())
I am trying to learn how to use sockets and a useful asynchronous backend. I've started in Python with asyncore.
It seems to work. I open a python interactive session and type
> import chatserver
> server = chatserver.EchoServer('localhost', 7667)
> server.serve()
Then I open another IPython interactive session and type
> import chatserver
> cxn = chatserver.Connection()
> cxn._connect('localhost', 7667)
When I do that, I get a log output in the server window indicating that a connection has been made. Good. Then I type
> cxn.say('hi')
Nothing happens for a while, and then log messages show up for the server and client as expected.
Why is this delay occurring?
Am I using the log functionality correctly?
I used threading so that I could use the interactive session while the asyncore loop does its thing for the Connection. Did I do this in a reasonable way?
(optional) If I don't include the line self.out_buffer="" in Connection._connect, I get an error saying that .out_buffer does not exist. What's up with this?
import asyncore
import socket
import logging
import threading

logging.basicConfig(level=logging.DEBUG,
                    format="%(created)-15s %(msecs)d %(levelname)8s %(thread)d %(name)s %(message)s")
log = logging.getLogger(__name__)


class Connection(asyncore.dispatcher_with_send):
    def __init__(self):
        asyncore.dispatcher.__init__(self)

    def _connect(self, host, port, timeout=5, password=None):
        self.host = host
        self.port = port
        self.out_buffer = ""
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect((host, port))
        # Run the asyncore loop in its own thread so that we can use the interactive session
        self.loop = threading.Thread(target=asyncore.loop)
        self.loop.daemon = True
        self.loop.start()

    def say(self, msg):
        self.out_buffer = msg

    def handle_read(self):
        data = self.recv(4096)
        log.debug('Received %s' % data)


class EchoHandler(asyncore.dispatcher_with_send):
    def handle_read(self):
        log.debug("handle_read")
        data = self.recv(1024)
        log.debug("after recv")
        if data:
            log.debug("got data: %s" % data)
            self.out_buffer = data
        else:
            log.debug("got null data")


class EchoServer(asyncore.dispatcher):
    SOCKET_TYPE = socket.SOCK_STREAM
    ADDRESS_FAMILY = socket.AF_INET

    def __init__(self, host, port):
        self.address = (host, port)
        asyncore.dispatcher.__init__(self)
        self.create_socket(self.ADDRESS_FAMILY, self.SOCKET_TYPE)
        log.debug("bind address=%s %s" % (host, port))
        self.bind(self.address)
        self.listen(1)

    def fileno(self):
        return self.socket.fileno()

    def serve(self):
        asyncore.loop()
        # Start asyncore loop in new thread
        # self.loop = threading.Thread(target=asyncore.loop)
        # self.loop.daemon = True
        # self.loop.start()

    def handle_accept(self):
        """Deal with a newly accepted client"""
        (connSock, clientAddress) = self.accept()
        log.info("conn made: clientAddress=%s %s" % (clientAddress[0], clientAddress[1]))
        # Make a handler for this connection
        EchoHandler(connSock)

    def handle_close(self):
        self.close()
Looking at the asyncore docs, you are relying on asyncore.dispatcher_with_send to call send(), and the default timeout for asyncore.loop() is 30 seconds. This may explain the delay.
It turns out the problem was as Eero suggested.
I made two changes:
In EchoServer, I changed asyncore.loop() to asyncore.loop(timeout=0.1).
In Connection, I changed self.loop = threading.Thread(target=asyncore.loop) to self.loop = threading.Thread(target=asyncore.loop, kwargs={'timeout': 0.1}).
The response is now much faster. This seems like a hack, though, so if someone can explain how to get the same effect in a proper way, please contribute.