I'm using Pika to process data from RabbitMQ.
Since I kept running into different kinds of problems, I decided to write a small test application to see how I can handle disconnects.
I wrote this test app, which does the following:
Connect to Broker, retry until successful
When connected create a queue.
Consume this queue and put the result into a Python Queue.Queue(0)
Get items from the Queue.Queue(0) and publish them back into the broker queue.
What I noticed were 2 issues:
When I run my script from one host connecting to rabbitmq on another host (inside a vm), the script exits at random moments without producing an error.
When I run my script on the same host on which RabbitMQ is installed it runs fine and keeps running.
This might be explained by network issues or dropped packets, although I would expect the connection to be more robust than that.
When the script runs locally on the RabbitMQ server and I kill RabbitMQ, the script exits with the error: "ERROR pika SelectConnection: Socket Error on 3: 104"
So it looks like I can't get the reconnection strategy working as it should. Could someone have a look at the code to see what I'm doing wrong?
Thanks,
Jay
#!/bin/python
import logging
import threading
import Queue
import pika
from pika.reconnection_strategies import SimpleReconnectionStrategy
from pika.adapters import SelectConnection
import time
from threading import Lock
class Broker(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.logging = logging.getLogger(__name__)
self.to_broker = Queue.Queue(0)
self.from_broker = Queue.Queue(0)
self.parameters = pika.ConnectionParameters(host='sandbox',heartbeat=True)
self.srs = SimpleReconnectionStrategy()
self.properties = pika.BasicProperties(delivery_mode=2)
self.connection = None
while True:
try:
self.connection = SelectConnection(self.parameters, self.on_connected, reconnection_strategy=self.srs)
break
except Exception as err:
self.logging.warning('Cant connect. Reason: %s' % err)
time.sleep(1)
self.daemon=True
def run(self):
while True:
self.submitData(self.from_broker.get(block=True))
pass
def on_connected(self,connection):
connection.channel(self.on_channel_open)
def on_channel_open(self,new_channel):
self.channel = new_channel
self.channel.queue_declare(queue='sandbox', durable=True)
self.channel.basic_consume(self.processData, queue='sandbox')
def processData(self, ch, method, properties, body):
self.logging.info('Received data from broker')
self.channel.basic_ack(delivery_tag=method.delivery_tag)
self.from_broker.put(body)
def submitData(self,data):
self.logging.info('Submitting data to broker.')
self.channel.basic_publish(exchange='',
routing_key='sandbox',
body=data,
properties=self.properties)
if __name__ == '__main__':
format=('%(asctime)s %(levelname)s %(name)s %(message)s')
logging.basicConfig(level=logging.DEBUG, format=format)
broker=Broker()
broker.start()
try:
broker.connection.ioloop.start()
except Exception as err:
print err
The main problem with your script is that it is interacting with a single channel from both your main thread (where the ioloop is running) and the "Broker" thread (which calls submitData in a loop). This is not safe.
Also, SimpleReconnectionStrategy does not seem to do anything useful. It does not cause a reconnect if the connection is interrupted. I believe this is a bug in Pika: https://github.com/pika/pika/issues/120
I attempted to refactor your code to make it work as I think you wanted it to, but ran into another problem. Pika does not appear to have a way to detect delivery failure, which means that data may be lost if the connection drops. This seems like such an obvious requirement! How can there be no way to detect that basic_publish failed? I tried all kinds of stuff including transactions and add_on_return_callback (all of which seemed clunky and overly complicated), but came up with nothing. If there truly is no way then pika only seems to be useful in situations that can tolerate loss of data sent to RabbitMQ, or in programs that only need to consume from RabbitMQ.
This is not reliable, but for reference, here's some code that solves your multi-thread problem:
import logging
import pika
import Queue
import sys
import threading
import time
from functools import partial
from pika.adapters import SelectConnection, BlockingConnection
from pika.exceptions import AMQPConnectionError
from pika.reconnection_strategies import SimpleReconnectionStrategy
log = logging.getLogger(__name__)
DEFAULT_PROPERTIES = pika.BasicProperties(delivery_mode=2)
class Broker(object):
def __init__(self, parameters, on_channel_open, name='broker'):
self.parameters = parameters
self.on_channel_open = on_channel_open
self.name = name
def connect(self, forever=False):
name = self.name
while True:
try:
connection = SelectConnection(
self.parameters, self.on_connected)
log.debug('%s connected', name)
except Exception:
if not forever:
raise
log.warning('%s cannot connect', name, exc_info=True)
time.sleep(10)
continue
try:
connection.ioloop.start()
finally:
try:
connection.close()
connection.ioloop.start() # allow connection to close
except Exception:
pass
if not forever:
break
def on_connected(self, connection):
connection.channel(self.on_channel_open)
def setup_submitter(channel, data_queue, properties=DEFAULT_PROPERTIES):
def on_queue_declared(frame):
# PROBLEM pika does not appear to have a way to detect delivery
# failure, which means that data could be lost if the connection
# drops...
channel.confirm_delivery(on_delivered)
submit_data()
def on_delivered(frame):
if frame.method.NAME in ['Confirm.SelectOk', 'Basic.Ack']:
log.info('submission confirmed %r', frame)
# increasing this value seems to cause a higher failure rate
time.sleep(0)
submit_data()
else:
log.warn('submission failed: %r', frame)
#data_queue.put(...)
def submit_data():
log.info('waiting on data queue')
data = data_queue.get()
log.info('got data to submit')
channel.basic_publish(exchange='',
routing_key='sandbox',
body=data,
properties=properties,
mandatory=True)
log.info('submitted data to broker')
channel.queue_declare(
queue='sandbox', durable=True, callback=on_queue_declared)
def blocking_submitter(parameters, data_queue,
properties=DEFAULT_PROPERTIES):
while True:
try:
connection = BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue='sandbox', durable=True)
except Exception:
log.error('connection failure', exc_info=True)
time.sleep(1)
continue
while True:
log.info('waiting on data queue')
try:
data = data_queue.get(timeout=1)
except Queue.Empty:
try:
connection.process_data_events()
except AMQPConnectionError:
break
continue
log.info('got data to submit')
try:
channel.basic_publish(exchange='',
routing_key='sandbox',
body=data,
properties=properties,
mandatory=True)
except Exception:
log.error('submission failed', exc_info=True)
data_queue.put(data)
break
log.info('submitted data to broker')
def setup_receiver(channel, data_queue):
def process_data(channel, method, properties, body):
log.info('received data from broker')
data_queue.put(body)
channel.basic_ack(delivery_tag=method.delivery_tag)
def on_queue_declared(frame):
channel.basic_consume(process_data, queue='sandbox')
channel.queue_declare(
queue='sandbox', durable=True, callback=on_queue_declared)
if __name__ == '__main__':
if len(sys.argv) != 2:
print 'usage: %s RABBITMQ_HOST' % sys.argv[0]
sys.exit()
format=('%(asctime)s %(levelname)s %(name)s %(message)s')
logging.basicConfig(level=logging.DEBUG, format=format)
host = sys.argv[1]
log.info('connecting to host: %s', host)
parameters = pika.ConnectionParameters(host=host, heartbeat=True)
data_queue = Queue.Queue(0)
data_queue.put('message') # prime the pump
# run submitter in a thread
setup = partial(setup_submitter, data_queue=data_queue)
broker = Broker(parameters, setup, 'submitter')
thread = threading.Thread(target=
partial(broker.connect, forever=True))
# uncomment these lines to use the blocking variant of the submitter
#thread = threading.Thread(target=
# partial(blocking_submitter, parameters, data_queue))
thread.daemon = True
thread.start()
# run receiver in main thread
setup = partial(setup_receiver, data_queue=data_queue)
broker = Broker(parameters, setup, 'receiver')
broker.connect(forever=True)
Related
I am working on a "simple" server using a threaded socketserver in Python 3.
I am having a lot of trouble implementing shutdown for it. I found the code below on the internet; shutdown works initially but stops working after the client sends a few commands via telnet. Some investigation tells me it hangs in threading._shutdown... threading._wait_for_tstate_lock, but so far this does not ring a bell.
My research tells me that there are ~42 different solutions, frameworks, etc. on how to do this in different Python versions. So far I could not find a working approach for Python 3. E.g. I love telnetsrv
(https://pypi.python.org/pypi/telnetsrv/0.4) for Python 2.7 (it uses greenlets from gevent), but it does not work for Python 3. So if there is a more pythonic, std lib approach or something that works reliably I would love to hear about it!
My bet currently is on socketserver, but I have not yet figured out how to deal with the hanging server. I removed all the log statements and most functionality so I can post this minimal server, which exposes the issue:
# -*- coding: utf-8 -*-
import socketserver
import threading
SERVER = None
def shutdown_cmd(request):
global SERVER
request.send(bytes('server shutdown requested\n', 'utf-8'))
request.close()
SERVER.shutdown()
print('after shutdown!!')
#SERVER.server_close()
class service(socketserver.BaseRequestHandler):
def handle(self):
while True:
try:
msg = str(self.request.recv(1024).strip(), 'utf-8')
if msg == 'shutdown':
shutdown_cmd(self.request)
else:
self.request.send(bytes("You said '{}'\n".format(msg), "utf-8"))
except Exception as e:
pass
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
def run():
global SERVER
SERVER = ThreadedTCPServer(('', 1520), service)
server_thread = threading.Thread(target=SERVER.serve_forever)
server_thread.daemon = True
server_thread.start()
input("Press enter to shutdown")
SERVER.shutdown()
if __name__ == '__main__':
run()
It would be great to be able to stop the server from the handler, too (see shutdown_cmd).
shutdown() works as expected: the server stops accepting new connections, but Python is still waiting for alive threads to terminate.
By default, socketserver.ThreadingMixIn creates a new thread to handle each incoming connection, and by default those are non-daemon threads, so Python will wait for all alive non-daemon threads to terminate.
Of course, you could make the server spawn daemon threads; then Python will not wait:
The ThreadingMixIn class defines an attribute daemon_threads, which indicates whether or not the server should wait for thread termination. You should set the flag explicitly if you would like threads to behave autonomously; the default is False, meaning that Python will not exit until all threads created by ThreadingMixIn have exited.
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
daemon_threads = True
But that is not the ideal solution; you should check why the threads never terminate. Usually, the handler should stop processing a connection when no new data is available or the client shuts down the connection:
import socketserver
import threading
shutdown_evt = threading.Event()
class service(socketserver.BaseRequestHandler):
def handle(self):
self.request.setblocking(False)
while True:
try:
msg = self.request.recv(1024)
if msg == b'shutdown':
shutdown_evt.set()
break
elif msg:
self.request.send(b'you said: ' + msg)
if shutdown_evt.wait(0.1):
break
except Exception as e:
break
class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
def run():
SERVER = ThreadedTCPServer(('127.0.0.1', 10000), service)
server_thread = threading.Thread(target=SERVER.serve_forever)
server_thread.daemon = True
server_thread.start()
input("Press enter to shutdown")
shutdown_evt.set()
SERVER.shutdown()
if __name__ == '__main__':
run()
I tried two solutions to implement a TCP server which runs on Python 3 on both Linux and Windows (I tried Windows 7):
using socketserver (my question) - shutdown is not working
using asyncio (posted an answer for that) - does not work on Windows
Both solutions were based on search results on the web. In the end I had to give up on the idea of finding a proven solution because I could not find one. Consequently I implemented my own solution (based on gevent). I post it here because I hope it will help others avoid struggling the way I did.
# -*- coding: utf-8 -*-
from gevent.server import StreamServer
from gevent.pool import Pool
class EchoServer(StreamServer):
def __init__(self, listener, handle=None, spawn='default'):
StreamServer.__init__(self, listener, handle=handle, spawn=spawn)
def handle(self, socket, address):
print('New connection from %s:%s' % address[:2])
socket.sendall(b'Welcome to the echo server! Type quit to exit.\r\n')
# using a makefile because we want to use readline()
rfileobj = socket.makefile(mode='rb')
while True:
line = rfileobj.readline()
if not line:
print("client disconnected")
break
if line.strip().lower() == b'quit':
print("client quit")
break
if line.strip().lower() == b'shutdown':
print("client initiated server shutdown")
self.stop()
break
socket.sendall(line)
print("echoed %r" % line.decode().strip())
rfileobj.close()
srv = EchoServer(('', 1520), spawn=Pool(20))
srv.serve_forever()
After more research I found a sample that works using asyncio:
# -*- coding: utf-8 -*-
import asyncio
# after further research I found this relevant europython talk:
# https://www.youtube.com/watch?v=pi49aiLBas8
# * protocols and transport are useful if you do not have tons of socket based code
# * event loop pushes data in
# * transport used to push data back to the client
# found decent sample in book by wrox "professional python"
class ServerProtocol(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
self.write('Welcome')
def connection_lost(self, exc):
self.transport = None
def data_received(self, data):
if not data or data == '':
return
message = data.decode('ascii')
command = message.strip().split(' ')[0].lower()
args = message.strip().split(' ')[1:]
#sanity check
if not hasattr(self, 'command_%s' % command):
self.write('Invalid command: %s' % command)
return
# run command
try:
return getattr(self, 'command_%s' % command)(*args)
except Exception as ex:
self.write('Error: %s' % str(ex))
def write(self, msg):
self.transport.write((msg + '\n').encode('ascii', 'ignore'))
def command_shutdown(self):
self.write('Okay. shutting down')
raise KeyboardInterrupt
def command_bye(self):
self.write('bye then!')
self.transport.close()
self.transport = None
if __name__ == '__main__':
loop = asyncio.get_event_loop()
coro = loop.create_server(ServerProtocol, '127.0.0.1', 8023)
asyncio.async(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
I understand that this is the most idiomatic way to do this kind of network programming. If necessary, the performance could be improved by running the same code with uvloop (https://magic.io/blog/uvloop-blazing-fast-python-networking/).
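For reference, here is a minimal sketch (my own, not part of the original answer) of how uvloop would be dropped in, assuming it has been pip-installed; the rest of the asyncio sample above stays unchanged:
import asyncio
import uvloop

# Install the uvloop event loop policy before any loop is created.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

loop = asyncio.get_event_loop()  # now backed by uvloop
# ... create_server / run_forever exactly as in the asyncio sample above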
Another way to shut down the server is by creating a process/thread for the serve_forever call.
After serve_forever is started, simply wait for a custom flag to trigger, then call server_close on the server and terminate the process.
streaming_server = StreamingServer(('', 8000), StreamingHandler)
FLAG_KEEP_ALIVE.value = True
process_serve_forever = Process(target=streaming_server.serve_forever)
process_serve_forever.start()
while FLAG_KEEP_ALIVE.value:
pass
streaming_server.server_close()
process_serve_forever.terminate()
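For reference, a self-contained sketch of the same pattern; the names here (EchoHandler, keep_alive) are illustrative rather than the original StreamingServer/StreamingHandler, and it assumes a fork-based platform so the child process inherits the listening socket:
import socketserver
import time
from multiprocessing import Process, Value

class EchoHandler(socketserver.BaseRequestHandler):
    def handle(self):
        # echo one request and return
        data = self.request.recv(1024)
        if data:
            self.request.sendall(data)

if __name__ == '__main__':
    keep_alive = Value('b', True)  # shared flag; another process or thread clears it
    server = socketserver.TCPServer(('', 8000), EchoHandler)
    worker = Process(target=server.serve_forever)
    worker.start()
    try:
        while keep_alive.value:    # poll the custom flag
            time.sleep(0.1)
    finally:
        server.server_close()      # release the listening socket in the parent
        worker.terminate()         # stop serve_forever in the child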
Summary
I have a client-server application which makes use of Websockets. The backend (server) part is implemented in Python using autobahn.
The server, in addition to serving a Websockets endpoint, runs a series of threads which feed the Websockets channel with data through a queue.Queue().
One of these threads has a problem: it crashes on a missing parameter and hangs while the exception is being handled.
Implementation details
The server implementation (cut down to highlight the problem):
from autobahn.asyncio.websocket import WebSocketServerProtocol, WebSocketServerFactory
import time
import threading
import arrow
import queue
import asyncio
import json
# backends of components
import dummy
class MyServerProtocol(WebSocketServerProtocol):
def __init__(self):
super().__init__()
print("webserver initialized")
# global queue to handle updates from modules
self.events = queue.Queue()
# consumer
threading.Thread(target=self.push).start()
threading.Thread(target=dummy.Dummy().dummy, args=(self.events,)).start()
def push(self):
""" consume the content of the queue and push it to the browser """
while True:
update = self.events.get()
print(update)
if update:
self.sendMessage(json.dumps(update).encode('utf-8'), False)
print(update)
time.sleep(1)
def worker(self):
print("started thread")
while True:
try:
self.sendMessage(arrow.now().isoformat().encode('utf-8'), False)
except AttributeError:
print("not connected?")
time.sleep(3)
def onConnect(self, request):
print("Client connecting: {0}".format(request.peer))
def onOpen(self):
print("WebSocket connection open.")
def onClose(self, wasClean, code, reason):
print("WebSocket connection closed: {0}".format(reason))
if __name__ == '__main__':
factory = WebSocketServerFactory(u"ws://127.0.0.1:9100")
factory.protocol = MyServerProtocol
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, '0.0.0.0', 9100)
loop.run_until_complete(coro)
loop.run_forever()
The dummy module imported in the code above:
import time
import arrow
class Dummy:
def __init__(self, events):
self.events = events
print("dummy initialized")
def dummy(self):
while True:
self.events.put({
'dummy': {
'time': arrow.now().isoformat()
}
})
time.sleep(1)
The problem
When running the code above and connecting from a client, I get on the output webserver initialized (which proves that the connection was initiated), and WebSocket connection to 'ws://127.0.0.1:9100/' failed: Error in connection establishment: net::ERR_CONNECTION_REFUSED on the client.
When debugging the code, I see that the call to threading.Thread(target=dummy.Dummy().dummy, args=(self.events,)).start() crashes and the debugger (PyCharm) leads me to C:\Program Files (x86)\Python36-32\Lib\asyncio\selector_events.py, specifically to line 236
# It's now up to the protocol to handle the connection.
except Exception as exc:
if self._debug:
The thread hangs when executing if self._debug, but I see on the except line (thanks to PyCharm) that
exc: __init__() missing 1 required positional argument: 'events'
My question
Why is this parameter missing? It is provided via the threading.Thread(target=dummy.Dummy().dummy, args=(self.events,)).start() call.
As a side question: why does the thread hang on the if condition?
Notes
there is never a Traceback thrown by my program (due to the hang)
removing this thread call resolves the issue (the client connects correctly)
The events arg is needed for the constructor, not the dummy method. I think you meant something more like:
d = Dummy(self.events)
threading.Thread(target=d.dummy).start()
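Applied to the question's MyServerProtocol.__init__, the corrected wiring would look roughly like this (a sketch; Dummy is the class from the question's dummy module):
class MyServerProtocol(WebSocketServerProtocol):
    def __init__(self):
        super().__init__()
        # global queue to handle updates from modules
        self.events = queue.Queue()
        # consumer
        threading.Thread(target=self.push).start()
        # pass the queue to the constructor, then hand the bound method to the thread
        d = dummy.Dummy(self.events)
        threading.Thread(target=d.dummy).start()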
I need to process frames from a webcam and send a few selected frames to a remote websocket server. The server answers immediately with a confirmation message (much like an echo server).
Frame processing is slow and CPU-intensive, so I want to do it in a separate thread pool (producer) to use all the available cores. The client (consumer) just sits idle until the pool has something to send.
My current implementation, see below, works fine only if I add a small sleep inside the producer test loop. If I remove this delay I stop receiving any answer from the server (both the echo server and from my real server). Even the first answer is lost, so I do not think this is a flood protection mechanism.
What am I doing wrong?
import tornado
from tornado.websocket import websocket_connect
from tornado import gen, queues
import time
class TornadoClient(object):
url = None
onMessageReceived = None
onMessageSent = None
ioloop = tornado.ioloop.IOLoop.current()
q = queues.Queue()
def __init__(self, url, onMessageReceived, onMessageSent):
self.url = url
self.onMessageReceived = onMessageReceived
self.onMessageSent = onMessageSent
def enqueueMessage(self, msgData, binary=False):
print("TornadoClient.enqueueMessage")
self.ioloop.add_callback(self.addToQueue, (msgData, binary))
print("TornadoClient.enqueueMessage done")
@gen.coroutine
def addToQueue(self, msgTuple):
yield self.q.put(msgTuple)
@gen.coroutine
def main_loop(self):
connection = None
try:
while True:
while connection is None:
try:
print("Connecting...")
connection = yield websocket_connect(self.url)
print("Connected " + str(connection))
except Exception, e:
print("Exception on connection " + str(e))
connection = None
print("Retry in a few seconds...")
yield gen.Task(self.ioloop.add_timeout, time.time() + 3)
try:
print("Waiting for data to send...")
msgData, binaryVal = yield self.q.get()
print("Writing...")
sendFuture = connection.write_message(msgData, binary=binaryVal)
print("Write scheduled...")
finally:
self.q.task_done()
yield sendFuture
self.onMessageSent("Sent ok")
print("Write done. Reading...")
msg = yield connection.read_message()
print("Got msg.")
self.onMessageReceived(msg)
if msg is None:
print("Connection lost")
connection = None
print("main loop completed")
except Exception, e:
print("ExceptionExceptionException")
print(e)
connection = None
print("Exit main_loop function")
def start(self):
self.ioloop.run_sync(self.main_loop)
print("Main loop completed")
######### TEST METHODS #########
def sendMessages(client):
time.sleep(2) #TEST only: wait for client startup
while True:
client.enqueueMessage("msgData", binary=False)
time.sleep(1) # <--- comment this line to break it
def testPrintMessage(msg):
print("Received: " + str(msg))
def testPrintSentMessage(msg):
print("Sent: " + msg)
if __name__=='__main__':
from threading import Thread
client = TornadoClient("ws://echo.websocket.org", testPrintMessage, testPrintSentMessage)
thread = Thread(target = sendMessages, args = (client, ))
thread.start()
client.start()
My real problem
In my real program I use a "window-like" mechanism to protect the consumer (an autobahn.twisted.websocket server): the producer can send up to a maximum number of unacknowledged messages (the webcam frames), then stops and waits for half of the window to free up.
The consumer sends a "PROCESSED" message back, acknowledging one or more messages (just a counter, not by id).
What I see on the consumer log is that the messages are processed and the answer is sent back but these acks vanish somewhere in the network.
I have little experience with asyncio, so I wanted to be sure that I'm not missing any yield, annotation or something else.
This is the consumer side log:
2017-05-13 18:59:54+0200 [-] TX Frame to tcp4:192.168.0.5:48964 : fin = True, rsv = 0, opcode = 1, mask = -, length = 21, repeat_length = None, chopsize = None, sync = False, payload = {"type": "PROCESSED"}
2017-05-13 18:59:54+0200 [-] TX Octets to tcp4:192.168.0.5:48964 : sync = False, octets = 81157b2274797065223a202250524f434553534544227d
This is neat code. I believe the reason you need a sleep in your sendMessages thread is because, otherwise, it keeps calling enqueueMessage as fast as possible, millions of times per second. Since enqueueMessage does not wait for the enqueued message to be processed, it keeps calling IOLoop.add_callback as fast as it can, without giving the loop enough opportunity to execute the callbacks.
The loop might make some progress running on the main thread, since you're not actually blocking it. But the sendMessages thread adds callbacks much faster than the loop can handle them. By the time the loop has popped one message from the queue and has begun to process it, millions of new callbacks are added already, which the loop must execute before it can advance to the next stage of message-processing.
Therefore, for your test code, I think it's correct to sleep between calls to enqueueMessage on the thread.
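As an alternative to the sleep, here is a hedged sketch (my own, not part of the answer above) that applies backpressure from the producer thread: a threading.Semaphore bounds the number of in-flight add_callback calls, and the IOLoop releases a slot only once each message has actually been put on the queue, so the thread blocks instead of flooding the loop.
import threading
import tornado.ioloop
from tornado import gen, queues

class ThrottledEnqueuer(object):
    def __init__(self, max_pending=100):
        self.ioloop = tornado.ioloop.IOLoop.current()
        self.q = queues.Queue()
        self._slots = threading.Semaphore(max_pending)  # caps outstanding callbacks

    def enqueueMessage(self, msgData, binary=False):
        # Called from the producer thread; blocks once max_pending callbacks are pending.
        self._slots.acquire()
        self.ioloop.add_callback(self._addToQueue, (msgData, binary))

    @gen.coroutine
    def _addToQueue(self, msgTuple):
        yield self.q.put(msgTuple)
        # Runs on the IOLoop; free a slot only after the message is in the queue.
        self._slots.release()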
I'm using Autobahn to connect to a websocket like this.
class MyComponent(ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
print("session ready")
def oncounter(*args, **args2):
print("event received: args: {} args2: {}".format(args, args2))
try:
yield self.subscribe(oncounter, u'topic')
print("subscribed to topic")
except Exception as e:
print("could not subscribe to topic: {0}".format(e))
if __name__ == '__main__':
addr = u"wss://mywebsocketaddress.com"
runner = ApplicationRunner(url=addr, realm=u"realm1", debug=False, debug_app=False)
runner.run(MyComponent)
This works great, and I am able to receive messages. However, after around 3-4 hours, sometimes much sooner, the messages abruptly stop coming. It appears that the websocket times out (does that happen?), possibly due to connection issues.
How can I auto-reconnect with autobahn when that happens?
Here's my attempt, but the reconnecting code is never called.
class MyClientFactory(ReconnectingClientFactory, WampWebSocketClientFactory):
maxDelay = 10
maxRetries = 5
def startedConnecting(self, connector):
print('Started to connect.')
def clientConnectionLost(self, connector, reason):
print('Lost connection. Reason: {}'.format(reason))
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
print('Connection failed. Reason: {}'.format(reason))
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
class MyApplicationRunner(object):
log = txaio.make_logger()
def __init__(self, url, realm, extra=None, serializers=None,
debug=False, debug_app=False,
ssl=None, proxy=None):
assert(type(url) == six.text_type)
assert(realm is None or type(realm) == six.text_type)
assert(extra is None or type(extra) == dict)
assert(proxy is None or type(proxy) == dict)
self.url = url
self.realm = realm
self.extra = extra or dict()
self.serializers = serializers
self.debug = debug
self.debug_app = debug_app
self.ssl = ssl
self.proxy = proxy
def run(self, make, start_reactor=True):
if start_reactor:
# only select framework, set loop and start logging when we are asked
# start the reactor - otherwise we are running in a program that likely
# already took care of all this.
from twisted.internet import reactor
txaio.use_twisted()
txaio.config.loop = reactor
if self.debug or self.debug_app:
txaio.start_logging(level='debug')
else:
txaio.start_logging(level='info')
isSecure, host, port, resource, path, params = parseWsUrl(self.url)
# factory for use ApplicationSession
def create():
cfg = ComponentConfig(self.realm, self.extra)
try:
session = make(cfg)
except Exception as e:
if start_reactor:
# the app component could not be created .. fatal
self.log.error(str(e))
reactor.stop()
else:
# if we didn't start the reactor, it's up to the
# caller to deal with errors
raise
else:
session.debug_app = self.debug_app
return session
# create a WAMP-over-WebSocket transport client factory
transport_factory = MyClientFactory(create, url=self.url, serializers=self.serializers,
proxy=self.proxy, debug=self.debug)
# suppress pointless log noise like
# "Starting factory <autobahn.twisted.websocket.WampWebSocketClientFactory object at 0x2b737b480e10>""
transport_factory.noisy = False
# if user passed ssl= but isn't using isSecure, we'll never
# use the ssl argument which makes no sense.
context_factory = None
if self.ssl is not None:
if not isSecure:
raise RuntimeError(
'ssl= argument value passed to %s conflicts with the "ws:" '
'prefix of the url argument. Did you mean to use "wss:"?' %
self.__class__.__name__)
context_factory = self.ssl
elif isSecure:
from twisted.internet.ssl import optionsForClientTLS
context_factory = optionsForClientTLS(host)
from twisted.internet import reactor
if self.proxy is not None:
from twisted.internet.endpoints import TCP4ClientEndpoint
client = TCP4ClientEndpoint(reactor, self.proxy['host'], self.proxy['port'])
transport_factory.contextFactory = context_factory
elif isSecure:
from twisted.internet.endpoints import SSL4ClientEndpoint
assert context_factory is not None
client = SSL4ClientEndpoint(reactor, host, port, context_factory)
else:
from twisted.internet.endpoints import TCP4ClientEndpoint
client = TCP4ClientEndpoint(reactor, host, port)
d = client.connect(transport_factory)
# as the reactor shuts down, we wish to wait until we've sent
# out our "Goodbye" message; leave() returns a Deferred that
# fires when the transport gets to STATE_CLOSED
def cleanup(proto):
if hasattr(proto, '_session') and proto._session is not None:
if proto._session.is_attached():
return proto._session.leave()
elif proto._session.is_connected():
return proto._session.disconnect()
# when our proto was created and connected, make sure it's cleaned
# up properly later on when the reactor shuts down for whatever reason
def init_proto(proto):
reactor.addSystemEventTrigger('before', 'shutdown', cleanup, proto)
return proto
# if we connect successfully, the arg is a WampWebSocketClientProtocol
d.addCallback(init_proto)
# if the user didn't ask us to start the reactor, then they
# get to deal with any connect errors themselves.
if start_reactor:
# if an error happens in the connect(), we save the underlying
# exception so that after the event-loop exits we can re-raise
# it to the caller.
class ErrorCollector(object):
exception = None
def __call__(self, failure):
self.exception = failure.value
reactor.stop()
connect_error = ErrorCollector()
d.addErrback(connect_error)
# now enter the Twisted reactor loop
reactor.run()
# if we exited due to a connection error, raise that to the
# caller
if connect_error.exception:
raise connect_error.exception
else:
# let the caller handle any errors
return d
The error I'm getting:
2016-10-09T21:00:40+0100 Connection to/from tcp4:xxx.xx.xx.xx:xxx was lost in a non-clean fashion: Connection lost
2016-10-09T21:00:40+0100 _connectionLost: [Failure instance: Traceback (failure with no frames): : Connection to the other side was lost in a non-clean fashion: Connection l
ost.
]
2016-10-09T21:00:40+0100 WAMP-over-WebSocket transport lost: wasClean=False, code=1006, reason="connection was closed uncleanly (peer dropped the TCP connection without previous WebSocket closing handshake)"
2016-10-09T21:10:39+0100 EXCEPTION: no messages received
2016-10-09T21:10:39+0100 Traceback (most recent call last):
You can use a ReconnectingClientFactory if you're using Twisted. There's a simple example by the autobahn developer on github. Unfortunately there doesn't seem to be an implementation of an ApplicationRunner that comes pre-built with this functionality, but it doesn't look too difficult to implement on your own. Here's an asyncio variant which should be straightforward to port to Twisted. You could follow this issue, as there seems to be a desire by the dev team to incorporate a reconnecting client.
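For reference, here is a rough sketch of that approach under Twisted (my own wiring, not the linked example). MyComponent is the session class from the question, and connectWS uses reactor.connectTCP/connectSSL, which is what lets ReconnectingClientFactory see the clientConnectionLost/clientConnectionFailed callbacks:
from autobahn.twisted.wamp import ApplicationSessionFactory
from autobahn.twisted.websocket import WampWebSocketClientFactory, connectWS
from autobahn.wamp.types import ComponentConfig
from twisted.internet import reactor
from twisted.internet.protocol import ReconnectingClientFactory

class ReconnectingWampClientFactory(WampWebSocketClientFactory, ReconnectingClientFactory):
    maxDelay = 30

    def clientConnectionFailed(self, connector, reason):
        print("connection failed, retrying: {}".format(reason))
        self.retry(connector)

    def clientConnectionLost(self, connector, reason):
        print("connection lost, retrying: {}".format(reason))
        self.retry(connector)

if __name__ == '__main__':
    session_factory = ApplicationSessionFactory(ComponentConfig(realm=u"realm1"))
    session_factory.session = MyComponent  # the session class from the question
    transport_factory = ReconnectingWampClientFactory(
        session_factory, url=u"wss://mywebsocketaddress.com")
    connectWS(transport_factory)
    reactor.run()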
Have you tried pip3 install autobahn-autoreconnect ?
# from autobahn.asyncio.wamp import ApplicationRunner
from autobahn_autoreconnect import ApplicationRunner
Here is an example of an automatically reconnecting ApplicationRunner. The important line to enable auto-reconnect is:
runner.run(session, auto_reconnect=True)
You will also want to activate automatic WebSocket ping/pong (eg in Crossbar.io), to a) minimize connection drops because of timeouts, and b) to allow fast detection of lost connections.
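A minimal sketch of that wiring for the question's session, assuming an autobahn version whose ApplicationRunner.run accepts the auto_reconnect flag quoted above:
from autobahn.twisted.wamp import ApplicationRunner

if __name__ == '__main__':
    runner = ApplicationRunner(url=u"wss://mywebsocketaddress.com", realm=u"realm1")
    # auto_reconnect re-establishes the transport and re-joins the realm after a drop,
    # instead of stopping the reactor.
    runner.run(MyComponent, auto_reconnect=True)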
I'm trying to implement a websocket/wamp client using autobahn|python
and asyncio, and while it's somewhat working, there are parts that have
eluded me.
What I'm really trying to do is implement WAMP in qt5/QML, but this
seemed like an easier path for the moment.
This simplified client, mostly copied from online examples, does work. It reads
the time service when onJoin occurs.
What I'd like to do is trigger this read from an external source.
The convoluted approach I've taken is to run the asyncio event loop in a
thread, and then to send a command over a socket to trigger the read. I
have so far unable to figure out where to put the routine/coroutine so
that it can be found from the reader routine.
I suspect there's a simpler way to go about this but I haven't found it
yet. Suggestions are welcome.
#!/usr/bin/python3
try:
import asyncio
except ImportError:
## Trollius >= 0.3 was renamed
import trollius as asyncio
from autobahn.asyncio import wamp, websocket
import threading
import time
from socket import socketpair
rsock, wsock = socketpair()
def reader() :
data = rsock.recv(100)
print("Received:", data.decode())
class MyFrontendComponent(wamp.ApplicationSession):
def onConnect(self):
self.join(u"realm1")
@asyncio.coroutine
def onJoin(self, details):
print('joined')
## call a remote procedure
##
try:
now = yield from self.call(u'com.timeservice.now')
except Exception as e:
print("Error: {}".format(e))
else:
print("Current time from time service: {}".format(now))
def onLeave(self, details):
self.disconnect()
def onDisconnect(self):
asyncio.get_event_loop().stop()
def start_aloop() :
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
transport_factory = websocket.WampWebSocketClientFactory(session_factory,
debug = False,
debug_wamp = False)
coro = loop.create_connection(transport_factory, '127.0.0.1', 8080)
loop.add_reader(rsock,reader)
loop.run_until_complete(coro)
loop.run_forever()
loop.close()
if __name__ == '__main__':
session_factory = wamp.ApplicationSessionFactory()
session_factory.session = MyFrontendComponent
## 4) now enter the asyncio event loop
print('starting thread')
thread = threading.Thread(target=start_aloop)
thread.start()
time.sleep(5)
print("IN MAIN")
# emulate an outside call
wsock.send(b'a byte string')
You can listen on a socket asynchronously inside the event loop, using loop.sock_accept. You can just call a coroutine to set up the socket inside onConnect or onJoin:
try:
import asyncio
except ImportError:
## Trollius >= 0.3 was renamed
import trollius as asyncio
from autobahn.asyncio import wamp, websocket
import socket
class MyFrontendComponent(wamp.ApplicationSession):
def onConnect(self):
self.join(u"realm1")
@asyncio.coroutine
def setup_socket(self):
# Create a non-blocking socket
self.sock = socket.socket()
self.sock.setblocking(0)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(('localhost', 8889))
self.sock.listen(5)
loop = asyncio.get_event_loop()
# Wait for connections to come in. When one arrives,
# call the time service and disconnect immediately.
while True:
conn, address = yield from loop.sock_accept(self.sock)
yield from self.call_timeservice()
conn.close()
@asyncio.coroutine
def onJoin(self, details):
print('joined')
# Setup our socket server
asyncio.async(self.setup_socket())
## call a remote procedure
##
yield from self.call_timeservice()
@asyncio.coroutine
def call_timeservice(self):
try:
now = yield from self.call(u'com.timeservice.now')
except Exception as e:
print("Error: {}".format(e))
else:
print("Current time from time service: {}".format(now))
... # The rest is the same
Thanks for the response, dano. It's not quite the solution I needed, but it pointed me in the right direction. Yes, I want the client to make remote RPC calls from an external trigger.
I came up with the following, which allows me to pass a string for the specific call (though only one is implemented right now).
Here it is, though I'm not sure how elegant it is.
import asyncio
from autobahn.asyncio import wamp, websocket
import threading
import time
import socket
rsock, wsock = socket.socketpair()
class MyFrontendComponent(wamp.ApplicationSession):
def onConnect(self):
self.join(u"realm1")
@asyncio.coroutine
def setup_socket(self):
# Create a non-blocking socket
self.sock = rsock
self.sock.setblocking(0)
loop = asyncio.get_event_loop()
# Wait for connections to come in. When one arrives,
# call the time service and disconnect immediately.
while True:
rcmd = yield from loop.sock_recv(rsock,80)
yield from self.call_service(rcmd.decode())
@asyncio.coroutine
def onJoin(self, details):
# Setup our socket server
asyncio.async(self.setup_socket())
@asyncio.coroutine
def call_service(self,rcmd):
print(rcmd)
try:
now = yield from self.call(rcmd)
except Exception as e:
print("Error: {}".format(e))
else:
print("Current time from time service: {}".format(now))
def onLeave(self, details):
self.disconnect()
def onDisconnect(self):
asyncio.get_event_loop().stop()
def start_aloop() :
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
transport_factory = websocket.WampWebSocketClientFactory(session_factory,
debug = False,
debug_wamp = False)
coro = loop.create_connection(transport_factory, '127.0.0.1', 8080)
loop.run_until_complete(coro)
loop.run_forever()
loop.close()
if __name__ == '__main__':
session_factory = wamp.ApplicationSessionFactory()
session_factory.session = MyFrontendComponent
## 4) now enter the asyncio event loop
print('starting thread')
thread = threading.Thread(target=start_aloop)
thread.start()
time.sleep(5)
wsock.send(b'com.timeservice.now')
time.sleep(5)
wsock.send(b'com.timeservice.now')
time.sleep(5)
wsock.send(b'com.timeservice.now')