I'm setting up a WebSocket app in Python using gevent-websocket.
In one of the examples (the chat app), which is pretty much the same as my application, an app that handles websocket connections and messages is defined like this:
import json

from gevent import monkey
monkey.patch_all()

from flask import Flask, render_template
from werkzeug.debug import DebuggedApplication
from geventwebsocket import WebSocketServer, WebSocketApplication, Resource

flask_app = Flask(__name__)
flask_app.debug = True


class ChatApplication(WebSocketApplication):
    def on_open(self):
        print("Some client connected!")

    def on_message(self, message):
        if message is None:
            return

        message = json.loads(message)

        if message['msg_type'] == 'message':
            self.broadcast(message)
        elif message['msg_type'] == 'update_clients':
            self.send_client_list(message)

    def send_client_list(self, message):
        current_client = self.ws.handler.active_client
        current_client.nickname = message['nickname']

        self.ws.send(json.dumps({
            'msg_type': 'update_clients',
            'clients': [
                getattr(client, 'nickname', 'anonymous')
                for client in self.ws.handler.server.clients.values()
            ]
        }))

    def broadcast(self, message):
        for client in self.ws.handler.server.clients.values():
            client.ws.send(json.dumps({
                'msg_type': 'message',
                'nickname': message['nickname'],
                'message': message['message']
            }))

    def on_close(self, reason):
        print("Connection closed!")


@flask_app.route('/')
def index():
    return render_template('index.html')


WebSocketServer(
    ('0.0.0.0', 8000),
    Resource([
        ('^/chat', ChatApplication),
        ('^/.*', DebuggedApplication(flask_app))
    ]),
    debug=False
).serve_forever()
I want to have some scheduled processes in my code that send a message to every client connected to the websocket.
In the examples and the limited documentation I find no way of calling the broadcast method from somewhere else in the project. Every message/broadcast has to be sent as a reaction to a received message (as I understand it).
To figure it out, I tried broadcasting a message every time someone visits the index page:
@flask_app.route('/')
def index():
    chat_application = ChatApplication()
    chat_application.broadcast("A new user on the page!")
    return render_template('index.html')
This throws an error:
chat_application = ChatApplication()
TypeError: __init__() missing 1 required positional argument: 'ws'
Long story short:
I do not know how to send a message to every client on the websocket, because I need a ChatApplication instance to access the broadcast function, and I can't figure out how to create a ChatApplication object that would let me call it.
I figured it out.
By starting the server like this:
server = WebSocketServer(
    ('0.0.0.0', 8000),
    Resource([
        ('^/chat', ChatApplication),
        ('^/.*', DebuggedApplication(flask_app))
    ]),
    debug=False
)
server.serve_forever()
you can access all the clients and send them a message like this:

for client in server.clients.values():
    client.ws.send("whatever you want to send")
I have a Django project that reads messages from MQTT using mqttasgi. I can see the messages in the mqttasgi output:
mqttasgi --host localhost --port 1883 myapp.asgi:application
2023-01-10 18:08:55.380206 -- Received a message at topic: test/device/online
With payload: b'true'
And QOS: 1
What I want to do is pick the device out of the message topic and then update the model, just like I do with the web interaction. I tried importing the model into the consumer:
import json
import os
from datetime import datetime

import django
from mqttasgi.consumers import MqttConsumer

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproj.settings')
django.setup()

from .models import Device


class MyMqttConsumer(MqttConsumer):
    async def connect(self):
        await self.subscribe('test/#', 2)

    async def receive(self, mqtt_message):
        print(f"{datetime.now()} -- Received a message at topic: {mqtt_message['topic']}")
        print(f"With payload: {mqtt_message['payload']}")
        print(f"And QOS: {mqtt_message['qos']}")
        (site, devicename, topic) = mqtt_message['topic'].split('/', 2)
        device = Device.objects.get(name=devicename)
        if topic == 'online':
            if mqtt_message['payload'].decode('UTF-8') == 'false':
                device.online = False
                device.save()
            else:
                device.online = True
                device.save()

    async def disconnect(self):
        await self.unsubscribe('test/#')
but as soon as I do that, mqttasgi complains:
django.core.exceptions.SynchronousOnlyOperation: You cannot call this from an async context - use a thread or sync_to_async.
How do I modify the code so that the db update works?
I am not sure, but try making the save awaitable: on Django 4.1+ you can use await device.asave(), and on older versions you can wrap the ORM calls with asgiref's sync_to_async, as the error message suggests.
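A minimal sketch of the sync_to_async route (the _update_device helper is invented here for illustration):

from asgiref.sync import sync_to_async
from mqttasgi.consumers import MqttConsumer

from .models import Device


def _update_device(devicename, online):
    # Plain synchronous ORM code; sync_to_async runs it in a worker thread.
    device = Device.objects.get(name=devicename)
    device.online = online
    device.save()


class MyMqttConsumer(MqttConsumer):
    async def receive(self, mqtt_message):
        (site, devicename, topic) = mqtt_message['topic'].split('/', 2)
        if topic == 'online':
            online = mqtt_message['payload'].decode('UTF-8') != 'false'
            # Awaiting the wrapped call keeps the ORM off the event loop,
            # which is exactly what SynchronousOnlyOperation complains about.
            await sync_to_async(_update_device)(devicename, online)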
Is there a straightforward way to asynchronously chain GRPC calls in Python?
This feels like the kind of thing that "should" be feasible, but I can't seem to find it.
Here's a rough idea of what I feel I should be able to do:
class MyServer(my_grpc.MyServicer):
    def __init__(self, child_stub):
        self.child_stub_ = child_stub

    def MyMethod(self, request, context):
        child_result = self.child_stub_.ChildMethod.future(my_grpc.ChildMethodParams())
        child_result.add_done_callback(something_that_completes_MyMethod)
        return presumably_something
Is there something I'm missing here? It feels like this would be a common use-case, yet I can't seem to find anything related to it in the docs.
Edit: I believe you're trying to send two responses for one request on a unary request/response setup, which I don't believe to be possible. Instead you should use a unary request with streaming responses, which allows for many responses.
Client

import grpc

import test_pb2_grpc as pb_grpc
import test_pb2 as pb2


def test():
    channel = grpc.insecure_channel('localhost:50051')
    stub = pb_grpc.TestStub(channel=channel)
    for response in stub.Produce(pb2.Empty()):
        print(response.result)


if __name__ == '__main__':
    test()
Server
import time
from concurrent import futures

import grpc

import test_pb2_grpc as pb_grpc
import test_pb2 as pb2


class TestService(pb_grpc.TestServicer):
    def Produce(self, request, context):
        my_method_results = [50, 200]
        for result in my_method_results:
            yield pb2.Resp(result=result)


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    pb_grpc.add_TestServicer_to_server(TestService(), server)
    server.add_insecure_port('[::]:50051')
    print("service started")
    server.start()
    try:
        while True:
            time.sleep(3600)
    except KeyboardInterrupt:
        server.stop(0)


if __name__ == '__main__':
    serve()
proto
syntax = "proto3";
package api;
service Test {
rpc Produce (Empty) returns (stream Resp);
}
message Empty {}
message Resp{
int32 result = 1;
}
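As a side note, recent grpcio releases also ship an asyncio API (grpc.aio), which makes the chaining in the original question direct: an async handler can simply await the child call instead of wiring up futures and callbacks. A rough sketch, reusing the placeholder names from the question (MyMethodReply and ChildStub are invented here):

import grpc

class MyServer(my_grpc.MyServicer):
    def __init__(self, child_channel):
        # A stub created from a grpc.aio channel returns awaitable calls.
        self.child_stub_ = my_grpc.ChildStub(child_channel)

    async def MyMethod(self, request, context):
        # Awaiting the child RPC suspends this handler without blocking
        # the server's event loop; its result completes MyMethod's reply.
        child_result = await self.child_stub_.ChildMethod(my_grpc.ChildMethodParams())
        return my_grpc.MyMethodReply(result=child_result.result)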
App Description
So I'm trying to create an application that does real-time sentiment analysis on tweets (as close to real time as I'm able to get it), and these tweets have to be based on user input. On the main page of my application, I have a simple search bar where the user can enter a topic they would like to perform sentiment analysis on; when they press enter, it takes them to another page where they see a line chart displaying all the data in real time.
Problem 1
The first problem I'm facing at the moment is that I don't know how I can get tweepy to change what it is tracking when two or more people make a request. If I had a single global stream that I simply disconnect and reconnect every time a user makes a new query, it would also disconnect for other users, which I don't want. On the other hand, if I allocated a streaming object for each user that connects, that strategy should work. But this still poses a problem: it seems Twitter does not allow you to hold more than one streaming connection at a time, given this StackOverflow post:
Does Tweepy support running multiple Streams to collect data?
If I still went ahead with this, I would risk getting my IP banned, so both of these solutions are no good.
Problem 2
The last problem I'm having is figuring out who a message belongs to. At the moment, I'm using RabbitMQ to store all incoming messages in one single queue called twitter_topic_feed. For every tweet that I receive from tweepy, I publish it to that queue. RabbitMQ then consumes each message and sends it to every available connection. Obviously, that behaviour is not what I'm looking for. Consider two users who search for pizza and sports: both users will receive tweets pertaining to football and pizza, when one asked for sports tweets and the other asked for pizza tweets.
One idea is to create a queue with a unique identifier for each available connection. The identifier would have the form {Search Term}_{Hash ID}, as in the sketch below.
For generating the hash ID, I can use the UUID package that is available in Python, creating the ID when the connection opens and deleting it when it closes. Of course, when a connection closes I also need to delete its queue. I'm not sure how well this solution would scale: if we had 10,000 connections, we would have 10,000 queues, and each queue could potentially have a lot of messages stored in it, which seems very memory intensive.
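For illustration, a minimal sketch of that naming scheme (the helper name is made up; nothing here touches RabbitMQ yet):

from uuid import uuid4

def make_queue_name(search_term):
    # e.g. "pizza_9f3c1ab2...": one queue per websocket connection,
    # created when the connection opens and deleted when it closes.
    return "{0}_{1}".format(search_term, uuid4().hex)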
Design
Tornado framework for the WebSockets,
tweepy API for streaming tweets,
RabbitMQ for publishing messages to a queue whenever tweepy receives a new tweet; RabbitMQ then consumes each message and sends it to the WebSocket.
Attempt (what I currently have so far)
TweetStreamListener uses the tweepy API to listen for tweets based on the user's input. For every tweet it gets, it calculates the tweet's polarity and publishes the result to the RabbitMQ twitter_topic_feed queue.
import logging

from tweepy import StreamListener, OAuthHandler, Stream, API

from sentiment_analyzer import calculate_polarity_score
from constants import SETTINGS

auth = OAuthHandler(
    SETTINGS["TWITTER_CONSUMER_API_KEY"], SETTINGS["TWITTER_CONSUMER_API_SECRET_KEY"])
auth.set_access_token(
    SETTINGS["TWITTER_ACCESS_KEY"], SETTINGS["TWITTER_ACCESS_SECRET_KEY"])
api = API(auth, wait_on_rate_limit=True)


class TweetStreamListener(StreamListener):
    def __init__(self):
        self.api = api
        self.stream = Stream(auth=self.api.auth, listener=self)

    def start_listening(self):
        pass

    def on_status(self, status):
        if not hasattr(status, 'retweeted_status'):
            polarity = calculate_polarity_score(status.text)
            message = {
                'polarity': polarity,
                'timestamp': status.created_at
            }
            # TODO(Luis) Need to figure out who to send this message to.
            logging.debug("Message received from Twitter: {0}".format(message))

    # limit handling
    def on_limit(self, status):
        logging.info(
            'Limit threshold exceeded. Status code: {0}'.format(status))

    def on_timeout(self, status):
        logging.error('Stream disconnected. continuing...')
        return True  # Don't kill the stream

    def on_error(self, status_code):
        """
        Summary: Callback that executes for any error that may occur. Whenever we get a 420 error code, we simply
        stop streaming tweets as we have reached our rate limit. This is due to making too many requests.

        Returns: False if we are sending too many tweets, otherwise True to keep the stream going.
        """
        if status_code == 420:
            logging.error(
                'Encountered error code 420. Disconnecting the stream')
            # returning False in on_data disconnects the stream
            return False
        else:
            logging.error('Encountered error with status code: {}'.format(
                status_code))
            return True  # Don't kill the stream
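As an aside, start_listening above is left as a stub; presumably it would wrap Stream.filter with the user's track terms. A sketch of how that method might look inside TweetStreamListener (is_async is the tweepy 3.7+ spelling of the old async parameter):

    def start_listening(self, track_terms):
        # Begin streaming statuses that match the given terms;
        # is_async=True runs the stream in a background thread.
        self.stream.filter(track=track_terms, is_async=True)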
WSHandler is in charge of maintaining a list of open connections and sending any message it receives back to every client (this is the behaviour I don't want).
import logging
import json
from uuid import uuid4

from tornado.web import RequestHandler
from tornado.websocket import WebSocketHandler


class WSHandler(WebSocketHandler):
    def check_origin(self, origin):
        return True

    @property
    def sess_id(self):
        return self._sess_id

    def open(self):
        self._sess_id = uuid4().hex
        logging.debug('Connection established.')
        self.application.pc.register_websocket(self._sess_id, self)

    # When a message arrives via RabbitMQ, write it to the websocket
    def on_message(self, message):
        logging.debug('Message received: {0}'.format(message))
        self.application.pc.redirect_incoming_message(
            self._sess_id, json.dumps(message))

    def on_close(self):
        logging.debug('Connection closed.')
        self.application.pc.unregister_websocket(self._sess_id)
The PikaClient module contains the PikaClient class, which keeps track of the inbound and outbound channels as well as the websockets that are currently open.
import logging

import pika
from constants import SETTINGS
from pika import PlainCredentials, ConnectionParameters
from pika.adapters.tornado_connection import TornadoConnection

pika.log = logging.getLogger(__name__)


class PikaClient(object):
    INPUT_QUEUE_NAME = 'in_queue'

    def __init__(self):
        self.connected = False
        self.connecting = False
        self.connection = None
        self.in_channel = None
        self.out_channels = {}
        self.websockets = {}

    def connect(self):
        if self.connecting:
            return

        self.connecting = True

        # Set up the RabbitMQ connection
        credentials = PlainCredentials(
            SETTINGS['RABBITMQ_USERNAME'], SETTINGS['RABBITMQ_PASSWORD'])
        param = ConnectionParameters(
            host=SETTINGS['RABBITMQ_HOST'], port=SETTINGS['RABBITMQ_PORT'], virtual_host='/', credentials=credentials)
        return TornadoConnection(param, on_open_callback=self.on_connected)

    def run(self):
        self.connection = self.connect()
        self.connection.ioloop.start()

    def stop(self):
        self.connected = False
        self.connecting = False
        self.connection.ioloop.stop()

    def on_connected(self, unused_connection):
        self.connected = True
        self.in_channel = self.connection.channel(self.on_conn_open)

    def on_conn_open(self, channel):
        self.in_channel.exchange_declare(
            exchange='tornado_input', exchange_type='topic')
        channel.queue_declare(
            callback=self.on_input_queue_declare, queue=self.INPUT_QUEUE_NAME)

    def on_input_queue_declare(self, queue):
        self.in_channel.queue_bind(
            callback=None, exchange='tornado_input', queue=self.INPUT_QUEUE_NAME, routing_key="#")

    def register_websocket(self, sess_id, ws):
        self.websockets[sess_id] = ws
        self.create_out_channel(sess_id)

    def unregister_websocket(self, sess_id):
        self.websockets.pop(sess_id)
        if sess_id in self.out_channels:
            self.out_channels[sess_id].close()

    def create_out_channel(self, sess_id):
        def on_output_channel_creation(channel):
            def on_output_queue_declaration(queue):
                channel.basic_consume(self.on_message, queue=sess_id)

            self.out_channels[sess_id] = channel
            channel.queue_declare(callback=on_output_queue_declaration,
                                  queue=sess_id, auto_delete=True, exclusive=True)

        self.connection.channel(on_output_channel_creation)

    def redirect_incoming_message(self, sess_id, message):
        self.in_channel.basic_publish(
            exchange='tornado_input', routing_key=sess_id, body=message)

    def on_message(self, channel, method, header, body):
        sess_id = method.routing_key
        if sess_id in self.websockets:
            self.websockets[sess_id].write_message(body)
            channel.basic_ack(delivery_tag=method.delivery_tag)
        else:
            channel.basic_reject(delivery_tag=method.delivery_tag)
Server.py is the main entry point of the application.
import logging
import os

from tornado import web, ioloop
from tornado.options import define, options, parse_command_line

from client import PikaClient
from handlers import WSHandler, MainHandler

define("port", default=3000, help="run on the given port.", type=int)
define("debug", default=True, help="run in debug mode.", type=bool)


def main():
    parse_command_line()
    settings = {
        "debug": options.debug,
        "static_path": os.path.join(os.path.dirname(__file__), "web/static")
    }

    app = web.Application(
        [
            (r"/", MainHandler),
            (r"/stream", WSHandler),
        ],
        **settings
    )

    # Set up the PikaClient
    app.pc = PikaClient()
    app.listen(options.port)
    logging.info("Server running on http://localhost:3000")

    try:
        app.pc.run()
    except KeyboardInterrupt:
        app.pc.stop()


if __name__ == "__main__":
    main()
I am following Alex Hadik's Flask-SocketIO tutorial, which builds a very simple Flask chat app.
http://www.alexhadik.com/blog/2015/1/29/using-socketio-with-python-and-flask-on-heroku
I would like to broadcast a message to all connected users except the sender. I have gone through the Flask-SocketIO __init__.py, but I'm still not sure how to do this.
Here's the server code.
from flask import Flask, render_template, request
from flask_socketio import SocketIO, emit, send
import json, sys

app = Flask(__name__)
socketio = SocketIO(app)

clients = {}


@app.route("/")
def index():
    return render_template('index.html')


@socketio.on('send_message')
def handle_source(json_data):
    text = json_data['message'].encode('ascii', 'ignore')
    current_client = request.namespace
    current_client_id = request.namespace.socket.sessid
    update_client_list(current_client, current_client_id)
    if clients.keys():
        for client in clients.keys():
            if not current_client_id in client:
                clients[client].socketio.emit('echo', {'echo': 'Server Says: ' + text})


def update_client_list(current_client, current_client_id):
    if not current_client_id in clients:
        clients[current_client_id] = current_client
    return


if __name__ == "__main__":
    socketio.run(app, debug=False)
It's currently just broadcasting to all connected clients. I created a dict of connected clients (clients) which stores each request.namespace indexed by client id.
Calling clients[client].socketio.emit() for all clients except the sending client still results in the message being broadcast to all users.
Does anyone have any thoughts on how I can broadcast messages to all connected users except the sender?
You don't have to save user ids and manage individual emissions; you can broadcast to everyone but the sender with emit('my_event', {'data': my_data}, broadcast=True, include_self=False). In your case it would be something like this:
@socketio.on('send_message')
def handle_source(json_data):
    text = json_data['message'].encode('ascii', 'ignore')
    emit('echo', {'echo': 'Server Says: ' + text}, broadcast=True, include_self=False)
If you have to send messages to a specific group of clients, you can create rooms and then use emit('my_event', {'data': my_data}, room=my_room, include_self=False) to send messages to the clients who joined my_room. You can check the reference for flask_socketio.emit for more details.
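A rough sketch of the room variant (the join event and the room payload key are invented here; join_room is part of flask_socketio):

from flask_socketio import join_room, emit

@socketio.on('join')
def on_join(data):
    # Put the sender's connection into a named room.
    join_room(data['room'])

@socketio.on('room_message')
def on_room_message(data):
    # Everyone who joined the room, except the sender, receives this.
    emit('echo', {'echo': 'Server Says: ' + data['message']},
         room=data['room'], include_self=False)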
I can't comment on @Alex's response because I don't have enough reputation, but if you want to emit a broadcast message, this is how it is done in Python:
emit('echo', {'data': 'whatever you are trying to send'}, broadcast=True)
https://flask-socketio.readthedocs.io/en/latest/#flask_socketio.SocketIO.emit
You're looking for the skip_sid parameter for socketio.emit().
skip_sid – The session id of a client to ignore when broadcasting or
addressing a room. This is typically set to the originator of the
message, so that everyone except that client receive the message. To
skip multiple sids pass a list.
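For instance, a minimal sketch using the server-level emit (request.sid is how Flask-SocketIO exposes the sender's session id inside a handler):

from flask import request

@socketio.on('send_message')
def handle_source(json_data):
    # Broadcast to everyone except the originating client.
    socketio.emit('echo',
                  {'echo': 'Server Says: ' + json_data['message']},
                  skip_sid=request.sid)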
You can try socket.broadcast.emit instead of socket.emit. I'm not sure if this works in the Python library, but it is the syntax for what you're looking for in Socket.io for Node.js.
Below is a simple app to send a message to the browser: if there is a new message on the Redis channel it will be sent, otherwise the last known value is sent, in a non-blocking way.
But I am doing something wrong. Can someone please help me understand?
from gevent import monkey, Greenlet
monkey.patch_all()

import json

import gevent
import redis
from flask import Flask, render_template, request, redirect, url_for, abort, session, Response, jsonify

app = Flask(__name__)

myglobaldict = {'somedata': ''}


class RedisLiveData:
    def __init__(self, channel_name):
        self.channel_name = channel_name
        self.redis_conn = redis.Redis(host='localhost', port=6379, db=0)
        pubsub = self.redis_conn.pubsub()
        gevent.spawn(self.sub, pubsub)

    def sub(self, pubsub):
        pubsub.subscribe(self.channel_name)
        for message in pubsub.listen():
            gevent.spawn(process_rcvd_mesg, message['data'])


def process_rcvd_mesg(mesg):
    print("Received new message %s" % mesg)
    myglobaldict['somedata'] = mesg


g = RedisLiveData("test_channel")


@app.route('/latestmessage')
def latestmessage():
    # Serialize explicitly; Response() does not JSON-encode a dict for us.
    return Response(json.dumps(myglobaldict), mimetype="application/json")


if __name__ == '__main__':
    app.run()
On the JavaScript side I am just using a simple $.ajax GET to view the messages, but the client at http://localhost:5000/latestmessage keeps showing the old message even after the Redis update.
It is probably a caching issue.
You can add a timestamp or a random number to the request sent from the Ajax call, e.g. http://localhost:5000/latestmessage?t=timestamp.
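Alternatively (a server-side sketch, not part of the original suggestion), you can mark the response itself as non-cacheable in the Flask view:

@app.route('/latestmessage')
def latestmessage():
    resp = Response(json.dumps(myglobaldict), mimetype="application/json")
    # Tell the browser and any intermediate proxy not to cache this response.
    resp.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
    return resp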
I suggest you use POST instead of GET as the HTTP method; that eliminates the caching problem and some annoying browser behaviour (in Chrome, for example, requests after the first wait for the first to complete before being sent to the web server).
If you want to keep the GET method, you can ask jQuery to make the request non-cacheable by the browser with the cache setting:
$.ajax(..., {cache:false})