How to send an object from the master to a worker? - python

I have this code:
class Tasks(SequentialTaskSet):
    @task
    def shopping(self):
        with self.parent.environment.parsed_options.shopping.request_airshopping(
                client=self.client, catch_response=True) as response:
            self.run_req(response, self.parent.environment.parsed_options.rsa)
class WebUser(HttpUser):
    tasks = [Tasks]
    min_wait = 0
    max_wait = 0

    @staticmethod
    @events.test_start.add_listener
    def on_test_start(environment: Environment, **kwargs):
        os.environ["ENABLE_SLEEP"] = "False"
        credentials = {
            "login": environment.parsed_options.login,
            "password": environment.parsed_options.password,
            "structure_unit_code": environment.parsed_options.suid,
        }
        login = Login(base_url=environment.host)
        response = login.request_login(credentials)
        parse_xml = parsing_xml_response(response.text)
        headers = {
            "Content-Type": "application/xml",
            "Authorization": f'Bearer {parse_xml.xpath("//Token")[0].text}'}
        shopping = Shopping(
            base_url=environment.parsed_options.host,
            headers=headers
        )
        set_paxs(environment, shopping)
        set_flight(environment, shopping)
        environment.parsed_options.__dict__.update({"shopping": shopping})
If I start it without workers, it works fine.
If I start workers, I get this error: TypeError: can not serialize 'Shopping' object
How can I send the Shopping object from the master to the workers?
I have tried https://docs.locust.io/en/stable/running-distributed.html

You can do this by sending a message from worker to master or master to worker. The message is just a string, but you can include a data payload with it. You need a function that registers what your message is and what to do when that message is received, which will often be a function that does something with the data payload. The docs use this example:
from locust import events
from locust.runners import MasterRunner, WorkerRunner

# Fired when the worker receives a message of type 'test_users'
def setup_test_users(environment, msg, **kwargs):
    for user in msg.data:
        print(f"User {user['name']} received")
    environment.runner.send_message('acknowledge_users', f"Thanks for the {len(msg.data)} users!")

# Fired when the master receives a message of type 'acknowledge_users'
def on_acknowledge(msg, **kwargs):
    print(msg.data)

@events.init.add_listener
def on_locust_init(environment, **_kwargs):
    if not isinstance(environment.runner, MasterRunner):
        environment.runner.register_message('test_users', setup_test_users)
    if not isinstance(environment.runner, WorkerRunner):
        environment.runner.register_message('acknowledge_users', on_acknowledge)
The @events.init.add_listener decorator means Locust will run that function when it starts up, and the key part is the register_message call. The first argument is an arbitrary string, whatever you want it to be. The second argument is the function to call when that message is received.
Note the arguments the function to be called has defined. One of them is msg, which you need to use to get the data that is sent.
Then you can send the message and data payload any time you want. The docs use this example:
users = [
    {"name": "User1"},
    {"name": "User2"},
    {"name": "User3"},
]

environment.runner.send_message('test_users', users)
The string is the message type you want the receiver to match, and the second argument (users in this case) is the actual data you want to send to the other instance(s).
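Applied to the question above, one option (a minimal sketch, assuming the Shopping class from the question is importable and using a hypothetical build_auth_headers() helper that wraps the login and XML parsing shown in on_test_start) is to have the master log in once and send only serializable data, such as the base URL and headers dict, to the workers, which then build their own Shopping instance:
from locust import events
from locust.runners import MasterRunner, WorkerRunner

# Runs on the workers: rebuild the (non-serializable) Shopping object from plain data.
def setup_shopping(environment, msg, **kwargs):
    shopping = Shopping(base_url=msg.data["base_url"], headers=msg.data["headers"])
    environment.parsed_options.__dict__.update({"shopping": shopping})

@events.init.add_listener
def on_locust_init(environment, **_kwargs):
    if not isinstance(environment.runner, MasterRunner):
        environment.runner.register_message("shopping_config", setup_shopping)

@events.test_start.add_listener
def on_test_start(environment, **kwargs):
    # Runs on the master: log in once, then push only picklable data
    # (base URL and headers) to every worker.
    if not isinstance(environment.runner, WorkerRunner):
        headers = build_auth_headers(environment)  # hypothetical helper wrapping the login/XML parsing above
        environment.runner.send_message(
            "shopping_config",
            {"base_url": environment.parsed_options.host, "headers": headers},
        )
The key point is that only plain dicts and strings cross the master/worker boundary, so Locust never has to serialize the Shopping object itself.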

Related

Control an asynchronous routine elegantly in Django

Using Django, I need to poll updates from a GraphQL subscription and be able to turn the updates on and off.
My implementation uses the websockets package wrapped in an async function to poll updates from a local container providing a GraphQL subscription and stores them in the Django database.
I need to find a way to control this polling feature with an on/off GraphQL mutation that would start or stop the readings and database updates.
I've tried using Celery by starting a task from the Django apps.py ready() method, but it became overkill and I ended up with multiple tasks that were too hard to manage.
I've also thought about using a database record to keep the status and running the asynchronous polling code in a management command, but continuously reading the feed status from the database without any kind of hook does not seem like a great idea.
My last attempt was to trigger a GraphQL mutation on my own service using a management command at the start of my docker-compose fleet to start the readings, but the asyncio event loop ends up locking the main thread for some reason.
Here's my current implementation using asyncio:
""" Feed listener class, responsible for connecting to the feed and processing temperature updates """
class FeedListener:
__instance = None
read: bool = False
task: asyncio.Task = None
def __new__(cls,*args, **kwargs):
""" Singleton implementation """
if FeedListener.__instance is None :
FeedListener.__instance = super(FeedListener, cls).__new__(cls, *args, **kwargs)
return FeedListener.__instance
def start_feed_readings(self) -> None:
""" Starts the feed listener if it is not already running """
self.read = True
if not self.task:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
tasks = [
asyncio.ensure_future(FeedListener.capture_data()),
]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
def stop_feed_readings(self) -> None:
""" Stops the feed listener if it is running """
self.read = False
self.task = None
#staticmethod
async def capture_data():
""" Connects to the feed, processes temperature updates and stores them in the database """
uri: str = f"ws://{settings.TEMPERATURE_FEED['HOST']}:{settings.TEMPERATURE_FEED['PORT']}/graphql"
start: dict = {
"type": "start",
"payload": {"query": "subscription { temperature }"}
}
async with websockets.connect(uri, subprotocols=["graphql-ws"]) as websocket:
print("Connected to feed")
await websocket.send(json.dumps(start))
while True:
data = json.loads(await websocket.recv())
print(data)
sync_to_async(TemperatureRecord.objects.create)(value=data["payload"]["data"]["temperature"])
It is started by the following management command:
""" This management command is used to control the reading status of the feed.
It is used to start and stop the feed readings through a graphql mutation to the main app."""
class Command(BaseCommand):
help = 'Controls the reading status of the feed'
def add_arguments(self, parser):
parser.add_argument('status', nargs='+', type=str)
def handle(self, *args, **options):
status: str = options['status'][0]
if (status != "on" and status != "off"):
raise CommandError("Invalid status")
query = """
mutation {
toggleFeed(input: {status: "%s"}) {
status
}
}
""" % status
url = "http://app:8000/graphql"
response = requests.post(url, json={'query': query})
GraphQL mutation code:
class ToggleFeedMutation(graphene.Mutation):
    """
    Mutation to toggle the feed on and off
    """
    class Arguments:
        input = ToggleFeedInputType(required=True)

    status = graphene.String()

    """ Toggles the feed status on and off. Throws an exception if the status is invalid """
    def mutate(self, info, input):
        if (input.status != "on" and input.status != "off"):
            raise Exception("Invalid status")
        input.status == "on" and FeedListener().start_feed_readings() or FeedListener().stop_feed_readings()
How would you proceed to achieve this in an elegant way?

Executing the auth token once per run - Locust

Ideally I want to grab the token one time (1 request) and then pass that token into the other 2 requests as they execute. When I run this code through Locust though...
from locust import HttpUser, constant, task
import urllib3

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

class ProcessRequests(HttpUser):
    host = 'https://hostURL'
    wait_time = constant(1)

    def on_start(self):
        tenant_id = "tenant123"
        client_id = "client123"
        secret = "secret123"
        scope = "api://123/.default"
        body = "grant_type=client_credentials&client_id=" + client_id + "&client_secret=" + secret + "&scope=" + scope

        tokenResponse = self.client.post(
            f"https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token",
            body,
            headers={"ContentType": "application/x-www-form-urlencoded"}
        )

        response = tokenResponse.json()
        responseToken = response['access_token']
        self.headers = {'Authorization': 'Bearer ' + responseToken}

    @task
    def get_labware(self):
        self.client.get("/123", name="Labware", headers=self.headers)

    @task
    def get_instruments(self):
        self.client.get("/456", name="Instruments", headers=self.headers)
It ends up firing off multiple token requests that don't stop.
Any ideas how to fix this so the token request only runs once?
In your case it runs once per user, so my expectation is that you spawned 24 users and the number of Labware and Instruments requests is at least 2x higher, so it seems to work exactly according to the documentation:
Users (and TaskSets) can declare an on_start method and/or on_stop method. A User will call its on_start method when it starts running, and its on_stop method when it stops running. For a TaskSet, the on_start method is called when a simulated user starts executing that TaskSet, and on_stop is called when the simulated user stops executing that TaskSet (when interrupt() is called, or the user is killed).
If you want to get the token only once and then share it across all the virtual users you can go for the workaround from this question
@events.test_start.add_listener
def _(environment, **kwargs):
    global token
    token = get_token(environment.host)
and move what you currently have in on_start() into that get_token() function.
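Putting that together, a minimal sketch (reusing the placeholder credentials from the question; get_token() is the helper referred to above, and only one task is shown) might look like this:
import requests
from locust import HttpUser, constant, task, events

token = None

def get_token(host):
    # Hypothetical helper: the same token request the question makes in on_start(),
    # executed once per test run instead of once per user.
    body = ("grant_type=client_credentials&client_id=client123"
            "&client_secret=secret123&scope=api://123/.default")
    response = requests.post(
        "https://login.microsoftonline.com/tenant123/oauth2/v2.0/token",
        data=body,
        headers={"Content-Type": "application/x-www-form-urlencoded"},
    )
    return response.json()["access_token"]

@events.test_start.add_listener
def _(environment, **kwargs):
    global token
    token = get_token(environment.host)

class ProcessRequests(HttpUser):
    host = 'https://hostURL'
    wait_time = constant(1)

    @task
    def get_labware(self):
        self.client.get("/123", name="Labware",
                        headers={"Authorization": f"Bearer {token}"})
Because the listener runs outside of any user, it uses requests directly rather than self.client, and every spawned user then reads the shared module-level token.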
More information: Locust with Python: Introduction, Correlating Variables, and Basic Assertions

How can I publish custom broker messages in minos?

I want to send a message to notify about something in one of my microservices, but I don't want to do that through a domain event, which requires creating, updating or deleting one of the entities of the microservice.
Is there another way to send a message such that another microservices can handle them?
Yes! You can do that directly using the BrokerPublisher instance injected in the corresponding service.
If you want to send a message you can do as follows:
from minos.common import ModelType
from minos.cqrs import Service
from minos.networks import BrokerMessageV1, BrokerMessageV1Payload, Request, Response, enroute

MyContent = ModelType.build("MyContent", {"text": str, "number": int})

class MySenderService(Service):
    @enroute.rest.command("/send/my-channel", "POST")
    async def handle_send_my_channel(self, request: Request) -> Response:
        # First, create the message.
        message = BrokerMessageV1(
            "MyChannel", BrokerMessageV1Payload(MyContent("foo", 56))
        )
        # Then, send it!
        await self.broker_publisher.send(message)
In this case, "MyChannel" refers to the channel (or topic) on which the message will be sent.
Note that MyContent is a convenient ModelType created just to give the message's content some structure (but it could be another type, like int, str, dict and so on).
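For example, the same handler could send a plain dict instead (a sketch under the same imports as above):
# Same send, but with a plain dict as the payload instead of a ModelType.
message = BrokerMessageV1(
    "MyChannel", BrokerMessageV1Payload({"text": "foo", "number": 56})
)
await self.broker_publisher.send(message)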
Finally, if you want to handle it in another microservice, you can do that just like any other message:
from minos.cqrs import Service
from minos.networks import Request, enroute

class MyReceiverService(Service):
    @enroute.broker.event("MyChannel")
    async def handle_my_channel(self, request: Request):
        # Print the received message's content!
        print(await request.content())

Waiting for websocket.receive consumer in django-channels

How do I wait for a response from the client after sending it something using django-channels?
Whenever Group.send() is called from the send_to_client() function and the client receives the message, send_to_client() expects a response back from the client, which is received on the websocket.receive channel.
Is there a way to return the response_msg in the send_to_client() function?
This is what I have so far.
Sample code for consumers.py:
def ws_receive(message):
    response_msg = message.content['text']
    return response_msg

def send_to_client(param1, param2, param3):
    Group.send({
        "text": json.dumps({
            "First": param1,
            "Second": param2,
        })
    })
So once the message reaches the client side, the client will send a response back to the server, which will be received by the ws_receive(message) function through the websocket.receive channel defined in the urls.py file:
channel_patterns = [
    route("websocket.receive", ws_receive),
    ...
]
Is there a way to do this so that my function would look like this?
def send_to_client(...):
    Group.send(...)
    response_msg = # response message from client
Since you are receiving via a websocket, I am not sure you would even be able to tell whether an incoming message is a direct response to your request. I would rather put an id variable or something in the outgoing request, and ask the client to put that id in the response as well. That might require both the sender and receiver to know the value of the id (probably store it in the db?).
Also, it does not seem logical to block while waiting for the response from the websocket.
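A minimal sketch of that id-correlation idea (assuming Channels 1.x as in the question; the in-memory dict stands in for the database mentioned above, and the group name "clients" is made up):
import json
import uuid
from channels import Group

# Hypothetical store mapping request ids to client replies; in practice
# this would live in the database as suggested above.
pending_responses = {}

def send_to_client(param1, param2):
    request_id = str(uuid.uuid4())
    Group("clients").send({
        "text": json.dumps({
            "id": request_id,   # the client is expected to echo this back
            "First": param1,
            "Second": param2,
        })
    })
    return request_id  # the caller checks pending_responses[request_id] later

def ws_receive(message):
    payload = json.loads(message.content['text'])
    # Match the reply to the original request via the echoed id.
    pending_responses[payload["id"]] = payload
This keeps send_to_client() non-blocking, in line with the point above that blocking while waiting on the websocket is not a good idea.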

Listen RabbitMQ queue in Flask view

I have a view in which I send one message (a request id) to workers. Then I need to listen to the result queue and return a response only when I receive the response message (the response must contain this id). How do I do this correctly?
This is my code:
def get(self):
    ruid = rand_ruid()
    # add msg to q1
    write_ch = current_app.amqp_conn.channel()
    write_ch.queue_declare(queue='q1', durable=True)
    msg = mkmsg(ruid=ruid)
    write_ch.basic_publish(exchange='', routing_key='q1', body=msg)
    write_ch.close()
    # then wait for results from qbus
    listen_ch = current_app.amqp_conn.channel()
    listen_ch.exchange_declare(exchange='exbus', type='direct')
    listen_ch.queue_declare(queue='bus')
    listen_ch.queue_bind(exchange='exbus', queue='bus', routing_key=ruid)
    while 1:
        for method, properties, body in listen_ch.consume('bus'):
            if method and body:
                listen_ch.basic_ack(delivery_tag=method.delivery_tag)
                listen_ch.close()
                return make_response(body)
Updated
My question is incorrect, as is my approach. I wanted to do an asynchronous action (waiting for a result from the queue) in a synchronous part of the program (the Flask view).
