I am trying to assign Locust tasks dynamically based on the host value passed in through the UI. In this example, if the host is passed in as "hello", the test should run the hello task; otherwise it should run the world task.
from locust import HttpUser, TaskSet, task, events

class RandomTask1(TaskSet):
    @task(1)
    def something(self):
        print("Hello!")

class RandomTask2(TaskSet):
    @task(1)
    def something(self):
        print("World!")

class LoadTestUser(HttpUser):
    def on_start(self):
        host_config = self.host
        if host_config == "hello":
            tasks = {RandomTask1: 1}
        else:
            tasks = {RandomTask2: 1}
The example above does not work, and I get the following error:
Exception: No tasks defined on LoadTestUser. use the @task decorator or set the tasks property of the User (or mark it as abstract = True if you only intend to subclass it)
Any idea how I can achieve something like this? I have simplified this for the example, but for all intents and purposes, let's assume that the locust instance is already running and cannot be stopped or restarted and the tasks need to be assigned dynamically.
Edit:
Tried doing this:
class LoadTestUser(HttpUser):
    def on_start(self):
        if self.host == "hello":
            self.tasks = {HelloTask: 1}
        else:
            self.tasks = {WorldTask: 1}

    @task
    def nothing(self):
        pass

class HelloTask(TaskSet):
    @task
    def something(self):
        print("Hello")

class WorldTask(TaskSet):
    @task
    def something(self):
        print("World")
Now I see the following error:
Traceback (most recent call last):
  File "/Users/user/project/venv/lib/python3.8/site-packages/locust/user/task.py", line 285, in run
    self.schedule_task(self.get_next_task())
  File "/Users/user/project/venv/lib/python3.8/site-packages/locust/user/task.py", line 420, in get_next_task
    return random.choice(self.user.tasks)
  File "/Users/user/opt/anaconda3/lib/python3.8/random.py", line 291, in choice
    return seq[i]
KeyError: 0
Create a single task and put the logic for what you want to run inside that task.
class LoadTestUser(HttpUser):
    def something1(self):
        print("Hello!")

    def something2(self):
        print("World!")

    @task
    def task_logic(self):
        if self.host == "hello":
            self.something1()
        else:
            self.something2()
However, you can also address the error you're getting directly. You need to have a task defined on the class even if you intend to overwrite or change the tasks with your TaskSets. There's an example in the documentation; just add a task that does nothing but pass, and then your overrides should work.
class LoadTestUser(HttpUser):
    def on_start(self):
        host_config = self.host
        if host_config == "hello":
            self.tasks = {RandomTask1: 1}
        else:
            self.tasks = {RandomTask2: 1}

    @task
    def nothing(self):
        pass
EDIT:
This should work, but it looks like there could be a bug in the current version of Locust where tasks is only accepted as a dictionary when Locust first starts, and only as a list afterward. Until that's fixed, the example in the other answer works.
import sys

from locust import HttpUser, TaskSet, task, events

class LoadTestUser(HttpUser):
    def locust_class(self, name):
        # Look up the TaskSet class in this module by host name, e.g. "hello" -> HelloTask
        module = sys.modules[__name__]
        return getattr(module, f"{name.capitalize()}Task")

    def get_weighted_tasks(self, task_list):
        # Expand each task by its weight, mirroring how @task(N) weights tasks
        new_tasks = []
        for item in task_list:
            if "locust_task_weight" in dir(item):
                for i in range(item.locust_task_weight):
                    new_tasks.append(item)
        return new_tasks

    def get_locust_tasks(self, cls):
        # Collect the methods that were marked with @task (they carry locust_task_weight)
        tasks = []
        for maybe_task in cls.__dict__.values():
            if hasattr(maybe_task, "locust_task_weight"):
                tasks.append(maybe_task)
        return tasks

    def on_start(self):
        task_cls = self.locust_class(self.host)
        task_list = self.get_locust_tasks(task_cls)
        self.tasks = self.get_weighted_tasks(task_list)

    @task(1)
    def nothing(self):
        pass
class HelloTask(TaskSet):
    @task(1)
    def something(self):
        print("Hello")

    @task(100)
    def something_else(self):
        print("hello")

class WorldTask(TaskSet):
    @task(1)
    def something(self):
        print("World")

    @task(10)
    def something_else(self):
        print("world")
Related
In the code posted below, I am subclassing Process. As stated in some tutorials, .start() must be called to start the worker process.
The problem I am facing is that I cannot invoke the .start() method, because it causes the app to crash and generates the error message posted below. Instead, I invoke the .run() method.
Please let me know why I cannot use .start(), why it generates that error message, and how to fix it so I can use .start().
Note: the code posted below is in a webservice.
error msg:
File "C:\Python310\lib\multiprocessing\process.py", line 100, in _check_closed
if self._closed:
AttributeError: 'KeyGridCellsProcessing' object has no attribute '_closed'
code:
class KeyGridCellsProcessing(Process):
    @staticmethod
    def newInstance(pNameSuffix, row, col, _pixelsValuesSatisfyThresholdInWindowedSegment, pixelsValuesSatisfyThresholdInTIFFImageDatasetCnt):
        keyGridCellsProcessing = KeyGridCellsProcessing(pNameSuffix, row, col, _pixelsValuesSatisfyThresholdInWindowedSegment, pixelsValuesSatisfyThresholdInTIFFImageDatasetCnt)
        KeyGridCellsProcessing.processesCollector.append(keyGridCellsProcessing)
        return keyGridCellsProcessing

    @staticmethod
    def setDatasetElevationsTIFFInEPSG25832(datasetElevationsTIFFInEPSG25832):
        KeyGridCellsProcessing.datasetElevationsTIFFInEPSG25832 = datasetElevationsTIFFInEPSG25832

    @staticmethod
    def setNDVIsTIFFWindowedSegmentContentsInEPSG25832(NDVIsTIFFWindowedSegmentContentsInEPSG25832):
        KeyGridCellsProcessing.NDVIsTIFFWindowedSegmentContentsInEPSG25832 = NDVIsTIFFWindowedSegmentContentsInEPSG25832

    @staticmethod
    def setMainTIFFImageDatasetContents(mainTIFFImageDatasetContents):
        KeyGridCellsProcessing.mainTIFFImageDatasetContents = mainTIFFImageDatasetContents

    @staticmethod
    def waitForProcessToFinish():
        for p in KeyGridCellsProcessing.processesCollector:
            p.join()
            # logger.debug(f"process:{p}")
            # exit()

    def __init__(self, pNameSuffix, row, col, _pixelsValuesSatisfyThresholdInWindowedSegment, pixelsValuesSatisfyThresholdInTIFFImageDatasetCnt):
        # self.queue = queue
        self.pNameSuffix = pNameSuffix
        self.row = row
        self.col = col
        self._pixelsValuesSatisfyThresholdInWindowedSegment = _pixelsValuesSatisfyThresholdInWindowedSegment
        self.pixelsValuesSatisfyThresholdInTIFFImageDatasetCnt = pixelsValuesSatisfyThresholdInTIFFImageDatasetCnt
        self.fourCornersOfKeyWindowInEPSG4326 = []

    def run(self):
        runStartTime = time.time()
        # logic
        ...
        ...
        runEndTime = time.time() - runStartTime
        KeyGridCellsProcessing.runMethodForKeyGridCellsExecutionTimeAccumulator += runEndTime

    @staticmethod
    def enqueue():
        KeyGridCellsProcessing.keyGridCellsQueue.put([KeyGridCellsProcessing.NDVIsPer10mX10mForKeyWindow,
                                                      ......,
                                                      ......,
                                                      ......,
                                                      ......,
                                                      ......,
                                                      ], block=True, timeout=None)

    @staticmethod
    def dequeue():
        dequeueStartTime = time.time()
        item = KeyGridCellsProcessing.keyGridCellsQueue.get(block=True, timeout=None)
        KeyGridCellsProcessing.executionTimeOfDequeueProcess = time.time() - dequeueStartTime
        return item

    @staticmethod
    def shutdownQueue():
        KeyGridCellsProcessing.keyGridCellsQueue.close()
        KeyGridCellsProcessing.keyGridCellsQueue.join_thread()

    def startProcessing(self):
        # self.start()  # <=========== causes the app to crash
        self.run()  # works
start()'s first action is to call self._check_closed(), which is defined in BaseProcess (the base class of Process):

def _check_closed(self):
    if self._closed:
        raise ValueError("process object is closed")

self._closed is initialized in BaseProcess.__init__, but since you don't call super().__init__() in your class, it's never initialized. Adding it should solve the issue:
class KeyGridCellsProcessing(Process):
    def __init__(self, ...):
        super().__init__()
        ...
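To see the fix in isolation, here is a minimal self-contained sketch (a hypothetical Worker class, not the code from the question):

import multiprocessing

class Worker(multiprocessing.Process):
    def __init__(self, row, col):
        super().__init__()  # initializes _closed and the rest of the BaseProcess state
        self.row = row
        self.col = col

    def run(self):
        print(f"processing cell ({self.row}, {self.col})")

if __name__ == "__main__":
    p = Worker(3, 7)
    p.start()  # works now that BaseProcess.__init__ has run
    p.join()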
I want to make an event listener, like this:
@some.event
async def on_ready(some_info):
    print(some_info)

@some.event
async def on_error(err):
    print(err)
So, for when something is ready, or when a message is received, as in WebSockets. I'm using this for Discord, since some info is only available once the bot is identified or ready.
I've seen something like:
def add_listener(func, name):
    # ...

def remove_listener(func, name):
    # ...
But I don't really know how to use it or create one.
Here's a simple class-based solution I quickly coded:
class Event:
    def __init__(self):
        # Initialise a list of listeners
        self.__listeners = []

    # Define a getter for the 'on' property which returns the decorator.
    @property
    def on(self):
        # A decorator that runs addListener on the input function.
        def wrapper(func):
            self.addListener(func)
            return func
        return wrapper

    # Add and remove functions from the list of listeners.
    def addListener(self, func):
        if func in self.__listeners: return
        self.__listeners.append(func)

    def removeListener(self, func):
        if func not in self.__listeners: return
        self.__listeners.remove(func)

    # Trigger events.
    def trigger(self, args=None):
        # Run all the functions that are saved.
        if args is None:
            args = []
        for func in self.__listeners: func(*args)
It allows you to create an Event that functions can 'subscribe' to:
evn = Event()
# Some code...
evn.trigger(['arg x','arg y'])
Functions can subscribe to the event with decorators:
@evn.on
def some_function(x, y): pass
Or with the addListener method:
def some_function(x,y): pass
evn.addListener(some_function)
You can also remove listeners:
evn.removeListener(some_function)
To create something similar to what you asked for, you can do something like this:
# some.py
from event import Event

class SomeClass:
    def __init__(self):
        # Private event variable
        self.__event = Event()
        # Public event variable (decorator)
        self.event = self.__event.on

some = SomeClass()
And then use it like so:
# main.py
from some import some

@some.event
async def on_ready(some_info):
    print(some_info)

@some.event
async def on_error(err):
    print(err)
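Note that Event.trigger calls each listener synchronously, so plain functions are the simplest fit; async listeners like the ones above would additionally need to be scheduled on an event loop. Firing the event from inside SomeClass could look like this sketch (a single-file version; the ready method is my own illustration, not part of the answer above):

from event import Event

class SomeClass:
    def __init__(self):
        self.__event = Event()
        self.event = self.__event.on

    def ready(self, info):
        # Fire the event; every subscribed listener is called with info.
        self.__event.trigger([info])

some = SomeClass()

@some.event
def on_ready(info):  # a plain function, so trigger can call it directly
    print(info)

some.ready("bot is ready")  # prints: bot is ready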
Quick example:
################################################################################
# the code for the "framework"

event1_listeners = []
event2_listeners = []

def listen_event1(func):
    event1_listeners.append(func)
    return func

def listen_event2(func):
    event2_listeners.append(func)
    return func

def process_event(event):
    if event["type"] == 1:
        for func in event1_listeners:
            func(event)
    elif event["type"] == 2:
        for func in event2_listeners:
            func(event)
    else:
        raise NotImplementedError(f"{event['type']=!r}")

################################################################################
# your code

@listen_event1
def handle_event1_v1(event):
    print(f"handle_event1_v1 : {event!r}")

@listen_event1
def handle_event1_v2(event):
    print(f"handle_event1_v2 : {event!r}")

@listen_event2
def handle_event2(event):
    print(f"handle_event2 : {event!r}")

################################################################################
# the events processed by the framework

process_event({"type": 1, "msg": "hello"})
process_event({"type": 2, "msg": "world"})
handle_event1_v1 : {'type': 1, 'msg': 'hello'}
handle_event1_v2 : {'type': 1, 'msg': 'hello'}
handle_event2 : {'type': 2, 'msg': 'world'}
Essentially, the decorators store the function somewhere, and when an event is received, the framework iterates over the functions registered for it.
Removing a listener dynamically is basically just removing the func reference from the list.
The decorator in this case is simply sugar that saves you from calling event1_listeners.append(func) yourself.
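For completeness, removing a listener dynamically could look like this sketch (unlisten_event1 is a hypothetical helper built on the event1_listeners list above):

def unlisten_event1(func):
    # Stop a previously registered handler from receiving further events.
    if func in event1_listeners:
        event1_listeners.remove(func)

unlisten_event1(handle_event1_v2)
process_event({"type": 1, "msg": "hello again"})  # now only handle_event1_v1 runs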
I'm getting stuck on what I think is a basic multiprocessing and threading issue. I've got a multiprocessing setup, and within it a thread. However, when I set up the thread class within the __init__ function, I get the following error:
TypeError: can't pickle thread.lock objects
However, this does not happen if the thread is set up outside of the __init__ function. Does anyone know why this is happening? Note I'm using Windows.
Some code is below to illustrate the issue. As typed below, it runs fine. However, if print_hello() is called from within DoStuff's __init__, the error occurs; if it's called within the multiprocessing run() method, it's fine.
Can anyone point me in the right direction so it runs fine when called from __init__? Thanks!
import multiprocessing
import threading
import time

class MyProcess(multiprocessing.Process):
    def __init__(self, **kwargs):
        super(MyProcess, self).__init__(**kwargs)
        self.dostuff = DoStuff()

    def run(self):
        print("starting DoStuff")
        # This works fine if the line below is uncommented and the
        # self.print_hello() call in __init__ is commented out...
        self.dostuff.print_hello()

class DoStuff(object):
    def __init__(self, **kwargs):
        super(DoStuff, self).__init__(**kwargs)
        # If the following is uncommented, the error occurs...
        # Note it also occurs if the lines in start_thread are pasted here...
        # self.print_hello()

    def print_hello(self):
        print("hello")
        self.start_thread()

    def start_thread(self):
        self.my_thread_instance = MyThread()
        self.my_thread_instance.start()
        time.sleep(0.1)

class MyThread(threading.Thread):
    def __init__(self):
        super(MyThread, self).__init__()

    def run(self):
        print("Starting MyThread")

if __name__ == '__main__':
    mp_target = MyProcess()  # Also pass the pipe to transfer data
    # mp_target.daemon = True
    mp_target.start()
    time.sleep(0.1)
It looks like there is no simple answer, and it appears to be a restriction of Windows (Win 7, Python 3.6 in my case); on Windows it looks like you need to start the process before you can start the worker thread inside the owned object. That fits how multiprocessing works on Windows: the child process is spawned as a fresh interpreter and the Process object is pickled to be sent to it, and a Thread object holds lock objects that cannot be pickled.
There appears to be no such restriction on Unix (CentOS 7, Python 2.7.5), where the child is forked instead.
As an experiment I modified your code as follows; this version checks the OS and starts either the process first, or the thread first:
import multiprocessing
import threading
import time
import os

class MyProcess(multiprocessing.Process):
    def __init__(self, **kwargs):
        super(MyProcess, self).__init__(**kwargs)
        self.dostuff = DoStuff(self)

    def run(self):
        print("MyProcess.run()")
        print("MyProcess.ident = " + repr(self.ident))
        if os.name == 'nt':
            self.dostuff.start_thread()

class DoStuff(object):
    def __init__(self, owner, **kwargs):
        super(DoStuff, self).__init__(**kwargs)
        self.owner = owner
        if os.name != 'nt':
            self.start_thread()

    def start_thread(self):
        print("DoStuff.start_thread()")
        self.my_thread_instance = MyThread(self)
        self.my_thread_instance.start()
        time.sleep(0.1)

class MyThread(threading.Thread):
    def __init__(self, owner):
        super(MyThread, self).__init__()
        self.owner = owner

    def run(self):
        print("MyThread.run()")
        print("MyThread.ident = " + repr(self.ident))
        print("MyThread.owner.owner.ident = " + repr(self.owner.owner.ident))

if __name__ == '__main__':
    mp_target = MyProcess()  # Also pass the pipe to transfer data
    mp_target.daemon = True
    mp_target.start()
    time.sleep(0.1)
... and got the following on Windows, where the process starts first:
MyProcess.run()
MyProcess.ident = 14700
DoStuff.start_thread()
MyThread.run()
MyThread.ident = 14220
MyThread.owner.owner.ident = 14700
... and the following on Linux, where the thread is started first:
DoStuff.start_thread()
MyThread.run()
MyThread.ident = 140316342347520
MyThread.owner.owner.ident = None
MyProcess.run()
MyProcess.ident = 4358
If it were my code I'd be tempted to always start the process first, then create the thread within that process; the following version works fine for me across both platforms:
import multiprocessing
import threading
import time

class MyProcess(multiprocessing.Process):
    def __init__(self, **kwargs):
        super(MyProcess, self).__init__(**kwargs)
        self.dostuff = DoStuff()

    def run(self):
        print("MyProcess.run()")
        self.dostuff.start_thread()

class DoStuff(object):
    def __init__(self, **kwargs):
        super(DoStuff, self).__init__(**kwargs)

    def start_thread(self):
        self.my_thread_instance = MyThread()
        self.my_thread_instance.start()
        time.sleep(0.1)

class MyThread(threading.Thread):
    def __init__(self):
        super(MyThread, self).__init__()

    def run(self):
        print("MyThread.run()")

if __name__ == '__main__':
    mp_target = MyProcess()  # Also pass the pipe to transfer data
    mp_target.daemon = True
    mp_target.start()
    time.sleep(0.1)
I'm using a thread pool with Tornado to do some work. This is the code:
common/thread_pool.py
import threading
import logging
import Queue

import tornado.ioloop

class Worker(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        logging.info('Worker start')
        while True:
            content = self._queue.get()
            if isinstance(content, str) and content == 'quit':
                break
            # content: (func, args, on_complete)
            func = content[0]
            args = content[1]
            on_complete = content[2]
            resp = func(args)
            tornado.ioloop.IOLoop.instance().add_callback(lambda: on_complete(resp))
            # I don't know if it is correct to call this
            # self._queue.task_done()
        logging.info('Worker stop')

class WorkerPool(object):
    _workers = []

    def __init__(self, num):
        self._queue = Queue.Queue()
        self._size = num

    def start(self):
        logging.info('WorkerPool start %d' % self._size)
        for _ in range(self._size):
            worker = Worker(self._queue)
            worker.start()
            self._workers.append(worker)

    def stop(self):
        for worker in self._workers:
            self._queue.put('quit')
        for worker in self._workers:
            worker.join()
        logging.info('WorkerPool stopped')

    def append(self, content):
        self._queue.put(content)
gateway.py
import sys
import time
import logging

import tornado.ioloop
import tornado.web

from common import thread_pool

workers = None

class MainServerHandler(tornado.web.RequestHandler):
    @tornado.web.asynchronous
    def get(self):
        start_time = time.time()
        method = 'get'
        content = (self.handle, (method, self.request, start_time), self.on_complete)
        workers.append(content)

    @tornado.web.asynchronous
    def post(self):
        start_time = time.time()
        method = 'post'
        content = (self.handle, (method, self.request, start_time), self.on_complete)
        workers.append(content)

    def handle(self, args):
        method, request, start_time = args
        # for test, just return
        return 'test test'

    def on_complete(self, res):
        logging.debug('on_complete')
        self.write(res)
        self.finish()
        return

def main(argv):
    global workers
    workers = thread_pool.WorkerPool(conf_mgr.thread_num)  # conf_mgr comes from elsewhere in the project
    workers.start()
    application = tornado.web.Application([(r"/", MainServerHandler)])
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()

if __name__ == "__main__":
    main(sys.argv[1:])
When I make many concurrent requests, I get this error:
ERROR: 2014-09-15 18:04:03: ioloop.py:435 * 140500107065056 Exception in callback <tornado.stack_context._StackContextWrapper object at 0x7fc8b4d6b9f0>
Traceback (most recent call last):
  File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../third-party/tornado-2.4.1/tornado/ioloop.py", line 421, in _run_callback
    callback()
  File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../common/thread_pool.py", line 39, in <lambda>
    tornado.ioloop.IOLoop.instance().add_callback(lambda: on_complete(resp))
  File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/gateway.py", line 92, in on_complete
    self.write(res)
  File "/home/work/nlp_arch/project/ps/se/nlp-arch/gateway/gateway/../third-party/tornado-2.4.1/tornado/web.py", line 489, in write
    raise RuntimeError("Cannot write() after finish(). May be caused "
RuntimeError: Cannot write() after finish(). May be caused by using async operations without the @asynchronous decorator.
But I didn't call write() after finish(). I'm also using the @asynchronous decorator. At the same time, the logs show that write() and finish() are called by the same thread.
The issue is with the way you're adding the callback to the I/O loop. Add it like this:
tornado.ioloop.IOLoop.instance().add_callback(on_complete, resp)
And the errors will go away.
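Applied to the worker's run loop in common/thread_pool.py, that fix would look like this sketch (only the changed method shown):

    def run(self):
        logging.info('Worker start')
        while True:
            content = self._queue.get()
            if isinstance(content, str) and content == 'quit':
                break
            func, args, on_complete = content
            resp = func(args)
            # Pass the callback and its argument positionally; both are bound
            # now, not when the I/O loop later runs the callback.
            tornado.ioloop.IOLoop.instance().add_callback(on_complete, resp)
        logging.info('Worker stop')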
You're seeing this strange behavior because when you use a lambda function, you're creating a closure over the local scope of the function, and the variables used in that closure get looked up at the point the lambda is executed, not when it's created. Consider this example:
funcs = []

def func(a):
    print a

for i in range(5):
    funcs.append(lambda: func(i))

for f in funcs:
    f()
Output:
4
4
4
4
4
Because your worker method is running in a while loop, on_complete ends up getting redefined several times, which also changes the value of on_complete inside the lambda. That means if one worker thread sets on_complete for handler A, but then gets another task and sets on_complete for handler B before the callback set for handler A runs, both callbacks end up running handler B's on_complete.
If you really wanted to use a lambda, you could also avoid this by binding on_complete in the local scope of the lambda:
tornado.ioloop.IOLoop.instance().add_callback(lambda on_complete=on_complete: on_complete(resp))
But just adding the function and its argument directly is much nicer.
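For instance, the same default-argument trick fixes the earlier loop example (a sketch, same Python 2 style as the code above):

funcs = []

def func(a):
    print a

for i in range(5):
    # i=i binds the current value of i when the lambda is defined
    funcs.append(lambda i=i: func(i))

for f in funcs:
    f()  # prints 0, 1, 2, 3, 4 instead of 4 five times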
I have the following problem with this little Tornado test:
import time

import tornado.testing
from concurrent.futures import ThreadPoolExecutor
from tornado.concurrent import run_on_executor
from tornado.gen import coroutine
from hamcrest import assert_that, equal_to

class SimpleIOLoopTests(tornado.testing.AsyncTestCase):
    def setUp(self):
        super(SimpleIOLoopTests, self).setUp()

    def test_executor_future(self):
        self.executor = ThreadPoolExecutor(2)

        @run_on_executor
        def wait_and_return_a_value():
            time.sleep(2)
            return 20

        @coroutine
        def async_compare(callback):
            val = yield wait_and_return_a_value()
            assert_that(val, equal_to(20))
            callback()

        async_compare(self.stop)
        self.wait()
The point is that the test simply loops until a timeout occurs. Debugging the code, it looks as if the executor future is created as done() and, hence, not even started by the io_loop.
What am I doing wrong here? Help with this issue is really appreciated.
By the way, the same happens if I create a trivial future using the @return_future decorator, like this one (for which it happens to be true that it is already done):
@return_future
def get_value(callback):
    callback(10)
thanks & regards
markus
The problem is that executors must "live" in a class where the io_loop and the executor are defined (this can be seen when you check the @run_on_executor decorator):
def test_executor_future(self):
    class Executor():
        def __init__(self, io_loop=None):
            self.io_loop = io_loop or IOLoop.instance()
            self.executor = ThreadPoolExecutor(2)

        @tornado.concurrent.run_on_executor
        def wait_and_return_a_value(self):
            return 20

        def destroy(self):
            self.executor.shutdown(1)

    @tornado.gen.coroutine
    def async_compare(callback):
        executor = Executor()
        val = yield executor.wait_and_return_a_value()
        assert_that(val, equal_to(20))
        executor.destroy()
        callback()

    async_compare(self.stop)
    self.wait()