Locust ignoring extended subclasses and instantiating base classes - Python

I want to write a base HTTP user class and a base load test shape and then extend them in subclasses, but Locust doesn't understand the extended classes and instantiates the base classes instead.
These are the base classes:
helpers.py:
from locust.contrib.fasthttp import FastHttpUser
import string
from locust import LoadTestShape, constant_pacing
from dotenv import load_dotenv
import os

load_dotenv()

# init parameters
host_address = "127.0.0.1"

class BaseHttpUser(FastHttpUser):
    host = host_address
    wait_time = constant_pacing(5)
    chars = string.ascii_uppercase + string.ascii_lowercase + string.digits
    start_time = 0

class BaseRps(LoadTestShape):
    time_limit = 600
    user_spawn = {1: (1500, 10)}

    def tick(self):
        step = len(self.user_spawn.keys())
        run_time = self.get_run_time()
        print(step)
        for idx in range(1, step + 1):
            print(run_time, idx, self.time_limit)
            if run_time < idx * self.time_limit / step:
                print("here", self.user_spawn.get(idx))
                return self.user_spawn.get(idx)
        return None
And this is the file that I run:
minio.py:
from locust import task
from helpers import BaseHttpUser, BaseRps
import os

host_address = "127.0.0.1"
test_name = "minio"
log_file_path = 'log.log'
base_url = os.getenv("MINIO_URL")

class HttpUser(BaseHttpUser):
    host = host_address
    base_url = base_url

    @task
    def download(self):
        self.client.get(f'{self.base_url}/magnix-server-media/ads-images/ff.png', name='download')

class Rps(BaseRps):
    user_spawn = {1: (10000, 100)}

Base User classes need the attribute abstract = True so that they are not instantiated: https://docs.locust.io/en/stable/api.html#locust.User.abstract
I don't think you can do the same with load shape classes, but you can use class attributes (which you can manipulate in your locustfile after importing it).
Like removing the Rps class and instead just doing:
BaseRps.user_spawn = {1: (10000, 100)}
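A minimal sketch of the pattern (the endpoint path here is a placeholder; everything else follows the question's code):

from locust import task, constant_pacing
from locust.contrib.fasthttp import FastHttpUser

class BaseHttpUser(FastHttpUser):
    abstract = True  # Locust will not instantiate this class directly
    wait_time = constant_pacing(5)

class HttpUser(BaseHttpUser):
    host = "127.0.0.1"

    @task
    def download(self):
        self.client.get('/some-file.png', name='download')  # placeholder path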

Related

Prometheus Python exporter

I have written an exporter in Python that converts JSON to Prometheus metrics. It works with a small amount of data, but when I test it with very many data points whose spacing is milliseconds, it stops working.
JSON (extract):
{
  "Acquisition": {
    "refTriggerName": "NO_REF_TRIGGER",
    "refTriggerStamp": 1666592215243657724,
    "channelTimeSinceRefTrigger": [0e+00, 2.5e-04, ...],
    "channelValues": {
      "values": [4.861855e+00, 4.8581786e+00, ...]
    }
  }
}
json_exporter.py:
from prometheus_client import start_http_server, Metric, REGISTRY
import json
import requests
import sys
import time

class JsonCollector(object):
    def __init__(self, endpoint):
        self._endpoint = endpoint

    def collect(self):
        # Fetch the JSON
        response = json.loads(requests.get(self._endpoint).content.decode('UTF-8'))
        metric = Metric('fair_acquisition_signal', 'single sinus signal example', 'gauge')
        valuesArray = response['Acquisition']['channelValues']['values']
        refTriggerStamp = response['Acquisition']['refTriggerStamp']
        timestampArray = response['Acquisition']['channelTimeSinceRefTrigger']
        counter = 0
        while (counter < len(valuesArray)) and (counter < len(timestampArray)):
            timestampV = refTriggerStamp / 1e9 + timestampArray[counter]
            metric.add_sample('fair_acquisition_signal', value=valuesArray[counter], timestamp=timestampV, labels={})
            # print(str(datetime.fromtimestamp(timestampV)) + ' ' + str(valuesArray[counter]))
            counter += 1
        # for sample in metric.samples:
        #     print(sample)
        # print(str(len(metric.samples)))
        yield metric

if __name__ == '__main__':
    # Usage: json_exporter.py port endpoint
    start_http_server(int(sys.argv[1]))
    REGISTRY.register(JsonCollector(sys.argv[2]))
    while True:
        time.sleep(1)
The data should actually form a sine wave, but in Prometheus it looks like this:
[screenshot: visualization in Prometheus]
Does anyone know where my error is?

Unable to successfully patch functions of Azure ContainerClient

I have been trying to patch the list_blobs() function of ContainerClient but have not been able to do so successfully. This code outputs a MagicMock, so the function isn't patched as I would expect it to be (I'm trying to patch it to return the list ['Blob1', 'Blob2']).
#################Script File
import sys
from datetime import datetime, timedelta

import pyspark
import pytz
import yaml
# from azure.storage.blob import BlobServiceClient, ContainerClient
from pyspark.dbutils import DBUtils as dbutils
import azure.storage.blob

# Open Config
def main():
    spark_context = pyspark.SparkContext.getOrCreate()
    spark_context.addFile(sys.argv[1])
    stream = None
    stream = open(sys.argv[1], "r")
    config = yaml.load(stream, Loader=yaml.FullLoader)
    stream.close()
    account_key = dbutils.secrets.get(scope=config["Secrets"]["Scope"], key=config["Secrets"]["Key Name"])
    target_container = config["Storage Configuration"]["Container"]
    target_account = config["Storage Configuration"]["Account"]
    days_history_to_keep = config["Storage Configuration"]["Days History To Keep"]
    connection_string = (
        "DefaultEndpointsProtocol=https;AccountName="
        + target_account
        + ";AccountKey="
        + account_key
        + ";EndpointSuffix=core.windows.net"
    )
    blob_service_client: azure.storage.blob.BlobServiceClient = (
        azure.storage.blob.BlobServiceClient.from_connection_string(connection_string)
    )
    container_client: azure.storage.blob.ContainerClient = (
        blob_service_client.get_container_client(target_container)
    )
    blobs = container_client.list_blobs()
    print(blobs)
    print(blobs)
    utc = pytz.UTC
    delete_before_date = utc.localize(
        datetime.today() - timedelta(days=days_history_to_keep)
    )
    for blob in blobs:
        if blob.creation_time < delete_before_date:
            print("Deleting Blob: " + blob.name)
            container_client.delete_blob(blob, delete_snapshots="include")

if __name__ == "__main__":
    main()
#################Test File
import unittest
from unittest import mock

import DeleteOldBlobs

class DeleteBlobsTest(unittest.TestCase):
    def setUp(self):
        pass

    @mock.patch("DeleteOldBlobs.azure.storage.blob.ContainerClient")
    @mock.patch("DeleteOldBlobs.azure.storage.blob.BlobServiceClient")
    @mock.patch("DeleteOldBlobs.dbutils")
    @mock.patch("DeleteOldBlobs.sys")
    @mock.patch('DeleteOldBlobs.pyspark')
    def test_main(self, mock_pyspark, mock_sys, mock_dbutils, mock_blobserviceclient, mock_containerclient):
        # mock setup
        config_file = "Delete_Old_Blobs_UnitTest.yml"
        mock_sys.argv = ["unused_arg", config_file]
        mock_dbutils.secrets.get.return_value = "A Secret"
        mock_containerclient.list_blobs.return_value = ["ablob1", "ablob2"]
        # execute test
        DeleteOldBlobs.main()
        # TODO assert actions taken
        # mock_sys.argv.__get__.assert_called_with()
        # dbutils.secrets.get(scope=config['Secrets']['Scope'], key=config['Secrets']['Key Name'])

if __name__ == "__main__":
    unittest.main()
Output:
<MagicMock name='BlobServiceClient.from_connection_string().get_container_client().list_blobs()' id='1143355577232'>
What am I doing incorrectly here?
I'm not able to execute your code at the moment, but I have tried to simulate it. To do this I created the following 3 files in the path /<path-to>/pkg/sub_pkg1 (where pkg and sub_pkg1 are packages).
File ContainerClient.py
def list_blobs(self):
    return "blob1"

File DeleteOldBlobs.py
from pkg.sub_pkg1 import ContainerClient

# Open Config
def main():
    blobs = ContainerClient.list_blobs()
    print(blobs)
    print(blobs)

File DeleteBlobsTest.py
import unittest
from unittest import mock

from pkg.sub_pkg1 import DeleteOldBlobs

class DeleteBlobsTest(unittest.TestCase):
    def setUp(self):
        pass

    def test_main(self):
        mock_containerclient = mock.MagicMock()
        with mock.patch("DeleteOldBlobs.ContainerClient.list_blobs", mock_containerclient.list_blobs):
            mock_containerclient.list_blobs.return_value = ["ablob1", "ablob2"]
            DeleteOldBlobs.main()

if __name__ == '__main__':
    unittest.main()
If you execute the test code you obtain the output:
['ablob1', 'ablob2']
['ablob1', 'ablob2']
This output means that the function list_blobs() is mocked by mock_containerclient.list_blobs.
I don't know whether this post is useful for you, but I wasn't able to simulate your code more closely at the moment.
I hope my code inspires you to find your real solution.
The structure of the answer above didn't match my solution; perhaps both will work, but it was important for me to patch pyspark even though I never call it, or exceptions would get thrown when my code tried to interact with Spark.
Perhaps this will be useful to someone:
#mock.patch("DeleteOldBlobs.azure.storage.blob.BlobServiceClient")
#mock.patch("DeleteOldBlobs.dbutils")
#mock.patch("DeleteOldBlobs.sys")
#mock.patch('DeleteOldBlobs.pyspark')
def test_list_blobs_called_once(self, mock_pyspark, mock_sys, mock_dbutils, mock_blobserviceclient):
# mock setup
config_file = "Delete_Old_Blobs_UnitTest.yml"
mock_sys.argv = ["unused_arg", config_file]
account_key = 'Secret Key'
mock_dbutils.secrets.get.return_value = account_key
bsc_mock: mock.Mock = mock.Mock()
container_client_mock = mock.Mock()
blob1 = Blob('newblob', datetime.today())
blob2 = Blob('oldfile', datetime.today() - timedelta(days=20))
container_client_mock.list_blobs.return_value = [blob1, blob2]
bsc_mock.get_container_client.return_value = container_client_mock
mock_blobserviceclient.from_connection_string.return_value = bsc_mock
# execute test
DeleteOldBlobs.main()
#Assert Results
container_client_mock.list_blobs.assert_called_once()
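The Blob helper used above isn't defined in the answer; a minimal stand-in (an assumption for illustration, not the Azure SDK's BlobProperties type) only needs the name and creation_time attributes that main() reads:

from dataclasses import dataclass
from datetime import datetime

@dataclass
class Blob:
    name: str                 # blob name printed before deletion
    creation_time: datetime   # compared against delete_before_date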

Infinite loop when I import sentence_transformers in FastAPI

I'm trying to serve an STS model with the FastAPI framework, but when I import SentenceTransformer via 'from sentence_transformers import SentenceTransformer', it loops infinitely. I want to compute pred in content.py and serve it from the '/predicts' endpoint in main.py.
# main.py
from fastapi import FastAPI
from fastapi import File
import torch
from pydantic import BaseModel

from content import predict_model

app = FastAPI()

class Item(BaseModel):
    sentence_1: str
    sentence_2: str

@app.post("/predicts")
async def predict(item: Item):
    predict_model()
    return {}
# content.py
import torch
from sentence_transformers import SentenceTransformer

def cosine_similarity_manual(x, y, small_number=1e-8):
    result = torch.dot(x, y) / (torch.linalg.norm(x) * torch.linalg.norm(y) + small_number)
    return result

def predict_model():
    sent1 = '무엇보다도 호스트분들이 너무 친절하셨습니다.'
    sent2 = '무엇보다도, 호스트들은 매우 친절했습니다.'
    predict = 0
    texts = [sent1, sent2]
    model_path = "training_sts-Huffon-sentence-klue-roberta-base"
    model = SentenceTransformer(model_path)
    corpus_embeddings = model.encode(texts[0], convert_to_tensor=True)
    query_embeddings = model.encode(texts[1], convert_to_tensor=True)
    print(corpus_embeddings.shape)
    print(query_embeddings.shape)
    score = cosine_similarity_manual(corpus_embeddings, query_embeddings)
    print(score)
    if score >= 0.6:
        pred = 1
    else:
        pred = 0
    print(pred)
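A side note on the code as shown (not a fix for the import loop): predict_model() constructs the SentenceTransformer on every call, so each request reloads the model from disk. A common pattern is to load it once at module level, for example:

# content.py (sketch): load the model once, reuse it on every request
from sentence_transformers import SentenceTransformer

model_path = "training_sts-Huffon-sentence-klue-roberta-base"
model = SentenceTransformer(model_path)  # loaded once at import time

def predict_model():
    # use the module-level model here instead of re-creating it
    ...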

Meshcat not showing the changes to a Free Body's Pose

I've been trying to create my own ManipulationStation for a different robot arm using Pydrake, but I've been unsuccessful so far in adding clutter to my ManipulationStation. For some odd reason, Meshcat won't show the updated poses of my objects.
import numpy as np
import glob
from pydrake.geometry import MeshcatVisualizerCpp
from pydrake.math import RigidTransform, RotationMatrix
from pydrake.systems.analysis import Simulator
from pydrake.systems.framework import DiagramBuilder
from pydrake.all import (
    DiagramBuilder, FindResourceOrThrow,
    SceneGraph, Diagram,
    MultibodyPlant, Parser, Simulator, MeshcatVisualizerCpp,
    UniformlyRandomRotationMatrix, RandomGenerator)
from pydrake.geometry import Meshcat

class DexterPPStation(Diagram):
    def __init__(self, time_step, file_path):
        super().__init__()
        self.time_step = time_step
        self.path = file_path
        self.plant = MultibodyPlant(self.time_step)
        self.scene_graph = SceneGraph()
        self.plant.RegisterAsSourceForSceneGraph(self.scene_graph)
        self.controller_plant = MultibodyPlant(self.time_step)
        self.object_ids = []
        self.object_poses = []

    def AddObject(self, file, name, pose):
        model_idx = Parser(self.plant).AddModelFromFile(file, name)
        indices = self.plant.GetBodyIndices(model_idx)
        self.object_ids.append(indices[0])
        self.object_poses.append(pose)
        return model_idx

    def CreateBins(self, path, XP_B1, XP_B2):
        bin1 = Parser(self.plant).AddModelFromFile(path, "bin1")
        self.plant.WeldFrames(self.plant.world_frame(), self.plant.GetFrameByName("bin_base", bin1), XP_B1)
        bin2 = Parser(self.plant).AddModelFromFile(path, "bin2")
        self.plant.WeldFrames(self.plant.world_frame(), self.plant.GetFrameByName("bin_base", bin2), XP_B2)

    def CreateRandomPickingObjects(self, n=4):
        choices = [f for f in glob.glob("/opt/drake/share/drake/manipulation/models/ycb/sdf/*.sdf")]
        z = 0.1
        rs = np.random.RandomState()
        generator = RandomGenerator(rs.randint(1000))
        for i in range(n):
            obj = choices[i]
            pose = RigidTransform(
                UniformlyRandomRotationMatrix(generator),
                [rs.uniform(.35, 0.6), rs.uniform(-.2, .2), z])
            model = self.AddObject(obj, obj.split("/")[-1].split(".")[0] + str(i), pose)
            body_idx = self.plant.GetBodyIndices(model)[0]
            self.object_ids.append(body_idx)
            self.object_poses.append(pose)
            z += 0.1

    def SetRandomPoses(self, station_context):
        plant_context = self.GetSubsystemContext(self.plant, station_context)
        for i in range(len(self.object_ids)):
            self.plant.SetFreeBodyPose(plant_context, self.plant.get_body(self.object_ids[i]), self.object_poses[i])

    def Finalize(self):
        self.plant.Finalize()
        self.controller_plant.Finalize()
        builder = DiagramBuilder()
        builder.AddSystem(self.plant)
        builder.AddSystem(self.controller_plant)
        builder.AddSystem(self.scene_graph)
        builder.Connect(self.plant.get_geometry_poses_output_port(), self.scene_graph.get_source_pose_port(self.plant.get_source_id()))
        builder.Connect(self.scene_graph.get_query_output_port(), self.plant.get_geometry_query_input_port())
        builder.ExportOutput(self.scene_graph.get_query_output_port(), "query_object")
        builder.ExportOutput(self.plant.get_geometry_poses_output_port(), "geometry_poses")
        builder.ExportOutput(self.scene_graph.get_query_output_port(), "geometry_query")
        builder.ExportOutput(self.plant.get_contact_results_output_port(), "contact_results")
        builder.ExportOutput(self.plant.get_state_output_port(), "plant_continuous_state")
        builder.BuildInto(self)
To test my code, I've been running the script below.
def test():
    builder = DiagramBuilder()
    station = DexterPPStation(1e-4, "drake/manipulation/models/final_dexter_description/urdf/dexter.urdf")
    station.CreateBins("/opt/drake/share/drake/examples/manipulation_station/models/bin.sdf", RigidTransform(np.array([0.5, 0, 0])), RigidTransform(np.array([0, 0.5, 0])))
    station.CreateRandomPickingObjects(1)
    station.Finalize()
    builder.AddSystem(station)
    station_context = station.CreateDefaultContext()
    station.SetRandomPoses(station_context)
    MeshcatVisualizerCpp.AddToBuilder(builder, station.GetOutputPort("query_object"), meshcat)
    diagram = builder.Build()
    simulator = Simulator(diagram)
    simulator.set_target_realtime_rate(1.0)
    simulator.AdvanceTo(0.1)
test()
I've tried to call the SetRandomPoses() function from inside my Finalize() method, but since I needed to pass in a context to the function, I wasn't sure what to do. I'm new to Drake, so any input would be greatly appreciated.
You've created a station_context and set the random poses in it, but then you don't use it anywhere. When you create the Simulator, it creates another Context (with default values), and that is what gets published when you call AdvanceTo.
The solution here, I think, is to not create your own station_context, but do e.g.
simulator = Simulator(diagram)
diagram_context = simulator.get_mutable_context()
station_context = station.GetMyMutableContextFromRoot(diagram_context)
station.SetRandomPoses(station_context)
then you can call AdvanceTo.
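Applied to the test() above, the tail would look something like this (a sketch under the same assumptions as the original script; only the context handling changes):

    MeshcatVisualizerCpp.AddToBuilder(builder, station.GetOutputPort("query_object"), meshcat)
    diagram = builder.Build()
    simulator = Simulator(diagram)
    # pull the context out of the simulator instead of creating a fresh default one
    diagram_context = simulator.get_mutable_context()
    station_context = station.GetMyMutableContextFromRoot(diagram_context)
    station.SetRandomPoses(station_context)
    simulator.set_target_realtime_rate(1.0)
    simulator.AdvanceTo(0.1)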

How to run a master/slave Locust runner programmatically so slaves stop at the end

I have these simple master/slave scripts, using locustio==0.13.5. This is the master:
#!/usr/bin/env python3
import logging
import argparse
import os
import sys
import time
import urllib3
import locust
import utils

class TestSomething(locust.TaskSet):
    @locust.task(1)
    def get_hosts_small(self):
        print(self.locust.message)
        return self.client.get(url='http://localhost', verify=False)

class TheSomething(locust.HttpLocust):
    task_set = TestSomething
    wait_time = locust.constant(0)

urllib3.disable_warnings()
logging.basicConfig(level=logging.DEBUG)

options = argparse.Namespace()
options.host = "http://localhost"
options.num_clients = 1
options.hatch_rate = options.num_clients
options.num_requests = 10
options.stop_timeout = 1
options.step_load = False
options.reset_stats = False
options.test_duration = 3
options.master_host = 'localhost'
options.master_port = 5557
options.master_bind_host = '*'
options.master_bind_port = 5557
options.heartbeat_liveness = 3
options.heartbeat_interval = 1
options.expect_slaves = 1

test_set = TheSomething
test_set.message = 'Hello'

locust_runner = locust.runners.MasterLocustRunner([test_set], options)
while len(locust_runner.clients.ready) < options.expect_slaves:
    logging.info("Waiting for slaves to be ready, %s of %s connected", len(locust_runner.clients.ready), options.expect_slaves)
    time.sleep(1)
locust_runner.start_hatching(locust_count=options.num_clients, hatch_rate=options.hatch_rate)
time.sleep(options.test_duration)
locust_runner.quit()
locust.events.quitting.fire(reverse=True)
print(locust_runner.stats)  # actually using a custom function to format results
And this is the slave:
#!/usr/bin/env python3
import logging
import argparse
import os
import sys
import time
import locust

class TestSomething(locust.TaskSet):
    @locust.task(1)
    def get_hosts_small(self):
        print(self.locust.message)
        return self.client.get(url='http://localhost', verify=False)

class TheSomething(locust.HttpLocust):
    task_set = TestSomething
    wait_time = locust.constant(0)

logging.basicConfig(level=logging.DEBUG)

options = argparse.Namespace()
options.host = "http://localhost"
options.num_clients = 1
options.hatch_rate = options.num_clients
options.num_requests = 10
options.stop_timeout = 1
options.step_load = False
options.reset_stats = False
options.test_duration = 3
options.master_host = 'localhost'
options.master_port = 5557
options.master_bind_host = '*'
options.master_bind_port = 5557
options.heartbeat_liveness = 3
options.heartbeat_interval = 1

test_set = TheSomething
test_set.message = 'Hello'

locust_runner = locust.runners.SlaveLocustRunner([test_set], options)
locust_runner.worker()
When I start the master and the slave, I can see the master wait for the slave to come up, then the slave executes the test, and I see the report printed by the master before it finishes. But the slave does not finish; it keeps running, doing nothing (I assume).
I would like the slave either to exit, or to restart and attempt to connect to the master again in case I just rerun the master script. Does anybody have any idea how to do that?
I usually just set any parameters as environment variables and read them from the script (os.environ['MY_ENV_VAR']).
If you're running the slaves on the same server, that should be easy (just run export MY_ENV_VAR=Hello before starting the processes). If you are running slaves on different machines it's a little more complicated, but check out locust-swarm, which does the work for you (https://github.com/SvenskaSpel/locust-swarm).
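For example (a sketch; MY_ENV_VAR is this answer's placeholder, and message is the attribute from the question's scripts):

import os

test_set.message = os.environ.get('MY_ENV_VAR', 'Hello')  # read the parameter on each node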
As for the "do stuff after the test" part, there is a "quitting" event that you can subscribe to:
https://docs.locust.io/en/0.14.5/api.html#available-hooks
Or, for the upcoming 1.0 version:
https://docs.locust.io/en/latest/api.html#locust.event.Events.quitting
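A minimal sketch of subscribing to that event in both API generations (handler names and bodies are placeholders):

# locust 0.x (as in the question's version)
import locust

def on_quitting(**kwargs):
    print("locust is shutting down")  # e.g. flush or print results here

locust.events.quitting += on_quitting

# locust >= 1.0
from locust import events

@events.quitting.add_listener
def on_quitting(environment, **kwargs):
    print("locust is shutting down")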
