Python: prometheus_client ValueError: gauge metric is missing label values

I'm trying to use prometheus_client to export RabbitMQ metrics, and I have a problem with the decorator functions.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#from prometheus_client import start_http_server, Summary
import prometheus_client as prom
import random
import time
import pika

queue_name = [
    "capt",
    "dev-capt",
    "myBeautifullTest"
]

def get_metric(qname):
    queue_descriptor = channel.queue_declare(qname, durable=True)
    queue_len = queue_descriptor.method.message_count
    return float(queue_len)

params = pika.ConnectionParameters(
    host='rabbitmq1.local',
    port=5672,
    credentials=pika.credentials.PlainCredentials('guest11', 'guest22'),
)
connection = pika.BlockingConnection(parameters=params)
channel = connection.channel()

i = prom.Info("RMQPE", "RabbitMQ Prometheus Exporter")
i.info({'version': '0.0.1'})

# Create a metric to track time spent and requests made.
REQUEST_TIME = prom.Summary('request_processing_seconds', 'Time spent processing request')

# Decorate function with metric.
@REQUEST_TIME.time()
def process_request():
    """A dummy function that takes some time."""
    time.sleep(1)

RABBIT_QUEUE = prom.Gauge('rabbitmq_test_exporter', 'queue_length', ['queue_name'], multiprocess_mode='all')

for qname in queue_name:
    queue_descriptor = channel.queue_declare(qname, durable=True)
    queue_len = queue_descriptor.method.message_count
    RABBIT_QUEUE.labels(qname).set(queue_len)

@RABBIT_QUEUE.track_inprogress()
def f():
    pass

with RABBIT_QUEUE.track_inprogress():
    pass

if __name__ == '__main__':
    # Start up the server to expose the metrics.
    prom.start_http_server(27015)  # Yes, CS port :)
    # Generate some requests.
    while True:
        process_request()
        f()
When I run it, I get this error:
andrey@xps:~/prj/python3/rmq$ ./prj2.py
Traceback (most recent call last):
  File "./prj2.py", line 56, in <module>
    @RABBIT_QUEUE.track_inprogress()
  File "/usr/local/lib/python3.8/dist-packages/prometheus_client/metrics.py", line 372, in track_inprogress
    self._raise_if_not_observable()
  File "/usr/local/lib/python3.8/dist-packages/prometheus_client/metrics.py", line 66, in _raise_if_not_observable
    raise ValueError('%s metric is missing label values' % str(self._type))
ValueError: gauge metric is missing label values
I need 3 metrics, maybe more.
If I remove the decorator, the code runs, but the values are never updated.
Please help.
Thank you.

SOLVED!
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#from prometheus_client import start_http_server, Summary
import prometheus_client as prom
import random
import time
import pika

queue_name = [
    "capt",
    "dev-capt",
    "myBeautifullTest"
]

params = pika.ConnectionParameters(
    host='rabbitmq1.local',
    port=5672,
    credentials=pika.credentials.PlainCredentials('guest11', 'guest22'),
)
connection = pika.BlockingConnection(parameters=params)
channel = connection.channel()

i = prom.Info("RMQPE", "RabbitMQ Prometheus Exporter")
i.info({'version': '0.0.1'})

# Create a metric to track time spent and requests made.
REQUEST_TIME = prom.Summary('request_processing_seconds', 'Time spent processing request')

# Decorate function with metric.
@REQUEST_TIME.time()
def process_request():
    time.sleep(1)

if __name__ == '__main__':
    # name, documentation, labelnames
    RABBIT_QUEUE = prom.Gauge('rabbitmq_test_exporter', 'queue_length', labelnames=['queue_name'])
    prom.start_http_server(27015)
    while True:
        process_request()
        for qname in queue_name:
            queue_descriptor = channel.queue_declare(qname, durable=True)
            queue_len = queue_descriptor.method.message_count
            RABBIT_QUEUE.labels(qname).set(queue_len)
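For anyone hitting the same error, my reading of the prometheus_client API (not part of the original post): track_inprogress() was being called on the parent Gauge, which has labelnames declared but no label values bound yet, so _raise_if_not_observable() fails. If you do want the decorator/context-manager form on a labelled Gauge, you would call it on a child obtained via .labels(); a minimal sketch, with 'capt' as an illustrative label value:

import prometheus_client as prom

RABBIT_QUEUE = prom.Gauge('rabbitmq_test_exporter', 'queue_length', ['queue_name'])
capt_queue = RABBIT_QUEUE.labels('capt')  # child gauge with its label value bound

@capt_queue.track_inprogress()  # works: the child is observable
def f():
    pass

with capt_queue.track_inprogress():  # context-manager form works the same way
    pass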

Related

Linux nohup fails for Streaming API (IG Markets) where the file is Python

This is quite a specific question regarding nohup in Linux, which runs a Python file.
Back-story: I am trying to save down streaming data (from the IG Markets broadcast signal), and as I am trying to run it via a remote server (so I don't have to keep my own local desktop up 24/7), somehow nohup will not engage when it 'listens' to a broadcast signal.
Below is the example Python code:
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
IG Markets Stream API sample with Python
"""
user_ = 'xxx'
password_ = 'xxx'
api_key_ = 'xxx'  # this is the 1st api key
account_ = 'xxx'
acc_type_ = 'xxx'
fileLoc = 'marketdata_IG_spx_5min.csv'

list_ = ["CHART:IX.D.SPTRD.DAILY.IP:5MINUTE"]
fields_ = ["UTM", "LTV", "TTV", "BID_OPEN", "BID_HIGH", \
           "BID_LOW", "BID_CLOSE",]

import time
import sys
import traceback
import logging
import warnings
warnings.filterwarnings('ignore')
import pandas as pd  # needed for the pd.read_csv / pd.DataFrame calls below
from trading_ig import (IGService, IGStreamService)
from trading_ig.lightstreamer import Subscription

cols_ = ['timestamp', 'data']

# A simple function acting as a Subscription listener
def on_prices_update(item_update):
    # print("price: %s " % item_update)
    print("xxxxxxxx")  # format string truncated in the original post

# A simple function acting as a Subscription listener
def on_charts_update(item_update):
    # print("price: %s " % item_update)
    print("xxxxxx"
          .format(
              stock_name=item_update["name"], **item_update["values"]
          ))
    res_ = ["xxxxx"
            .format(
                stock_name=item_update["name"], **item_update["values"]
            ).split(' ')]
    # display(pd.DataFrame(res_))
    try:
        data_ = pd.read_csv(fileLoc)[cols_]
        data_ = data_.append(pd.DataFrame(res_, columns=cols_))
        data_.to_csv(fileLoc)
        print('there is data and we are reading it')
        # display(data_)
    except:
        pd.DataFrame(res_, columns=cols_).to_csv(fileLoc)
        print('there is no data and we are saving first time')
    time.sleep(60)  # sleep for 1 min
def main():
    logging.basicConfig(level=logging.INFO)
    # logging.basicConfig(level=logging.DEBUG)

    ig_service = IGService(
        user_, password_, api_key_, acc_type_
    )
    ig_stream_service = IGStreamService(ig_service)
    ig_session = ig_stream_service.create_session()
    accountId = account_

    ################ my code to set sleep function to sleep/read at only certain time intervals
    s_time = time.time()
    ############################

    # Making a new Subscription in MERGE mode
    subscription_prices = Subscription(
        mode="MERGE",
        # make sure to put L1 in front of the instrument name
        items=list_,
        fields=fields_
    )
    # adapter="QUOTE_ADAPTER")

    # Adding the "on_price_update" function to Subscription
    subscription_prices.addlistener(on_charts_update)

    # Registering the Subscription
    sub_key_prices = ig_stream_service.ls_client.subscribe(subscription_prices)
    print('this is the line here')

    input("{0:-^80}\n".format("HIT CR TO UNSUBSCRIBE AND DISCONNECT FROM \
LIGHTSTREAMER"))

    # Disconnecting
    ig_stream_service.disconnect()

if __name__ == '__main__':
    main()
#######
Then I try to run it on Linux using this command: nohup python marketdata.py
where marketdata.py is basically the Python code above.
Somehow, nohup will not engage. Can any experts/gurus see what I am missing in my code?

Unable to successfully patch functions of Azure ContainerClient

I have been trying to patch the list_blobs() function of ContainerClient, but have not been able to do this successfully. This code outputs a MagicMock() function, but the function isn't patched as I would expect it to be (I'm trying to patch it with the list ['Blob1', 'Blob2']).
#################Script File
import sys
from datetime import datetime, timedelta
import pyspark
import pytz
import yaml
# from azure.storage.blob import BlobServiceClient, ContainerClient
from pyspark.dbutils import DBUtils as dbutils
import azure.storage.blob

# Open Config
def main():
    spark_context = pyspark.SparkContext.getOrCreate()
    spark_context.addFile(sys.argv[1])
    stream = None
    stream = open(sys.argv[1], "r")
    config = yaml.load(stream, Loader=yaml.FullLoader)
    stream.close()

    account_key = dbutils.secrets.get(scope=config["Secrets"]["Scope"], key=config["Secrets"]["Key Name"])
    target_container = config["Storage Configuration"]["Container"]
    target_account = config["Storage Configuration"]["Account"]
    days_history_to_keep = config["Storage Configuration"]["Days History To Keep"]

    connection_string = (
        "DefaultEndpointsProtocol=https;AccountName="
        + target_account
        + ";AccountKey="
        + account_key
        + ";EndpointSuffix=core.windows.net"
    )
    blob_service_client: azure.storage.blob.BlobServiceClient = (
        azure.storage.blob.BlobServiceClient.from_connection_string(connection_string)
    )
    container_client: azure.storage.blob.ContainerClient = (
        blob_service_client.get_container_client(target_container)
    )

    blobs = container_client.list_blobs()
    print(blobs)
    print(blobs)

    utc = pytz.UTC
    delete_before_date = utc.localize(
        datetime.today() - timedelta(days=days_history_to_keep)
    )
    for blob in blobs:
        if blob.creation_time < delete_before_date:
            print("Deleting Blob: " + blob.name)
            container_client.delete_blob(blob, delete_snapshots="include")

if __name__ == "__main__":
    main()
#################Test File
import unittest
from unittest import mock

import DeleteOldBlobs

class DeleteBlobsTest(unittest.TestCase):
    def setUp(self):
        pass

    @mock.patch("DeleteOldBlobs.azure.storage.blob.ContainerClient")
    @mock.patch("DeleteOldBlobs.azure.storage.blob.BlobServiceClient")
    @mock.patch("DeleteOldBlobs.dbutils")
    @mock.patch("DeleteOldBlobs.sys")
    @mock.patch('DeleteOldBlobs.pyspark')
    def test_main(self, mock_pyspark, mock_sys, mock_dbutils, mock_blobserviceclient, mock_containerclient):
        # mock setup
        config_file = "Delete_Old_Blobs_UnitTest.yml"
        mock_sys.argv = ["unused_arg", config_file]
        mock_dbutils.secrets.get.return_value = "A Secret"
        mock_containerclient.list_blobs.return_value = ["ablob1", "ablob2"]

        # execute test
        DeleteOldBlobs.main()

        # TODO assert actions taken
        # mock_sys.argv.__get__.assert_called_with()
        # dbutils.secrets.get(scope=config['Secrets']['Scope'], key=config['Secrets']['Key Name'])

if __name__ == "__main__":
    unittest.main()
Output:
<MagicMock name='BlobServiceClient.from_connection_string().get_container_client().list_blobs()' id='1143355577232'>
What am I doing incorrectly here?
I'm not able to execute your code at the moment, but I have tried to simulate it. To do this I created the following 3 files in the path /<path-to>/pkg/sub_pkg1 (where pkg and sub_pkg1 are packages).
File ContainerClient.py
def list_blobs(self):
    return "blob1"
File DeleteOldBlobs.py
from pkg.sub_pkg1 import ContainerClient

# Open Config
def main():
    blobs = ContainerClient.list_blobs()
    print(blobs)
    print(blobs)
File DeleteBlobsTest.py
import unittest
from unittest import mock

from pkg.sub_pkg1 import DeleteOldBlobs

class DeleteBlobsTest(unittest.TestCase):
    def setUp(self):
        pass

    def test_main(self):
        mock_containerclient = mock.MagicMock()
        with mock.patch("DeleteOldBlobs.ContainerClient.list_blobs", mock_containerclient.list_blobs):
            mock_containerclient.list_blobs.return_value = ["ablob1", "ablob2"]
            DeleteOldBlobs.main()

if __name__ == '__main__':
    unittest.main()
If you execute the test code you obtain the output:
['ablob1', 'ablob2']
['ablob1', 'ablob2']
This output means that the function list_blobs() is mocked by mock_containerclient.list_blobs.
I don't know whether the content of this post will be useful for you, but I'm not able to simulate your code any better at the moment.
I hope my code can inspire you to find your real solution.
The structure of the answer didn't match my solution; perhaps both will work, but it was important for me to patch pyspark even though I never call it, or exceptions would get thrown when my code tried to interact with Spark.
Perhaps this will be useful to someone:
#mock.patch("DeleteOldBlobs.azure.storage.blob.BlobServiceClient")
#mock.patch("DeleteOldBlobs.dbutils")
#mock.patch("DeleteOldBlobs.sys")
#mock.patch('DeleteOldBlobs.pyspark')
def test_list_blobs_called_once(self, mock_pyspark, mock_sys, mock_dbutils, mock_blobserviceclient):
# mock setup
config_file = "Delete_Old_Blobs_UnitTest.yml"
mock_sys.argv = ["unused_arg", config_file]
account_key = 'Secret Key'
mock_dbutils.secrets.get.return_value = account_key
bsc_mock: mock.Mock = mock.Mock()
container_client_mock = mock.Mock()
blob1 = Blob('newblob', datetime.today())
blob2 = Blob('oldfile', datetime.today() - timedelta(days=20))
container_client_mock.list_blobs.return_value = [blob1, blob2]
bsc_mock.get_container_client.return_value = container_client_mock
mock_blobserviceclient.from_connection_string.return_value = bsc_mock
# execute test
DeleteOldBlobs.main()
#Assert Results
container_client_mock.list_blobs.assert_called_once()
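Note that the Blob helper used above isn't shown in the post; a minimal, hypothetical stand-in (just enough for main(), which only reads .name and .creation_time) could look like this:

from dataclasses import dataclass
from datetime import datetime

@dataclass
class Blob:
    """Hypothetical stand-in for the blob properties object returned by list_blobs()."""
    name: str
    creation_time: datetime
    # main() compares creation_time against a pytz-localized cutoff, so a
    # timezone-aware datetime may be needed here for the comparison to succeed.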

How to implement synchronization in multi-threading with Python and ROS services?

I'm trying to implement multi-threading (parallel processing) with Python, using a mutex (threading.Lock). The first process checks the pressure value and the modem update (implemented in the code with the odom_callback and callback_modem functions), and the second process calls ROS services (implemented with the ros_serice_server server and imu_client client functions). Here is the implementation code in Python:
#!/usr/bin/env python3
from __future__ import print_function

import rospy
import numpy as np
from os import system
import time
import threading
import Microcontroller_Manager_Serial as Serial
import IMU_Functions as IMU
import Motors_Functions as Motor
import Pressure_Functions as Pressure
from geometry_msgs.msg import Vector3
import Modem_Functions as Modem
import threading
import time
import serial
import serial.tools.list_ports
from time import sleep
from std_msgs.msg import Float32
from std_msgs.msg import String
from demo_teleop.srv import ImuValue, ImuValueResponse

P0 = 1.01325  # Default Pressure
mutex = threading.Lock()
Communication_Mode_ = 0
pub_pressure = rospy.Publisher('depth', Vector3, queue_size=1)
pub_modem = rospy.Publisher('modem_data', Float32, queue_size=1)

def handle_ros_services(req):
    mutex.acquire(blocking=True)
    print("Server Read Data:")
    global T0
    data_received = Pressure.Pressure_Get_Final_Values(1, 1)
    #print("Server Read Data:")
    T0 = (np.int16((data_received[6]<<24) | (data_received[7]<<16) | (data_received[8]<<8) | (data_received[9])))/10000
    T = T0
    temperature = T
    current_x_orientation_s = temperature
    print("Returning Service Temperature Data", current_x_orientation_s)
    return ImuValueResponse(current_x_orientation_s, True)
    mutex.release()

def ros_serice_server():
    s = rospy.Service('imu_value', ImuValue, handle_ros_services)
    print("Ready to get_value")

def odom_callback():
    # reentrant processing
    mutex.acquire(blocking=True)
    # work serial port here, e.g. send msg to serial port
    global P0
    data_received = Pressure.Pressure_Get_Final_Values(1, 1)
    #P1 = (np.int16((data_received_pressure[6]<<24) | (data_received_pressure[7]<<16) | (data_received_pressure[8]<<8) | (data_received_pressure[9])))/10000
    P1 = (np.int16((data_received[6]<<24) | (data_received[7]<<16) | (data_received[8]<<8) | (data_received[9])))/10000
    #P0 = (np.int16((data_received_pressure[6]<<24) | (data_received_pressure[7]<<16) | (data_received_pressure[8]<<8) | (data_received_pressure[9])))/10000
    P0 = (np.int16((data_received[6]<<24) | (data_received[7]<<16) | (data_received[8]<<8) | (data_received[9])))/10000
    P = P0  # Relative Measured Pressure
    feedback = Vector3()
    feedback.x = 0  # Angular Velocity
    feedback.y = 0
    feedback.z = P/9.81  # Depth
    pressure = feedback.z
    print("Pressure : ", pressure)
    pub_pressure.publish(feedback)
    # reentrant processing
    mutex.release()

def callback_modem(event):
    # reentrant processing
    mutex.acquire(blocking=True)
    # work serial port here, e.g. check for incoming data
    event = Serial.Serial_Port_Receive_Data(20, 0.2)
    if (event == 1):  # Received data from acoustic modem
        modem_data = event
        pub_modem.publish(modem_data)
        print("received ")
    else:
        print("not received...... ")
    mutex.release()

if __name__ == '__main__':
    # initialize serial port here
    Serial.Serial_Port_Standard()
    rospy.init_node('imu_value')
    ros_serice_server()
    rospy.Timer(rospy.Duration(1), callback_modem)
    while not rospy.is_shutdown():
        try:
            odom_callback()
        except:
            print('pass')
And the client node
#!/usr/bin/env python3
from __future__ import print_function

import rospy
import sys
import numpy as np
from os import system
import threading
import Microcontroller_Manager_Serial as Serial
import IMU_Functions as IMU
import Motors_Functions as Motor
import Pressure_Functions as Pressure
from geometry_msgs.msg import Vector3
import Modem_Functions as Modem
import time
import serial
import serial.tools.list_ports
from time import sleep
from std_msgs.msg import Float32
from std_msgs.msg import String
from demo_teleop.srv import *

mutex = threading.Lock()
Communication_Mode_ = 0
pub_modem = rospy.Publisher('modem_data', Float32, queue_size=1)

def imu_client():
    mutex.acquire(blocking=True)
    rospy.wait_for_service('imu_value')
    imu_value = rospy.ServiceProxy('imu_value', ImuValue)
    print("Request call send")
    resp1 = imu_value(0.05)
    return resp1.current_x_orientation_s
    mutex.release()

if __name__ == "__main__":
    rospy.init_node('client_node_f')
    while not rospy.is_shutdown():
        try:
            print("entering client")
            value = imu_client()
            print(value)
            time.sleep(1)
        except:
            print('pass')
So the output is as follows. The output of the first process, with the ROS service server, is:
Pressure : 0.10602446483180428
Server Read Data:
Returning Service Temperature Data 1.0401
And then after calling the client I got
entering client
Request call send
1.0401
entering client
The problem is that after calling the ROS service from the client node, the process stops, so it doesn't continue with the first process (pressure value and modem update). The ROS service should be called only on demand; it should halt the first process (pressure and modem) and then the first process should resume its work. So, do I need to implement semaphores for the ROS service call? If yes, how should that look in the code? I do need some kind of synchronization, right? Please, any help?
Your problem is:
def handle_ros_services(req):
    mutex.acquire(blocking=True)
    ...
    return ImuValueResponse(current_x_orientation_s, True)
    mutex.release()
Because of the return statement, the release is never executed.
You need, at the end:
value = ImuValueResponse(...)
mutex.release()
return value
Even better would be to use your mutex as part of a with statement:
with mutex:
    # do anything you want here, knowing that the lock will be released
    # at the end, even if you return or throw an exception
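To make that concrete, here is a minimal sketch (my own rewrite, reusing the Pressure helper and ImuValueResponse from the question) of the service handler using the context-manager form:

def handle_ros_services(req):
    # The lock is released automatically when the with-block exits,
    # whether via return or an exception.
    with mutex:
        data_received = Pressure.Pressure_Get_Final_Values(1, 1)
        T0 = (np.int16((data_received[6] << 24) | (data_received[7] << 16) |
                       (data_received[8] << 8) | (data_received[9]))) / 10000
        current_x_orientation_s = T0
        print("Returning Service Temperature Data", current_x_orientation_s)
        return ImuValueResponse(current_x_orientation_s, True)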

How to add configuration settings for sasl.mechanism PLAIN (API) and GSSAPI (Kerberos) authentication in a Python script

I need some help setting the configuration for sasl.mechanism PLAIN (API) and GSSAPI (Kerberos) authentication.
We are using Confluent Kafka here. There are two scripts: one is a Python script, and the second is a bash script which calls the Python one. You can find the script below.
Thanks for the help in advance!
import json
import os
import string
import random
import socket
import uuid
import re
from datetime import datetime
import time
import hashlib
import math
import sys
from functools import cache
from confluent_kafka import Producer, KafkaError, KafkaException

topic_name = os.environ['TOPIC_NAME']
partition_count = int(os.environ['PARTITION_COUNT'])
message_key_template = json.loads(os.environ['KEY_TEMPLATE'])
message_value_template = json.loads(os.environ['VALUE_TEMPLATE'])
message_header_template = json.loads(os.environ['HEADER_TEMPLATE'])
bootstrap_servers = os.environ['BOOTSTRAP_SERVERS']
perf_counter_batch_size = int(os.environ.get('PERF_COUNTER_BATCH_SIZE', 100))
messages_per_aggregate = int(os.environ.get('MESSAGES_PER_AGGREGATE', 1))
max_message_count = int(os.environ.get('MAX_MESSAGE_COUNT', sys.maxsize))

def error_cb(err):
    """ The error callback is used for generic client errors. These
    errors are generally to be considered informational as the client will
    automatically try to recover from all errors, and no extra action
    is typically required by the application.
    For this example however, we terminate the application if the client
    is unable to connect to any broker (_ALL_BROKERS_DOWN) and on
    authentication errors (_AUTHENTICATION). """
    print("Client error: {}".format(err))
    if err.code() == KafkaError._ALL_BROKERS_DOWN or \
            err.code() == KafkaError._AUTHENTICATION:
        # Any exception raised from this callback will be re-raised from the
        # triggering flush() or poll() call.
        raise KafkaException(err)

def acked(err, msg):
    if err is not None:
        print("Failed to send message: %s: %s" % (str(msg), str(err)))

producer_configs = {
    'bootstrap.servers': bootstrap_servers,
    'client.id': socket.gethostname(),
    'error_cb': error_cb
}
# TODO: Need to support sasl.mechanism PLAIN (API) and GSSAPI (Kerberos) authentication.
# TODO: Need to support truststores for connecting to private DCs.
producer = Producer(producer_configs)

# generates a random value if it is not cached in the template_values dictionary
def get_templated_value(term, template_values):
    if not term in template_values:
        template_values[term] = str(uuid.uuid4())
    return template_values[term]

def fill_template_value(value, template_values):
    str_value = str(value)
    template_regex = '{{(.+?)}}'
    templated_terms = re.findall(template_regex, str_value)
    for term in templated_terms:
        str_value = str_value.replace(f"{{{{{term}}}}}", get_templated_value(term, template_values))
    return str_value

def fill_template(template, templated_terms):
    # TODO: Need to address metadata field, as it's treated as a string instead of a nested object.
    return {field: fill_template_value(value, templated_terms) for field, value in template.items()}

@cache
def get_partition(lock_id):
    bits = 128
    bucket_size = 2**bits / partition_count
    partition = (int(hashlib.md5(lock_id.encode('utf-8')).hexdigest(), 16) / bucket_size)
    return math.floor(partition)

sequence_number = int(time.time() * 1000)
sequence_number = 0
message_count = 0
producing = True
start_time = time.perf_counter()

aggregate_message_counter = 0
# cache for templated term values so that they match across the different templates
templated_values = {}

try:
    while producing:
        sequence_number += 1
        aggregate_message_counter += 1
        message_count += 1
        if aggregate_message_counter % messages_per_aggregate == 0:
            # reset templated values
            templated_values = {}
        else:
            for term in list(templated_values):
                if term not in ['aggregateId', 'tenantId']:
                    del(templated_values[term])
        # Fill in templated field values
        message_key = fill_template(message_key_template, templated_values)
        message_value = fill_template(message_value_template, templated_values)
        message_header = fill_template(message_header_template, templated_values)
        ts = datetime.utcnow().isoformat()[:-3]+'Z'
        message_header['timestamp'] = ts
        message_header['sequence_number'] = str(sequence_number)
        message_value['timestamp'] = ts
        message_value['sequenceNumber'] = sequence_number
        lock_id = message_header['lock_id']
        partition = get_partition(lock_id)  # partition by lock_id, since key could be random, but a given aggregate_id should ALWAYS resolve to the same partition, regardless of key.
        # Send message
        producer.produce(topic_name, partition=partition, key=json.dumps(message_key), value=json.dumps(message_value), headers=message_header, callback=acked)
        if sequence_number % perf_counter_batch_size == 0:
            producer.flush()
            end_time = time.perf_counter()
            total_duration = end_time - start_time
            messages_per_second = (perf_counter_batch_size/total_duration)
            print(f'{messages_per_second} messages/second')
            # reset start time
            start_time = time.perf_counter()
        if message_count >= max_message_count:
            break
except Exception as e:
    print(f'ERROR: %s' % e)
    sys.exit(1)
finally:
    producer.flush()
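Regarding the TODO about sasl.mechanism: this is not an official answer to the question, but librdkafka-based clients such as confluent-kafka generally accept SASL settings directly in the configuration dictionary. A minimal sketch, assuming a SASL_SSL listener; the environment variable names, principal, and keytab path below are placeholders, not values from the original scripts:

# PLAIN (e.g. API key/secret) - placeholder environment variables
producer_configs_plain = {
    'bootstrap.servers': bootstrap_servers,
    'client.id': socket.gethostname(),
    'error_cb': error_cb,
    'security.protocol': 'SASL_SSL',
    'sasl.mechanism': 'PLAIN',
    'sasl.username': os.environ['KAFKA_API_KEY'],
    'sasl.password': os.environ['KAFKA_API_SECRET'],
}

# GSSAPI (Kerberos) - placeholder principal and keytab path
producer_configs_gssapi = {
    'bootstrap.servers': bootstrap_servers,
    'client.id': socket.gethostname(),
    'error_cb': error_cb,
    'security.protocol': 'SASL_SSL',
    'sasl.mechanism': 'GSSAPI',
    'sasl.kerberos.service.name': 'kafka',
    'sasl.kerberos.principal': 'svc-producer@EXAMPLE.COM',
    'sasl.kerberos.keytab': '/etc/security/keytabs/svc-producer.keytab',
}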

How to get a list of MQ queues with pymqi?

I want to get a list of the queues for a specific queue manager. I seem to understand how to do this, but when I try, I get an error.
Traceback (most recent call last):
  File "D:/project/Work-Project/queue list.py", line 23, in <module>
    response = pcf.MQCMD_INQUIRE_Q(args)
  File "C:\Users\ShevcovAA\AppData\Local\Programs\Python\Python37\lib\site-packages\pymqi\__init__.py", line 2769, in __call__
    message = self._pcf.reply_queue.get(None, get_md, get_opts)
  File "C:\Users\ShevcovAA\AppData\Local\Programs\Python\Python37\lib\site-packages\pymqi\__init__.py", line 2021, in get
    raise MQMIError(rv[-2], rv[-1], message=rv[0], original_length=rv[-3])
pymqi.MQMIError: MQI Error. Comp: 2, Reason 2033: FAILED: MQRC_NO_MSG_AVAILABLE
My Code:
import logging
import re

import pymqi

logging.basicConfig(level=logging.INFO)

queue_manager = 'QM1'
channel = 'DEV.APP.SVRCONN'
host = '127.0.0.1'
port = '1414'
conn_info = '%s(%s)' % (host, port)

prefix = "*"
queue_type = pymqi.CMQC.MQQT_LOCAL

args = {pymqi.CMQC.MQCA_Q_NAME: prefix,
        pymqi.CMQC.MQIA_Q_TYPE: queue_type}

qmgr = pymqi.connect(queue_manager, channel, conn_info)

pcf = pymqi.PCFExecute(qmgr)
response = pcf.MQCMD_INQUIRE_Q(args)

for queue_info in response:
    queue_name = queue_info[pymqi.CMQC.MQCA_Q_NAME]
    if (re.match('^SYSTEM', queue_name) or re.match('^AMQ', queue_name) or re.match('^MQ', queue_name)):
        pass
    else:
        q = pymqi.Queue(qmgr, queue_name)
        print(queue_name.strip() + ':' + 'Queue depth:', q.inquire(pymqi.CMQC.MQIA_CURRENT_Q_DEPTH))
        q.close()

qmgr.disconnect()
pymqi v1.12.0 uses different logic to get PCF response messages from the response queue.
By default, a timeout of 5 seconds is used to wait for a response.
As a result, if you have a lot of queues or your QM is under heavy load, this may not be enough.
To fix this, you can increase this interval using the response_wait_interval parameter of the PCFExecute constructor.
pcf = pymqi.PCFExecute(qmgr, response_wait_interval=30000) # 30 seconds
v1.11.0 does not have this parameter and uses a default interval of 30 seconds.
Also, avoid querying each queue for its depth; just query the MQIA_CURRENT_Q_DEPTH attribute.
In the new notation, supported in v1.12+, it will be something like:
attrs = []  # type: List[pymqi.MQOpts]
attrs.append(pymqi.CFST(Parameter=pymqi.CMQC.MQCA_Q_NAME,
                        String=pymqi.ensure_bytes(prefix)))
attrs.append(pymqi.CFIN(Parameter=pymqi.CMQC.MQIA_Q_TYPE,
                        Value=queue_type))
attrs.append(pymqi.CFIL(Parameter=pymqi.CMQCFC.MQIACF_Q_ATTRS,
                        Values=[pymqi.CMQC.MQIA_CURRENT_Q_DEPTH]))

object_filters = []
# object_filters.append(
#     pymqi.CFIF(Parameter=pymqi.CMQC.MQIA_CURRENT_Q_DEPTH,
#                Operator=pymqi.CMQCFC.MQCFOP_GREATER,
#                FilterValue=0))

response = pcf.MQCMD_INQUIRE_Q(attrs, object_filters)

for queue_info in response:
    queue_name = queue_info[pymqi.CMQC.MQCA_Q_NAME]
    queue_depth = queue_info[pymqi.CMQC.MQIA_CURRENT_Q_DEPTH]
    print('{}: {} message(s)'.format(queue_name.strip().decode(), queue_depth))
I solved this error by simply installing an older version. That is, I had PyMQI 1.12.0, and now it is PyMQI 1.11.0.
My Code:
import pymqi
import date_conn

qmgr = pymqi.connect(date_conn.queue_manager, date_conn.channel, date_conn.conn_info)
pcf = pymqi.PCFExecute(qmgr)
c = 0

attrs = {
    pymqi.CMQC.MQCA_Q_NAME: '*'
}

result = pcf.MQCMD_INQUIRE_Q(attrs)

for queue_info in result:
    queue_name = queue_info[pymqi.CMQC.MQCA_Q_NAME]
    print(queue_name)
    c += 1

print(c)

qmgr.disconnect()
