I am working with a Celery worker, with Redis as the broker and backend.
Steps:
The add task picks the data from its queue, processes it, and the result should be sent to the read queue.
The read task reads the data from the read queue and prints the result.
Before the second task can pick it up, the message gets deleted with the warning below:
[2023-02-02 16:41:29,992: WARNING/MainProcess] Received and deleted unknown message. Wrong destination?!?
The full contents of the message body was: body: {'task_id': 'e54849dc-3bc7-409c-afab-5c69b3310d99', 'status': 'SUCCESS', 'result': 7, 'traceback': None, 'children': []} (120b)
{content_type:'application/json' content_encoding:'utf-8'
delivery_info:{'exchange': '', 'routing_key': 'read'} headers={}}
Code snippet:
from kombu import Queue, Exchange
from celery import Celery
celery = Celery('tasks', broker="redis://localhost:6379/0", backend="rpc://")
CELERY_DEFAULT_QUEUE = 'default'
CELERY_DEFAULT_EXCHANGE = 'default'
CELERY_DEFAULT_EXCHANGE_TYPE = 'direct'
CELERY_DEFAULT_ROUTING_KEY = 'default'
celery.conf.update(
CELERY_ROUTES={
"celery_worker.celery_worker.add": {
"queue": "add",
"routing_key": "add"
},
"celery_worker.celery_worker.read": {
"queue": "read",
"routing_key": "read"
}
},
CELERY_QUEUES = (
Queue(CELERY_DEFAULT_QUEUE, Exchange(CELERY_DEFAULT_EXCHANGE),
routing_key=CELERY_DEFAULT_ROUTING_KEY),
Queue("add", Exchange(CELERY_DEFAULT_EXCHANGE),
routing_key="add"),
Queue("read", Exchange(CELERY_DEFAULT_EXCHANGE),
routing_key="read"),
),
CELERY_CREATE_MISSING_QUEUES = True,
CELERYD_PREFETCH_MULTIPLIER = 1)
@celery.task(name='add', acks_late=True)
def add(x, y):
print(f"Order Complete!{x}, {y}")
return x + y
@celery.task(name='read', acks_late=True, queue="read", routing_key="read")
def read(data):
print("data")
print(f"Order Completed read! {data}")
return data
task_1 = add.apply_async((4,3), queue='add', routing_key="add", reply_to="read")
Please help me resolve this issue.
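For context on the warning: reply_to is consumed by the result backend (here rpc://), not by the task router, so the worker listening on the read queue receives a bare result payload with no task headers and drops it, which matches the message above. A common way to hand add's result to read is an explicit callback instead; a minimal sketch, assuming the task definitions above:

# Sketch: route add's return value into read via a callback, not reply_to.
from celery import chain

# Option 1: attach read as a link; it is called with add's return value (7).
add.apply_async((4, 3), queue='add', link=read.s().set(queue='read'))

# Option 2: the same wiring as an explicit chain.
chain(add.s(4, 3).set(queue='add'), read.s().set(queue='read'))()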
I'm trying to grab information from a few channels on server A (where I have no permissions beyond viewing and reading) and then send that information to my server B (which I own).
I've shared what I've done so far below. All it does at the moment is send me every message from all servers and DMs.
I'm unable to filter down to server A and the few channels in it, and then forward those messages to my server.
import websocket
import json
import threading
import time
def send_json_request(ws,request):
ws.send(json.dumps(request))
def receive_json_response(ws):
response = ws.recv()
if response:
return json.loads(response)
def heartbeat(interval, ws):
print('Search activated')
while True:
time.sleep(interval)
heartbeatJSON = {
    "op": 1,
    "d": None  # JSON null; the string "null" is not a valid heartbeat payload
}
send_json_request(ws, heartbeatJSON)
print("Looking for information.")
ws = websocket.WebSocket()
ws.connect("wss://gateway.discord.gg/?v=6&encoding=json")
event = receive_json_response(ws)
heartbeat_interval = event['d']['heartbeat_interval'] / 1000
threading.Thread(target=heartbeat, args=(heartbeat_interval, ws), daemon=True).start()
token = "DISCORD_TOKEN"
payload = {
"op": 2,
"d": {
"token": token,
"properties": {
"$os": 'windows',
'$browser': 'chrome',
'$device': 'pc'
}
}
}
send_json_request(ws, payload)
while True:
event = receive_json_response(ws)
try:
    if event.get('op') == 11:
        print('heartbeat received')
    elif event.get('t') == 'MESSAGE_CREATE':
        print(f"{event['d']['author']['username']}: {event['d']['content']}")
except (AttributeError, KeyError):
    pass  # skip non-message events and empty frames
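To forward only a few channels from server A, one option is to filter MESSAGE_CREATE dispatch events on their guild_id and channel_id before doing anything with them. A minimal sketch as a drop-in for the loop above; the two ID constants are hypothetical placeholders for your real snowflakes:

SOURCE_GUILD_ID = "111111111111111111"        # hypothetical: server A's ID
WATCHED_CHANNEL_IDS = {"222222222222222222"}  # hypothetical: channels to mirror

while True:
    event = receive_json_response(ws)
    if not event:
        continue
    if event.get('op') == 11:
        print('heartbeat received')
    elif event.get('t') == 'MESSAGE_CREATE':
        d = event['d']
        # Only act on messages from the watched guild and channels.
        if d.get('guild_id') == SOURCE_GUILD_ID and d.get('channel_id') in WATCHED_CHANNEL_IDS:
            print(f"{d['author']['username']}: {d['content']}")
            # ...forward d['content'] to your own server here, e.g. via a webhook POST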
I am trying to solve an issue, and I don't fully understand why it is not working.
I have 2 topics: TOPIC_A, to which I can send my messages and receive them correctly. Once a message has been received, I would like to send it on to another topic, TOPIC_B. I have been testing this code locally and everything worked just fine, but since I started using an Azure Function with a Service Bus trigger, the code has started acting funny. Here is my code:
import logging
import azure.functions as func
import json
import boto3
from azure.keyvault.secrets import SecretClient
from azure.identity import DefaultAzureCredential
from azure.servicebus import ServiceBusClient, ServiceBusMessage
def main(message: func.ServiceBusMessage):
logging.info(message)
print(message)
#KeyVault Configuration
KeyVault_Url = f'url'
credential = DefaultAzureCredential()
client_keyvault = SecretClient(vault_url=KeyVault_Url, credential=credential)
# # Service Bus Connection string
CONNECTION_STR = client_keyvault.get_secret("CONN").value
# For receiving the feedback from campaigns
TOPIC_NAME_A = "TOPICA"
SUBSCRIPTION_NAME = "XXX"
# For sending feedback and results of sentiment analysis and language detection
TOPIC_NAME_B = "TOPICB"
comprehend = boto3.client(service_name='comprehend', region_name='eu-west-1', aws_access_key_id=client_keyvault.get_secret("ID").value, aws_secret_access_key=client_keyvault.get_secret("SECRET").value)
# This block receives the messages from the service bus listed above.
# Note: once a message is received and printed (JSON format), it is removed from the service bus.
servicebus_client = ServiceBusClient.from_connection_string(conn_str=CONNECTION_STR)
with servicebus_client:
receiver = servicebus_client.get_subscription_receiver(
topic_name=TOPIC_NAME_A,
subscription_name=SUBSCRIPTION_NAME
)
with receiver:
received_msgs = receiver.receive_messages(max_message_count=10, max_wait_time=60)
output_global = {}
for msg in received_msgs:
message1 = str(msg)
res = json.loads(message1)
# extracting the text from the message from service bus
text = res['Text']
#passing the text to comprehend
result_json= json.dumps(comprehend.detect_sentiment(Text=text, LanguageCode='en'), sort_keys=True, indent=4)
result = json.loads(result_json) # converting json to python dictionary
print(result)
# logging.info("Result from comprehend" , result)
#extracting the sentiment value
sentiment = result["Sentiment"]
#extracting the sentiment score
if sentiment == "POSITIVE":
value = round(result["SentimentScore"]["Positive"] * 100,2)
elif sentiment == "NEGATIVE":
value = round(result["SentimentScore"]["Negative"] * 100,2)
elif sentiment == "NEUTRAL":
value = round(result["SentimentScore"]["Neutral"] * 100,2)
elif sentiment == "MIXED":
value = round(result["SentimentScore"]["Mixed"] * 100,2)
# To detect the language of the feedback, the text received from service bus is passed to the function below
lang_result=json.dumps(comprehend.detect_dominant_language(Text = text), sort_keys=True, indent=4)
#converting languages detection results into a dictionary
lang_result_json=json.loads(lang_result)
#Formatting the score from the results
for line in lang_result_json["Languages"]:
line['Score'] = round(line['Score']* 100, 2)
#storing the output of sentiment analysis, language detection and ids in a dictionary and converting it to JSON
output = {
'XXX': res['XXX'],
'XXX Id': res['XXX'],
'XXX': res['XXX'],
'XXX': res['XXX'],
'XXX': res['XXX'],
'Sentiment': sentiment,
'Value': value,
'Languages': lang_result_json['Languages']
}
# logging.info("Message Body: " + output)
output_json = json.dumps(output, ensure_ascii=False)
#-------------------------------------------------------------------------------------------------------
# Sending the processed output (output_json) in json format to another service bus
def send_output(sender):
message2 = ServiceBusMessage(
output_json,
content_type="XXX", #setting the content type so that the service bus can route it.
application_properties={b'tenantcode': msg.application_properties[b'tenantcode']}  # setting the tenant code
)
sender.send_messages(message2)
servicebus_client = servicebus_client.from_connection_string(conn_str=CONNECTION_STR, logging_enable=True)
with servicebus_client:
sender = servicebus_client.get_topic_sender(topic_name=TOPIC_NAME_B)
with sender:
send_output(sender)
This is my host.json:
{
"version": "2.0",
"extensions": {
"serviceBus": {
"messageHandlerOptions": {
"autoComplete": true
}
}
},
"logging": {
"applicationInsights": {
"samplingSettings": {
"isEnabled": true,
"excludedTypes": "Request"
}
}
},
"extensionBundle": {
"id": "Microsoft.Azure.Functions.ExtensionBundle",
"version": "[2.*, 3.0.0)"
}
}
And this is my function.json:
{
"scriptFile": "outthinkServiceBus.py",
"entryPoint": "main",
"bindings": [
{
"name": "message",
"type": "serviceBusTrigger",
"direction": "in",
"topicName": "XXX",
"subscriptionName": "XXX",
"connection": "XXX"
}
]
}
Inside the receiver I have a for loop over msg, with which I would like to loop over all the messages in the topic and send them one by one to TOPIC_B.
The function runs as-is, and in the Azure Function output I can see this message:
2021-10-15 15:23:45.124
Message receiver b'receiver-link-' state changed from <MessageReceiverState.Open: 3> to <MessageReceiverState.Closing: 4> on connection: b'SBReceiver-'
Information
2021-10-15 15:23:45.552
Shutting down connection b'SBReceiver-'.
So the processing gets as far as the receiver, but the sender function never gets triggered.
If I remove the for loop, the code works just fine: I can see the sender triggering and completing successfully.
Any help understanding where the mistake is and what I am doing wrong?
Thank you so much for any help you can provide. And please, if you need more info, just ask.
UPDATE:
I have tried moving the send_output function outside the for loop; in that case send_output triggers, but the Azure Function fails because output_json is out of scope. So far the only thing I could figure out is that, for some reason, the function defined inside the loop never fires.
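For what it's worth, one shape that avoids both symptoms (the nested def never firing, and output_json going out of scope when the send is moved out) is to open the sender once next to the receiver and send inside the loop. A rough sketch of the reshaped tail of main, reusing the names above; process_feedback is a hypothetical helper standing in for the comprehend block:

with servicebus_client:
    receiver = servicebus_client.get_subscription_receiver(
        topic_name=TOPIC_NAME_A, subscription_name=SUBSCRIPTION_NAME)
    sender = servicebus_client.get_topic_sender(topic_name=TOPIC_NAME_B)
    with receiver, sender:
        for msg in receiver.receive_messages(max_message_count=10, max_wait_time=60):
            res = json.loads(str(msg))
            output_json = process_feedback(res)  # hypothetical: the sentiment/language logic above
            sender.send_messages(ServiceBusMessage(
                output_json,
                content_type="XXX",
                application_properties={b'tenantcode': msg.application_properties[b'tenantcode']},
            ))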
I don't know a lot about programming, but I have been working with the Raspberry Pi for a few years. I wanted to use Alexa with the Pi and run scripts to turn the GPIOs on and off. After trying some tutorials, I got to the point where I connected AWS Lambda with the Alexa skill. The problem is that I get an error when testing the skill.
"Skill response was marked as failure
Luces
Request Identifier: amzn1.echo-api.request.49687858-4c4f-482f-b82d-dd0ffedc9841
The target Lambda application returned a failure response"
I check the log on the AWS cloud and this is what I got.
'intent': KeyError
Traceback (most recent call last):
File "/var/task/lambda_function.py", line 40, in lambda_handler
intent_name = event['request']['intent']['name']
KeyError: 'intent'
I don't have a clue what to do. I adapted the code from a hackster.io tutorial. My Lambda code, on Python 2.7, is:
import boto3
access_key =
access_secret =
region =
queue_url =
def build_speechlet_response(title, output, reprompt_text, should_end_session):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
def post_message(client, message_body, url):
response = client.send_message(QueueUrl = url, MessageBody= message_body)
def lambda_handler(event, context):
client = boto3.client('sqs', aws_access_key_id = access_key, aws_secret_access_key = access_secret, region_name = region)
intent_name = event['request']['intent']['name']
if intent_name == "LightOn":
post_message(client, 'on', queue_url)
message = "on"
elif intent_name == "LightOff":
post_message(client, 'off', queue_url)
message = "off"
else:
message = "Unknown"
speechlet = build_speechlet_response("Mirror Status", message, "", "true")
return build_response({}, speechlet)
I just want to turn the LED on and then off, but I don't know if I need all that code. If there is a simpler way to code this in Lambda, please tell me.
Thank you very much in advance for the help!
There are several different request types, the "standard ones" being: CanFulfillIntentRequest, LaunchRequest, IntentRequest, and SessionEndedRequest.
When you first open the skill, a LaunchRequest is sent, which does not contain the intent parameter. That's likely why you're getting the KeyError.
A good approach is to check the request type before you try to process it.
if event['request']['type'] == "LaunchRequest":
print("I'm a launch request.")
elif event['request']['type'] == "IntentRequest":
print("I'm an intent request.")
The various request types and the parameters they can hold can be found in the Alexa documentation.
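Applied to the handler in the question, the dispatch might look like the sketch below; the LaunchRequest wording is an assumption:

def lambda_handler(event, context):
    request_type = event['request']['type']
    if request_type == "LaunchRequest":
        # A LaunchRequest carries no 'intent' key, so reply without reading it.
        speechlet = build_speechlet_response("Mirror Status", "Say light on or light off.", "", False)
        return build_response({}, speechlet)
    if request_type == "IntentRequest":
        intent_name = event['request']['intent']['name']
        # ...the existing LightOn / LightOff handling from the question goes here...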
I have written a Python script to send instance information over email on a cron schedule and to populate metrics as well. With the following code I can see all the logs in the CloudWatch Logs console. However, the dimension never gets created in the CloudWatch console, and it does not trigger any mail either.
import boto3
import json
import logging
from datetime import datetime
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def post_metric(example_namespace, example_dimension_name, example_metric_name, example_dimension_value, example_metric_value):
cw_client = boto3.client("cloudwatch")
response = cw_client.put_metric_data(
Namespace=example_namespace,
MetricData=[
{
'MetricName': example_metric_name,
'Dimensions': [
{
'Name': example_dimension_name,
'Value': example_dimension_value
},
],
'Timestamp': datetime.now(),
'Value': int(example_metric_value)
},
]
)
def lambda_handler(event, context):
logger.info(event)
ec2_client = boto3.client("ec2")
sns_client = boto3.client("sns")
response = ec2_client.describe_instances(
Filters=[
{
'Name': 'tag:Name',
'Values': [
'jenkins-slave-*'
]
}
]
)['Reservations']
for reservation in response:
ec2_instances = reservation["Instances"]
for instance in ec2_instances:
myInstanceId = (instance['InstanceId'])
myInstanceState = (instance['State']['Name'])
myInstance = \
(
{
'InstanceId': (myInstanceId),
'InstanceState': (myInstanceState),
}
)
logger.info(json.dumps(myInstance))
post_metric("Jenkins", "ciname", "orphaned-slaves", myInstanceId, 1)
# Send message to SNS (Testing purpose)
SNS_TOPIC_ARN = 'arn:aws:sns:us-east-1:1234567890:example-instance-alarms'
sns_client.publish(
TopicArn = SNS_TOPIC_ARN,
Subject = 'Instance Info: ' + myInstanceId,
Message = 'Instance id: ' + myInstanceId
)
Can anyone please help if I am missing anything here? Thanks in advance.
Publishing the datapoint with put_metric_data is not enough on its own: to get mail you also need to create an alarm with put_metric_alarm, which takes required fields such as AlarmName and EvaluationPeriods, according to the documentation.
You can use this as an example.
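In other words, put_metric_data only publishes the datapoint; the email comes from an alarm on that metric with an SNS action. A minimal sketch of such an alarm, reusing myInstanceId and the SNS topic from the question; the alarm name, period, and threshold are assumptions:

import boto3

cw_client = boto3.client("cloudwatch")
cw_client.put_metric_alarm(
    AlarmName='orphaned-slaves-alarm',  # hypothetical name
    Namespace='Jenkins',
    MetricName='orphaned-slaves',
    Dimensions=[{'Name': 'ciname', 'Value': myInstanceId}],  # one alarm per dimension value
    Statistic='Sum',
    Period=300,  # assumed: 5-minute buckets
    EvaluationPeriods=1,
    Threshold=0,
    ComparisonOperator='GreaterThanThreshold',
    AlarmActions=['arn:aws:sns:us-east-1:1234567890:example-instance-alarms'],  # the SNS topic from the question
)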
I have the following problem: I'm using a Python process that must wait X number of seconds. The process by itself works correctly; the problem appears when I run it as a task on Celery.
When the worker hits the time.sleep(X) in one task, it pauses all the tasks in the worker. For example:
I have worker A, which can do 4 tasks at the same time (q, w, e and r). Task r has a sleep of 1800 seconds. The worker is doing the 4 tasks at the same time, but when task r does the sleep, the worker stops q, w and e too.
Is this normal? Do you know how I can solve this problem?
EDIT:
This is an example of my celery.py with my beat schedule and queues:
app.conf.update(
CELERY_DEFAULT_QUEUE='default',
CELERY_QUEUES=(
Queue('search', routing_key='search.#'),
Queue('tests', routing_key='tests.#'),
Queue('default', routing_key='tasks.#'),
),
CELERY_DEFAULT_EXCHANGE='tasks',
CELERY_DEFAULT_EXCHANGE_TYPE='topic',
CELERY_DEFAULT_ROUTING_KEY='tasks.default',
CELERY_TASK_RESULT_EXPIRES=10,
CELERYD_TASK_SOFT_TIME_LIMIT=1800,
CELERY_ROUTES={
'tests.tasks.volume': {
'queue': 'tests',
'routing_key': 'tests.volume',
},
'tests.tasks.summary': {
'queue': 'tests',
'routing_key': 'tests.summary',
},
'search.tasks.links': {
'queue': 'search',
'routing_key': 'search.links',
},
'search.tasks.urls': {
'queue': 'search',
'routing_key': 'search.urls',
},
},
CELERYBEAT_SCHEDULE={
# heavy one
'each-hour-summary': {
'task': 'tests.tasks.summary',
'schedule': crontab(minute='0', hour='*/1'),
'args': (),
},
'each-hour-volume': {
'task': 'tests.tasks.volume',
'schedule': crontab(minute='0', hour='*/1'),
'args': (),
},
'links-each-cuarter': {
'task': 'search.tasks.links',
'schedule': crontab(minute='*/15'),
'args': (),
},
'urls-each-ten': {
'schedule': crontab(minute='*/10'),
'task': 'search.tasks.urls',
'args': (),
},
}
)
tests/tasks.py:
@app.task
def summary():
    execute_sumary()  # heavy task, ~1 hour approx.
@app.task
def volume():
    execute_volume()  # not important, less than 5 minutes
and search/tasks.py:
@app.task
def links():
    free = search_links()  # returns a boolean
    if free:
        process_links()
    else:
        time.sleep(1080)  # <-------- the sleep I have problems with
        process_links()
@app.task
def urls():
    execute_urls()  # not important, less than 1 minute
Well, I have 2 workers: A for the search queue and B for tests and default.
The problem is with A: when it takes the "links" task and executes the time.sleep(), it stops the other tasks that the worker is doing.
Because worker B is working correctly, I think the problem is the time.sleep() call.
If you only have one process/thread, a call to sleep() will block it. This means that no other task will run...
You set CELERYD_TASK_SOFT_TIME_LIMIT=1800 but your sleep is 1080.
Only one or two tasks can run in this time interval.
Set CELERYD_TASK_SOFT_TIME_LIMIT > (1080 + work time) * 3.
Set a higher --concurrency (> 4) when starting the Celery worker.
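Besides raising concurrency, a sleep-free variant is to reschedule the task instead of blocking the worker process. A sketch using Celery's retry with a countdown, assuming the links task and helpers above:

@app.task(bind=True, max_retries=1)
def links(self):
    free = search_links()
    if free:
        process_links()
    else:
        # Re-queue this task to run again in 1080 s; the worker process is
        # freed to pick up other tasks instead of sleeping.
        raise self.retry(countdown=1080)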