Consolidation of query (task) in Django - python

I have the following task. This task can take a few seconds to complete.
How can I make the task below make fewer trips to the database and run faster?
class SendMessage(Task):
    """Celery task that sends a single SMS message through a gateway.

    Because we don't always have control over transactions in the
    calling code, the message lookup retries up to ``max_retries``
    times, every ``default_retry_delay`` seconds, giving the caller
    ~30 seconds to commit the row before we give up.
    """
    name = "Sending SMS"
    max_retries = 10
    default_retry_delay = 3

    def run(self, message_id, gateway_id=None, **kwargs):
        """Send the message identified by ``message_id``.

        :param message_id: primary key of the ``Message`` to send.
        :param gateway_id: optional ``Gateway`` primary key; when omitted,
            the billee's own ``sms_gateway`` (or the first gateway on
            record) is used.
        """
        logging.debug("About to send a message.")
        try:
            # select_related fetches sender and billee in the same query,
            # saving the extra round trips the plain .get() caused below.
            message = Message.objects.select_related('sender', 'billee').get(pk=message_id)
        except Message.DoesNotExist as exc:
            # Row may not be committed yet -- retry (10 x 3s = 30s).
            # Use the bound self.retry, not the unbound class attribute.
            raise self.retry(exc=exc)

        if gateway_id:
            gateway = Gateway.objects.get(pk=gateway_id)
        elif hasattr(message.billee, 'sms_gateway'):
            gateway = message.billee.sms_gateway
        else:
            # Fall back to the first configured gateway.
            gateway = Gateway.objects.all()[0]

        # Check the sender has enough credit to send the message.
        account = Account.objects.get(user=message.sender)
        # NOTE(review): _balance() is the non-cached version -- verify its
        # query cost before relying on it here.
        if account._balance() >= message.length:
            response = gateway._send(message)
            if response.status == 'Sent':
                # Debit the user's account for the sent message.
                Transaction.objects.create(
                    account=account,
                    amount=-message.charge,
                    description="Debit: SMS Sent",
                )
                message.billed = True
                message.save()
            else:
                # Don't silently swallow a failed send -- record it.
                logging.warning("Gateway returned status %r for message %s",
                                response.status, message_id)
        logging.debug("Done sending message.")

Related

How to get actual slack username instead of user id

I have pulled data from a private slack channel, using conversation history, and it pulls the userid instead of username, how can I change the code to pull the user name so I can identify who each user is? Code below
# --- Slack channel history pager -------------------------------------
CHANNEL = ""
MESSAGES_PER_PAGE = 200
MAX_MESSAGES = 1000
SLACK_TOKEN = ""

client = slack_sdk.WebClient(token=SLACK_TOKEN)

# Fetch the first page of the channel's history.
page = 1
print("Retrieving page {}".format(page))
response = client.conversations_history(
    channel=CHANNEL,
    limit=MESSAGES_PER_PAGE,
)
assert response["ok"]
messages_all = response['messages']

# Keep paging while more pages exist and we stay under MAX_MESSAGES.
while len(messages_all) + MESSAGES_PER_PAGE <= MAX_MESSAGES and response['has_more']:
    page += 1
    print("Retrieving page {}".format(page))
    sleep(1)  # rate limit: wait 1 sec between conversations.history calls
    response = client.conversations_history(
        channel=CHANNEL,
        limit=MESSAGES_PER_PAGE,
        cursor=response['response_metadata']['next_cursor'],
    )
    assert response["ok"]
    # extend() appends in place -- avoids rebuilding the whole list
    # (quadratic cost) on every page as `a = a + b` did.
    messages_all.extend(response['messages'])
It isn't possible to change what is returned from the conversations.history method. If you'd like to convert user IDs to usernames, you'll need to either:
Call the users.info method and retrieve the username from the response.
or
Call the users.list method and iterate through the list and create a local copy (or store in a database) and then have your code look it up.

How to separate subscription and common checkout webhook in stripe?

I have made a webhook (flask) for stripe.
My app has both one time payment and subscription.
I would like to know how can I detect if it's normal checkout or subscription?
This is how I am separating now, but I think it's not recommended way.
I am checking stripe_subscription_id and if exist, I considered it as subscription.
(charge.succeeded is called for subscription succeeded also)
@api.route('/stripe/webhook', methods=['GET', 'POST'])
def webhook():
    """Stripe webhook endpoint.

    Distinguishes one-time checkouts from subscriptions: a
    ``checkout.session.completed`` event carries a ``subscription`` id
    only when the session was created in subscription mode (the
    session's ``mode`` field can be checked as an extra guard).
    """
    event = None
    payload = request.data
    try:
        event = json.loads(payload)
    except Exception as e:
        print('Webhook error while parsing basic request.' + str(e))
    if not event:
        return jsonify({'status': 'failed'}), 400

    if event['type'] == 'charge.succeeded':
        # One-time payment (note: also fired for subscription charges).
        event_intent = event['data']['object']
        payment_intent = event_intent['payment_intent']
    elif event['type'] == 'checkout.session.completed':
        checkout_session = event['data']['object']
        # Present only for subscription-mode checkouts.
        stripe_subscription_id = checkout_session['subscription']
        if stripe_subscription_id:
            # Create a new subscription and mark it as paid this month.
            customer_subscription_mark_paid(checkout_session['id'], stripe_subscription_id)
    elif event['type'] == 'invoice.paid':
        invoice = event['data']['object']
        stripe_subscription_id = invoice['subscription']
        if stripe_subscription_id:
            # The first invoice is already marked paid by the
            # `checkout.session.completed` handler; only handle renewals
            # here so the two handlers don't conflict.
            first_invoice = invoice['billing_reason'] == 'subscription_create'
            if not first_invoice:
                customer_subscription_mark_paid(None, stripe_subscription_id)
    elif event['type'] == 'invoice.payment_failed':
        invoice = event['data']['object']
        stripe_subscription_id = invoice['subscription']
        if stripe_subscription_id:
            customer_subscription_mark_past_due(None, stripe_subscription_id)

    # Always acknowledge with 200 so Stripe stops retrying the event;
    # the original fell off the end (no return), which makes Flask 500.
    return jsonify({'status': 'success'})
Checking to see if the subscription field is filled is fine, but to be extra sure you can check the contents of the mode field.

iot edge direct method handler in python

I have created a module for a Bacnet scan and it will respond with a list of devices and its address as a result. But I'm having trouble implementing a direct method handler in python. When I first tried implementing it myself I got this error, which could mean I didn't successfully register the direct method callback. I have some references but they were for C#, and the Azure docs are not helping me figure out the right method to register the callback. For IoTHubModuleClient there's an on_method_request_received and a receive_method_request. I appreciate any help!
def iothub_client_scan_run():
    """Start the Bacnet-scan direct-method listener and block forever."""
    try:
        iot_client = iothub_client_init()
        # Daemon thread: exits automatically when the main thread does.
        bacnet_scan_listener_thread = threading.Thread(target=device_method_listener, args=(iot_client,))
        bacnet_scan_listener_thread.daemon = True
        bacnet_scan_listener_thread.start()
        while True:
            time.sleep(1000)
    except KeyboardInterrupt:
        # The pasted snippet had a bare `try:` with no handler, which is
        # a SyntaxError; a Ctrl-C handler is the usual sample pattern.
        print("IoTHub module client stopped")
def device_method_listener(iot_client):
    """Poll for direct method requests and answer each one.

    Bug fixed: the original built a response only for ``runBacnetScan``;
    for any other method it set ``response_payload``/``response_status``
    but then sent the undefined name ``response``, raising NameError.
    """
    while True:
        # Blocks until a direct method request arrives.
        method_request = iot_client.receive_method_request()
        print (
            "\nMethod callback called with:\nmethodName = {method_name}\npayload = {payload}".format(
                method_name=method_request.name,
                payload=method_request.payload
            )
        )
        if method_request.name == "runBacnetScan":
            response = bacnet_scan_device(method_request)
        else:
            payload = {"Response": "Direct method {} not defined".format(method_request.name)}
            # 404: this module does not implement the requested method.
            response = MethodResponse.create_from_method_request(method_request, 404, payload)
        # Send a method response so the caller is not left waiting.
        print('Sending method response')
        iot_client.send_method_response(response)
        print('Message sent!')
Edit:
Here is my route config
I was able to resolve my issue or at least find the root cause and it was my network configuration under the createOptions. It seems like there's an issue when I'm trying to do NetworkMode: host and connects to the IotModuleClient.connect_from_edge_environment via connect with connection string. I'm still trying to tweak the connection configuration but at least i know its not on the code.
async def method_request_handler(module_client):
    """Receive direct method requests forever and answer each one."""
    while True:
        request = await module_client.receive_method_request()
        print(
            "\nMethod callback called with:\nmethodName = {method_name}\npayload = {payload}".format(
                method_name=request.name,
                payload=request.payload
            )
        )
        # Dispatch table: method name -> (response payload, status, log line).
        handlers = {
            "method1": ({"result": True, "data": "some data"}, 200, "executed method1"),
            "method2": ({"result": True, "data": 1234}, 200, "executed method2"),
        }
        fallback = ({"result": False, "data": "unknown method"}, 400,
                    "executed unknown method: " + request.name)
        payload, status, log_line = handlers.get(request.name, fallback)
        print(log_line)
        # Build and send the response for this request.
        response = MethodResponse.create_from_method_request(request, status, payload)
        await module_client.send_method_response(response)
        print('Message sent!')
def stdin_listener():
    """Block until the user types Q/q on stdin, then return."""
    while True:
        try:
            selection = input("Press Q to quit\n")
            if selection == "Q" or selection == "q":
                print("Quitting...")
                break
        except Exception:
            # input() raises EOFError when stdin is not a TTY (typical
            # for an IoT Edge module); back off instead of spinning.
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit still propagate.
            time.sleep(10)
# Schedule task for C2D Listener
# NOTE(review): runs the three listeners concurrently; input1_listener and
# twin_patch_listener are defined elsewhere in the module. The gathered
# future should be awaited (or cancelled on shutdown) by the caller.
listeners = asyncio.gather(input1_listener(module_client), twin_patch_listener(module_client), method_request_handler(module_client))

Twitter api.GetFriends sometimes doesn't work

I want to build a complex network of twitter followers.
I'm using the function api.GetFriends :
def get_friend_list_by_user(user, api):
    """Return the accounts *user* follows, fetched via *api*.

    The user id is passed as its string form; repr() of an int is just
    its decimal representation.
    """
    return api.GetFriends(repr(user.id))
The problem is that for the same twitter users, sometimes it works and sometimes doesn't.
When I'm debugging it,
the code is dead at that part on the api.py:
# Excerpt quoted from python-twitter's internal request guard (api.py).
# When sleep_on_rate_limit is enabled and this endpoint's remaining quota
# is 0, the library sleeps until the rate-limit window resets -- which is
# why the call appears to hang (443 seconds here) rather than fail.
if enforce_auth:
if not self.__auth:
raise TwitterError("The twitter.Api instance must be authenticated.")
if url and self.sleep_on_rate_limit:
limit = self.CheckRateLimit(url)
if limit.remaining == 0:
try:
# Sleep until 10 seconds past the window's reset timestamp.
stime = max(int(limit.reset - time.time()) + 10, 0)
logger.debug('Rate limited requesting [%s], sleeping for [%s]', url, stime)
time.sleep(stime)
except ValueError:
pass
if not data:
data = {}
The stime value is 443.

Flask Application gets blocked on multithreading

I am trying to use threading and storing the results of the threads using a single session. This is working fine most of the time, except for a few scenarios where my whole application gets blocked, and I am not able to figure out the reason for that.
My application is getting blocked on notification.save() in __filter_notifications_by_status_and_request_type method. notification.save() is saving the data in to the DB.
I am not able to figure out is this a DB issue or a threading or locking issue.
I am using the Flask app, which I am hitting using passenger_wsgi via Apache. After my application gets blocked, my server stops taking further requests.
DB python library used = SqlAlchemy
class Inference:
    """Infers which notifications apply to an IRIS request and fetches
    them concurrently.

    Notification fetches fan out over a thread pool; DB writes in
    ``__filter_notifications_by_status_and_request_type`` are serialized
    with ``thread_lock`` because the worker threads share one SQLAlchemy
    session.
    """

    def __init__(self, **kwargs):
        """Store the IRIS request, load the inference config, and create
        the lock guarding shared-session DB writes.

        :param kwargs: keyword arguments; recognizes 'IRISRequest'.
        """
        if 'IRISRequest' in kwargs:
            self.iris_request = kwargs['IRISRequest']
        self.racerx_inference_config = Config.get_racerx_inference_config()
        self.thread_lock = threading.Lock()

    def get_inference_object(self):
        """Build {'inference': {notification_id: bool}} by matching the
        request URL and title against the configured keyword maps.

        :return: inference map (only populated for the "athena" system).
        """
        log.info("get_inference_object is called")
        inference_map = {'inference': {}}
        if self.iris_request.system == "athena":
            url_to_notification_map = Config.get_url_to_notification_map()
            # Hoist the lowercasing out of the loops (it is invariant).
            request_url = self.iris_request.url.lower()
            for notification_id, urls in url_to_notification_map.items():
                inference_map['inference'][notification_id] = any(url in request_url for url in urls)
            title_to_notification_map = Config.get_title_to_notification_map()
            if 'title' in self.iris_request.context:
                request_title = self.iris_request.context['title'].lower()
                for notification_id, titles in title_to_notification_map.items():
                    # Only title-match ids the URL pass didn't already set True.
                    if not inference_map['inference'].get(notification_id):
                        inference_map['inference'][notification_id] = any(title in request_title for title in titles)
        return inference_map

    def get_notification_name_list(self):
        """Return the notification names the inference marked True."""
        inference = self.get_inference_object()['inference']
        return [name for name in inference if inference[name] == True]

    def get_notifications(self):
        """Collect notifications from the various sources.

        Uses the client-supplied notification list when present,
        otherwise derives one from the inference, then fetches each
        notification type on a worker thread.

        :return: map of notification_name -> non-empty notification list.
        :raises Exception: when any worker thread failed.
        """
        if len(self.iris_request.notification_name_list) > 0:
            # Explicit list provided by the client.
            self.notification_name_list = self.iris_request.notification_name_list
        else:
            # Derive the list from the inference.
            self.notification_name_list = self.get_notification_name_list()

        # Pre-render every template string per notification type, so the
        # worker threads only do DB work.
        string_translations = {}
        for notification_name in self.notification_name_list:
            config = Config.get_config(notification_name)
            nt = {}
            nt['message'] = self.__get_message_from_template(config.message_map)
            nt['subject'] = self.__get_message_from_template(config.subject_map)
            nt['short_message'] = self.__get_message_from_template(config.short_message_map)
            nt['impact_summary'] = self.__get_message_from_template(config.impact_summary_map)
            action_string_map = {}
            for h in config.action_summary_map:
                if 'string_id' in h:
                    action_string_map[h['string_id']] = self.__get_message_from_template(h)
            nt['action_summary_list'] = action_string_map
            help_strings_map = {}
            for h in config.help_content:
                if 'string_id' in h:
                    help_strings_map[h['string_id']] = self.__get_message_from_template(h)
            nt['help_content_strings'] = help_strings_map
            string_translations[notification_name] = nt

        notifications_map = {}
        log.info("starting the thread pool for getting the notifications data")
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            future_to_notification_name = dict(
                (executor.submit(self.fetch_notifications_by_notification_name, notification_name, string_translations), notification_name)
                for notification_name in self.notification_name_list)
            log.info("end of threadpool")
            log.info("start processing the data produced by the thread pool")
            for future in concurrent.futures.as_completed(future_to_notification_name):
                notification_name = future_to_notification_name[future]
                if future.exception() is not None:
                    raise Exception("Error occured while fetching the data for notification: " + notification_name + ", error: " + str(future.exception()))
                if len(future.result()) > 0:
                    notifications_map[notification_name] = future.result()
            log.info("end processing the data produced by the thread pool")
        self.iris_request.session.commit()
        log.info("Commited the DB session for the notifications")
        return notifications_map

    def fetch_notifications_by_notification_name(self, notification_name, string_translations):
        """Fetch and filter notifications of one type (worker-thread entry).

        :param notification_name: type of the notification to fetch.
        :param string_translations: map of pre-rendered strings per type.
        :return: filtered notification list (possibly empty).
        """
        log.info("fetch_notifications_by_notification_name is called")
        object_model = ObjectModel(IRISRequest=self.iris_request, NotificationName=notification_name, StringMap=string_translations[notification_name])
        notifications = object_model.get_iris_notification_objects()
        filtered_notifications = self.__filter_notifications_by_status_and_request_type(notifications)
        if len(filtered_notifications) > 0:
            return filtered_notifications
        else:
            return []

    def __filter_notifications_by_status_and_request_type(self, notifications):
        """Persist each notification, then drop expired, snoozed,
        dismissed, and request-type-mismatched ones.

        Bug fixed: the original acquired ``thread_lock``, called
        ``notification.save()``, and released the lock with no
        try/finally.  When save() raised, the lock was never released and
        every other worker thread -- and hence the whole application --
        deadlocked.

        :param notifications: list of notifications to filter.
        :return: filtered notification list.
        """
        log.info("__filter_notifications_by_status_and_request_type is called")
        filtered_notifications = []
        for notification in notifications:
            keep_notification = True
            # Serialize the DB write: worker threads share one session.
            log.info("Acquiring the lock on thread, to save the data into DB")
            self.thread_lock.acquire()
            try:
                notification.save()
            except Exception:
                log.error("unable to store info in DB")
            finally:
                # Always release, even when save() raises, so no other
                # thread is left blocked on the lock forever.
                self.thread_lock.release()
            log.info("Releasing the lock after saving the data into DB")
            # Dismissed notifications are inactive.
            if notification.is_active == False:
                keep_notification = False
            # Expired; validity == -1 means the notification never expires.
            if notification.validity != -1 and (datetime.date.today() - notification.creation_date).days > notification.validity:
                keep_notification = False
            # Still within its snooze window.
            if notification.snooze_date != None and (datetime.datetime.today() - notification.snooze_date).days <= notification.snooze_duration:
                keep_notification = False
            # FETCH_READ requests only want read notifications.
            if self.iris_request.notifcation_fetch_type == Constants.FETCH_TYPE_READ and notification.is_read == False:
                keep_notification = False
            # FETCH_UNREAD requests only want unread notifications.
            if self.iris_request.notifcation_fetch_type == Constants.FETCH_TYPE_UNREAD and notification.is_read == True:
                keep_notification = False
            if keep_notification == True:
                filtered_notifications.append(notification)
        return filtered_notifications
I was using the lock in given manner
self.thread_lock.acquire()
notification.save()
self.thread_lock.release()
When notification.save() throws an exception, the system is unable to release the lock, and every other thread blocks on it.
It can easily be fixed with proper error handling:
self.thread_lock.acquire()
try:
notification.save()
except Exception as e:
log.error("unable to store info in DB")
finally:
self.thread_lock.release()

Categories