IoT Edge direct method handler in Python

I have created a module for a BACnet scan, and it responds with a list of devices and their addresses as a result. But I'm having trouble implementing a direct method handler in Python. When I first tried implementing it myself I got an error, which could mean I didn't successfully register the direct method callback. I have some references, but they were for C#, and the Azure docs aren't helping me figure out the right method to register the callback. For IoTHubModuleClient there's an on_method_request_received and a receive_method_request. I appreciate any help!
def iothub_client_scan_run():
    try:
        iot_client = iothub_client_init()
        bacnet_scan_listener_thread = threading.Thread(target=device_method_listener, args=(iot_client,))
        bacnet_scan_listener_thread.daemon = True
        bacnet_scan_listener_thread.start()
        while True:
            time.sleep(1000)
    except KeyboardInterrupt:
        # (assumed) the except clause was cut off in the snippet; stop cleanly on Ctrl+C
        print("IoTHubModuleClient stopped")

def device_method_listener(iot_client):
    while True:
        # Receive the direct method request
        method_request = iot_client.receive_method_request()
        print(
            "\nMethod callback called with:\nmethodName = {method_name}\npayload = {payload}".format(
                method_name=method_request.name,
                payload=method_request.payload
            )
        )
        if method_request.name == "runBacnetScan":
            response = bacnet_scan_device(method_request)
        else:
            response_payload = {"Response": "Direct method {} not defined".format(method_request.name)}
            response_status = 404
        # Send a method response indicating the method request was resolved
        print('Sending method response')
        iot_client.send_method_response(response)
        print('Message sent!')
Edit:
Here is my route config

I was able to resolve my issue, or at least find the root cause: it was my network configuration under createOptions. There seems to be an issue when I set NetworkMode: host and connect via IoTHubModuleClient.create_from_edge_environment rather than connecting with a connection string. I'm still trying to tweak the connection configuration, but at least I know it's not the code.

async def method_request_handler(module_client):
    while True:
        method_request = await module_client.receive_method_request()
        print(
            "\nMethod callback called with:\nmethodName = {method_name}\npayload = {payload}".format(
                method_name=method_request.name,
                payload=method_request.payload
            )
        )
        if method_request.name == "method1":
            payload = {"result": True, "data": "some data"}  # set response payload
            status = 200  # set return status code
            print("executed method1")
        elif method_request.name == "method2":
            payload = {"result": True, "data": 1234}  # set response payload
            status = 200  # set return status code
            print("executed method2")
        else:
            payload = {"result": False, "data": "unknown method"}  # set response payload
            status = 400  # set return status code
            print("executed unknown method: " + method_request.name)
        # Send the response
        method_response = MethodResponse.create_from_method_request(method_request, status, payload)
        await module_client.send_method_response(method_response)
        print('Message sent!')

def stdin_listener():
    while True:
        try:
            selection = input("Press Q to quit\n")
            if selection == "Q" or selection == "q":
                print("Quitting...")
                break
        except:
            time.sleep(10)

# Schedule task for C2D Listener
listeners = asyncio.gather(input1_listener(module_client), twin_patch_listener(module_client), method_request_handler(module_client))
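For reference, recent versions of the azure-iot-device SDK also expose on_method_request_received as a handler property, which avoids the receive_method_request() polling loop entirely. A minimal sketch, assuming azure-iot-device 2.x and an IoT Edge module identity (the runBacnetScan payload is a placeholder):

```python
# Minimal sketch, assuming azure-iot-device 2.x: register a handler property
# instead of polling receive_method_request() in a loop.
import asyncio
from azure.iot.device.aio import IoTHubModuleClient
from azure.iot.device import MethodResponse

async def main():
    module_client = IoTHubModuleClient.create_from_edge_environment()
    await module_client.connect()

    async def handle_method_request(method_request):
        if method_request.name == "runBacnetScan":
            payload = {"result": True}  # placeholder; return real scan results here
            status = 200
        else:
            payload = {"result": False, "data": "unknown method"}
            status = 404
        response = MethodResponse.create_from_method_request(method_request, status, payload)
        await module_client.send_method_response(response)

    # The SDK invokes this handler for every incoming direct method call.
    module_client.on_method_request_received = handle_method_request

    # Keep the module alive.
    while True:
        await asyncio.sleep(60)

if __name__ == "__main__":
    asyncio.run(main())
```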

Related

Tornado gives error Cannot write() after finish()

I am using the Tornado chat demo example from here: https://github.com/tornadoweb/tornado/tree/master/demos/chat
and just altering it very slightly.
The code change is just a small class called Connections and a bit in the MessageNewHandler(). All I am doing is just saving a reference to self and trying to write(message) to a previous client.
But when execution reaches this line, conns.conns[0].write(message), I get this error message:
[E 220107 23:18:38 web:1789] Uncaught exception POST /a/message/new (::1)
HTTPServerRequest(protocol='http', host='localhost:8888', method='POST', uri='/a/message/new', version='HTTP/1.1', remote_ip='::1')
Traceback (most recent call last):
  File "/home/joe/dev/tornado/lib/python3.8/site-packages/tornado/web.py", line 1702, in _execute
    result = method(*self.path_args, **self.path_kwargs)
  File "server.py", line 89, in post
    MessageNewHandler.clients[0].write(message)
  File "/home/joe/dev/tornado/lib/python3.8/site-packages/tornado/web.py", line 833, in write
    raise RuntimeError("Cannot write() after finish()")
RuntimeError: Cannot write() after finish()
[E 220107 23:18:38 web:2239] 500 POST /a/message/new (::1) 5.98ms
Here is the code:
import asyncio
import tornado.escape
import tornado.ioloop
import tornado.locks
import tornado.web
import os.path
import uuid

from tornado.options import define, options, parse_command_line

define("port", default=8888, help="run on the given port", type=int)
define("debug", default=True, help="run in debug mode")


class Connections(object):
    def __init__(self):
        self.conns = []

    def add_connection(self, conn_self):
        self.conns.append(conn_self)

    def conns(self):
        return self.conns


conns = Connections()


class MessageBuffer(object):
    def __init__(self):
        # cond is notified whenever the message cache is updated
        self.cond = tornado.locks.Condition()
        self.cache = []
        self.cache_size = 200

    def get_messages_since(self, cursor):
        """Returns a list of messages newer than the given cursor.

        ``cursor`` should be the ``id`` of the last message received.
        """
        results = []
        for msg in reversed(self.cache):
            if msg["id"] == cursor:
                break
            results.append(msg)
        results.reverse()
        return results

    def add_message(self, message):
        self.cache.append(message)
        if len(self.cache) > self.cache_size:
            self.cache = self.cache[-self.cache_size :]
        self.cond.notify_all()


# Making this a non-singleton is left as an exercise for the reader.
global_message_buffer = MessageBuffer()


class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.render("index.html", messages=global_message_buffer.cache)


class MessageNewHandler(tornado.web.RequestHandler):
    """Post a new message to the chat room."""

    def post(self):
        message = {"id": str(uuid.uuid4()), "body": self.get_argument("body")}
        # render_string() returns a byte string, which is not supported
        # in json, so we must convert it to a character string.
        message["html"] = tornado.escape.to_unicode(
            self.render_string("message.html", message=message)
        )
        conns.add_connection(self)
        if len(conns.conns) > 2:
            conns.conns[0].write(message)
        self.finish()


class MessageUpdatesHandler(tornado.web.RequestHandler):
    """Long-polling request for new messages.

    Waits until new messages are available before returning anything.
    """

    async def post(self):
        cursor = self.get_argument("cursor", None)
        messages = global_message_buffer.get_messages_since(cursor)
        while not messages:
            # Save the Future returned here so we can cancel it in
            # on_connection_close.
            self.wait_future = global_message_buffer.cond.wait()
            try:
                await self.wait_future
            except asyncio.CancelledError:
                return
            messages = global_message_buffer.get_messages_since(cursor)
        if self.request.connection.stream.closed():
            return
        self.write(dict(messages=messages))

    def on_connection_close(self):
        self.wait_future.cancel()


def main():
    parse_command_line()
    app = tornado.web.Application(
        [
            (r"/", MainHandler),
            (r"/a/message/new", MessageNewHandler),
            (r"/a/message/updates", MessageUpdatesHandler),
        ],
        cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        xsrf_cookies=True,
        debug=options.debug,
    )
    app.listen(options.port)
    tornado.ioloop.IOLoop.current().start()


if __name__ == "__main__":
    main()
You're writing to an already closed connection; that's why you're seeing the error.
If you want to write to a previously connected client, you have to keep that connection open.
However, tracking regular HTTP connections with conns.add_connection(self) doesn't make much sense.
You should consider using WebSockets if you want to keep previous connections open and track them.
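A minimal sketch of that WebSocket approach (the handler and route names are illustrative, not part of the chat demo):

```python
import tornado.websocket

class ChatSocketHandler(tornado.websocket.WebSocketHandler):
    # Open sockets are tracked here and removed on close, so a write can
    # never hit an already-finished connection.
    clients = set()

    def open(self):
        ChatSocketHandler.clients.add(self)

    def on_close(self):
        ChatSocketHandler.clients.discard(self)

    def on_message(self, message):
        # Relay the incoming message to every other connected client.
        for client in ChatSocketHandler.clients:
            if client is not self:
                client.write_message(message)

# Registered alongside the existing handlers, e.g.:
# (r"/chatsocket", ChatSocketHandler)
```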
Update: Here's how you can keep a connection open. If I understand correctly, you want to send a message from the current client to the previous client.
1. Using tornado.locks.Condition():
import tornado.locks


class MessageNewHandler(tornado.web.RequestHandler):
    """Post a new message to the chat room."""

    clients = []
    condition = tornado.locks.Condition()

    async def post(self):
        message = {"id": str(uuid.uuid4()), "body": self.get_argument("body")}
        # render_string() returns a byte string, which is not supported
        # in json, so we must convert it to a character string.
        message["html"] = tornado.escape.to_unicode(
            self.render_string("message.html", message=message)
        )
        MessageNewHandler.clients.append(self)

        if len(MessageNewHandler.clients) < 2:
            # less than 2 clients
            # wait until notified
            await MessageNewHandler.condition.wait()
        else:
            # at least 2 clients
            # write to previous client's response
            MessageNewHandler.clients[0].finish(message)
            # notify the first waiting client
            # so it can send the response
            MessageNewHandler.condition.notify()

        # Note: since you've finished the previous client's response
        # you should also remove it from the clients list
        # since you can't use that connection again
2. Using tornado.concurrent.Future():
import tornado.concurrent


class MessageNewHandler(tornado.web.RequestHandler):
    """Post a new message to the chat room."""

    waiters = []

    async def post(self):
        message = {"id": str(uuid.uuid4()), "body": self.get_argument("body")}
        # render_string() returns a byte string, which is not supported
        # in json, so we must convert it to a character string.
        message["html"] = tornado.escape.to_unicode(
            self.render_string("message.html", message=message)
        )

        future = tornado.concurrent.Future()  # create a future
        # instead of saving a reference to the client,
        # save the future
        MessageNewHandler.waiters.append(future)

        if len(MessageNewHandler.waiters) < 2:
            # less than 2 clients
            # wait for next client's message
            msg_from_next_client = await future
            # the future will resolve when the next client
            # sets a result on it
            # then python will execute the following code
            self.finish(msg_from_next_client)
            # Note: since you've finished this connection
            # you should remove this future from the waiters list
            # since you can't reuse this connection again
        else:
            # at least 2 clients
            # set the current client's message
            # as a result on the previous client's future
            previous_client_future = MessageNewHandler.waiters[0]
            if not previous_client_future.done():
                # only set a result if you haven't set it already
                # otherwise you'll get an error
                previous_client_future.set_result(message)
3. A more practical example using tornado.concurrent.Future():
import tornado.concurrent


class Queue:
    """We'll keep the future related code in this class.

    This will allow us to present a cleaner, more intuitive usage api.
    """

    waiters = []

    @classmethod
    def get_message_from_next_client(cls):
        future = tornado.concurrent.Future()
        cls.waiters.append(future)
        return future

    @classmethod
    def send_message_to_prev_client(cls, message):
        previous_client_future = cls.waiters[0]
        if not previous_client_future.done():
            previous_client_future.set_result(message)


class MessageNewHandler(tornado.web.RequestHandler):
    """Post a new message to the chat room."""

    async def post(self):
        message = {"id": str(uuid.uuid4()), "body": self.get_argument("body")}
        message["html"] = tornado.escape.to_unicode(
            self.render_string("message.html", message=message)
        )

        if len(Queue.waiters) < 2:
            msg_from_next_client = await Queue.get_message_from_next_client()
            self.finish(msg_from_next_client)
        else:
            Queue.send_message_to_prev_client(message)
I had a look at the RequestHandler code at https://github.com/tornadoweb/tornado/blob/master/tornado/web.py
I got rid of the Connections class and changed MessageNewHandler to this:
class MessageNewHandler(tornado.web.RequestHandler):
    """Post a new message to the chat room."""

    clients = []

    def post(self):
        self._auto_finish = False
        message = {"id": str(uuid.uuid4()), "body": self.get_argument("body")}
        # render_string() returns a byte string, which is not supported
        # in json, so we must convert it to a character string.
        message["html"] = tornado.escape.to_unicode(
            self.render_string("message.html", message=message)
        )
        MessageNewHandler.clients.append(self)
        if len(MessageNewHandler.clients) > 1:
            MessageNewHandler.clients[0].finish(message)
So the two key things that made it work were self._auto_finish = False and MessageNewHandler.clients[0].finish(message).

Issue with saving OAuth request token using Tweepy as part of Facebook Messenger bot

I am developing a Facebook messenger bot using Flask and want to utilize the Twitter API for a feature of the bot. I am therefore using Tweepy to simplify the process. However, I am unable to get OAuth working in my program. I believe the source of the issue is that the request token is not saving or being received properly, because when I do auth.get_access_token I get an error - either "OAuth has no object request_token" or "string indices must be integers" depending on how I'm saving the OAuth handler instance. Sometimes, it also fails to get the request_token and doesn't send the link back to the user. I tried to check this by printing out the request token in my oauth_verification() function and it was blank. I've been stuck on this for a few hours, and any help would be greatly appreciated. My code is as follows:
PAT = '[pat here]'
auth = tweepy.OAuthHandler('[key here]', '[secret here]')
auth_req_token = ''


@app.route('/', methods=['GET'])
def handle_verification():
    print("Handling Verification.")
    if request.args.get('hub.verify_token', '') == '[verification token]':
        print("Verification successful!")
        return request.args.get('hub.challenge', '')
    else:
        print("Verification failed!")
        return 'Error, wrong validation token'


@app.route('/', methods=['POST'])
def handle_messages():
    print("Handling Messages")
    payload = request.get_data()
    print(payload)
    for sender, message in messaging_events(payload):
        print("Incoming from %s: %s" % (sender, message))
        parse_message(PAT, sender, message)
    return "ok"


def parse_message(PAT, sender, message):
    original_message = message
    message = str(message.decode('unicode_escape'))
    message = message.replace("?", "")
    if message.isdigit():
        oauth_verification(PAT, sender, original_message.decode("utf-8"))
    else:
        split_msg = message.split(" ")
        print(split_msg)
        try:
            platform = split_msg[split_msg.index("followers") - 1]
            does_location = split_msg.index("does") + 1
            have_location = split_msg.index("have")
            name = split_msg[does_location:have_location]
            name = " ".join(name)
            print("Name: " + name + " Platform: " + platform)
            init_oauth(name, PAT, sender)
        except ValueError:
            reply_error(PAT, sender)


def init_oauth(name, token, recipient):
    try:
        redirect_url = auth.get_authorization_url()
        auth_req_token = auth.request_token
        r = requests.post("https://graph.facebook.com/v2.6/me/messages",
                          params={"access_token": token},
                          data=json.dumps({
                              "recipient": {"id": recipient},
                              "message": {"text": "Please login to Twitter, and reply with your verification code " + redirect_url}
                          }),
                          headers={'Content-type': 'application/json'})
    except tweepy.TweepError:
        print('Error! Failed to get request token.')


def oauth_verification(token, recipient, verifier):
    auth.request_token = auth_req_token
    try:
        auth.get_access_token(verifier)  # issue is here - I am able to get the authentication link, but not the access token
        api = tweepy.API(auth)
        r = requests.post("https://graph.facebook.com/v2.6/me/messages",
                          params={"access_token": token},
                          data=json.dumps({
                              "recipient": {"id": recipient},
                              "message": {"text": "Successfully authenticated Twitter!"}
                          }),
                          headers={'Content-type': 'application/json'})
    except tweepy.TweepError:
        print('Error! Failed to get access token.')
As auth_req_token is a global variable, you need to use the global keyword to change its value in init_oauth:
def init_oauth(name, token, recipient):
    global auth_req_token
    try:
        redirect_url = auth.get_authorization_url()
        auth_req_token = auth.request_token
        # ...
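A related pitfall: a single module-level token will be overwritten whenever two users authenticate at the same time. A sketch of keying the pending token by Messenger sender id instead (the dict name is illustrative; everything else follows the code above):

```python
# Illustrative sketch: one pending request token per Messenger sender id,
# instead of a single shared global.
pending_request_tokens = {}

def init_oauth(name, token, recipient):
    try:
        redirect_url = auth.get_authorization_url()
        pending_request_tokens[recipient] = auth.request_token
        # ... send redirect_url to the user exactly as before ...
    except tweepy.TweepError:
        print('Error! Failed to get request token.')

def oauth_verification(token, recipient, verifier):
    auth.request_token = pending_request_tokens.pop(recipient, None)
    # ... auth.get_access_token(verifier) and the reply, as before ...
```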

Flask Application gets blocked on multithreading

I am using threading and storing the results from the threads using a single session. This works fine most of the time, except in a few scenarios where my whole application gets blocked, and I am not able to figure out the reason for that.
My application is getting blocked on notification.save() in the __filter_notifications_by_status_and_request_type method. notification.save() saves the data to the DB.
I am not able to figure out whether this is a DB issue or a threading/locking issue.
I am using a Flask app, which I am hitting through passenger_wsgi via Apache. After my application gets blocked, the server stops taking further requests.
DB Python library used: SQLAlchemy
class Inference:
    ##
    # @brief initializer of the Inference Handler
    #
    # @param kwargs keyword Arguments
    #
    # @return None
    def __init__(self, **kwargs):
        """ Calling the inference from here and get the result """
        if kwargs.has_key('IRISRequest'):
            self.iris_request = kwargs['IRISRequest']
        self.racerx_inference_config = Config.get_racerx_inference_config()
        self.thread_lock = threading.Lock()

    ##
    # @brief Call the Inference
    #
    # @return Inference Object
    def get_inference_object(self):
        log.info("get_inference_object is called")
        inference_map = {}
        inference_map['inference'] = {}
        if self.iris_request.system == "athena":
            url_to_notification_map = Config.get_url_to_notification_map()
            for notification_id, urls in url_to_notification_map.iteritems():
                inference_map['inference'][notification_id] = any(url in string.lower(self.iris_request.url) for url in urls)
            title_to_notification_map = Config.get_title_to_notification_map()
            if self.iris_request.context.has_key('title'):
                for notification_id, titles in title_to_notification_map.iteritems():
                    if not inference_map['inference'].has_key(notification_id) or inference_map['inference'][notification_id] == False:
                        inference_map['inference'][notification_id] = any(title in string.lower(self.iris_request.context['title']) for title in titles)
        return inference_map

    ##
    # @brief
    #
    # @return the list of the notifications required from the inference
    def get_notification_name_list(self):
        inference_object = self.get_inference_object()
        return [y for y in inference_object['inference'] if inference_object['inference'][y] == True]

    ##
    # @brief collect notifications from the various sources
    #
    # @return notification objects
    def get_notifications(self):
        if len(self.iris_request.notification_name_list) > 0:
            self.notification_name_list = self.iris_request.notification_name_list  # list of notification names is provided by the client
        else:
            self.notification_name_list = self.get_notification_name_list()  # get notification name list from the inference

        string_translations = {}
        for notification_name in self.notification_name_list:
            config = Config.get_config(notification_name)
            nt = {}
            nt['message'] = self.__get_message_from_template(config.message_map)
            nt['subject'] = self.__get_message_from_template(config.subject_map)
            nt['short_message'] = self.__get_message_from_template(config.short_message_map)
            nt['impact_summary'] = self.__get_message_from_template(config.impact_summary_map)
            action_string_map = {}
            for h in config.action_summary_map:
                if h.has_key('string_id'):
                    action_string_map[h['string_id']] = self.__get_message_from_template(h)
            nt['action_summary_list'] = action_string_map
            help_strings_map = {}
            for h in config.help_content:
                if h.has_key('string_id'):
                    help_strings_map[h['string_id']] = self.__get_message_from_template(h)
            nt['help_content_strings'] = help_strings_map
            string_translations[notification_name] = nt

        notifications_map = {}
        log.info("starting the thread pool for getting the notifications data")
        with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
            future_to_notification_name = dict((executor.submit(self.fetch_notifications_by_notification_name, notification_name, string_translations), notification_name)
                                               for notification_name in self.notification_name_list)
        log.info("end of threadpool")

        log.info("start processing the data produced by the thread pool")
        for future in concurrent.futures.as_completed(future_to_notification_name):
            notification_name = future_to_notification_name[future]
            if future.exception() is not None:
                raise Exception("Error occured while fetching the data for notification: " + notification_name + ", error: " + str(future.exception()))
            if len(future.result()) > 0:
                notifications_map[notification_name] = future.result()
        log.info("end processing the data produced by the thread pool")

        self.iris_request.session.commit()
        log.info("Commited the DB session for the notifications")
        return notifications_map

    ###
    # @brief This function collects the notifications for the specified notification type, by making an object model call
    #
    # @input notification_name : Type of the notification to be fetched
    # @input string_translations : List of string translations
    # @input notification_map : Map of notifications, collected notifications will be pushed to this map
    def fetch_notifications_by_notification_name(self, notification_name, string_translations):
        log.info("fetch_notifications_by_notification_name is called")
        object_model = ObjectModel(IRISRequest=self.iris_request, NotificationName=notification_name, StringMap=string_translations[notification_name])
        notifications = object_model.get_iris_notification_objects()
        filtered_notifications = self.__filter_notifications_by_status_and_request_type(notifications)
        if len(filtered_notifications) > 0:
            return filtered_notifications
        else:
            return []

    ###
    # @brief This function filters the notifications based on status, i.e. whether the notification is expired, snoozed or dismissed,
    # and also based on request type
    #
    # @input notifications: List of notifications
    #
    # @return Filtered notification list
    def __filter_notifications_by_status_and_request_type(self, notifications):
        log.info("__filter_notifications_by_status_and_request_type is called")
        filtered_notifications = []
        for notification in notifications:
            keep_notification = True
            # Extracting read status of notifications and storing new notifications
            log.info("Acquiring the lock on thread, to save the data into DB")
            self.thread_lock.acquire()
            notification.save()
            self.thread_lock.release()
            log.info("Releasing the lock after saving the data into DB")
            # Filtering inactive notifications, i.e. dismissed notifications
            if notification.is_active == False:
                keep_notification = False
            # Filtering expired notifications; if validity = -1 then the notification will never expire
            if notification.validity != -1 and (datetime.date.today() - notification.creation_date).days > notification.validity:
                keep_notification = False
            # Filtering out the snoozed notifications
            if notification.snooze_date != None and (datetime.datetime.today() - notification.snooze_date).days <= notification.snooze_duration:
                keep_notification = False
            # Filtering out unread notifications when request type is FETCH_READ
            if self.iris_request.notifcation_fetch_type == Constants.FETCH_TYPE_READ and notification.is_read == False:
                keep_notification = False
            # Filtering out read notifications when request type is FETCH_UNREAD
            if self.iris_request.notifcation_fetch_type == Constants.FETCH_TYPE_UNREAD and notification.is_read == True:
                keep_notification = False
            if keep_notification == True:
                filtered_notifications.append(notification)
        return filtered_notifications
I was using the lock in the following manner:
self.thread_lock.acquire()
notification.save()
self.thread_lock.release()
When notification.save() throws an exception, the lock is never released and other threads block forever.
This is easily fixed with proper error handling:
self.thread_lock.acquire()
try:
    notification.save()
except Exception as e:
    log.error("unable to store info in DB")
finally:
    self.thread_lock.release()
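Equivalently, the lock can be taken with a with block so it is released even if save() raises, without an explicit try/finally around the release; a small sketch using the same names as above:

```python
# Same fix expressed with a context manager: the lock is released
# automatically when the block exits, whether or not save() raised.
with self.thread_lock:
    try:
        notification.save()
    except Exception:
        log.error("unable to store info in DB", exc_info=True)
```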

Consolidation of query (task) in Django

I have the following task, which can take a few seconds to complete.
How can I make the task below make fewer trips to the database and run faster?
class SendMessage(Task):
    name = "Sending SMS"
    max_retries = 10
    default_retry_delay = 3

    def run(self, message_id, gateway_id=None, **kwargs):
        logging.debug("About to send a message.")

        # Because we don't always have control over transactions
        # in our calling code, we will retry up to 10 times, every 3
        # seconds, in order to try to allow for the commit to the database
        # to finish. That gives the server 30 seconds to write all of
        # the data to the database, and finish the view.
        try:
            message = Message.objects.get(pk=message_id)
        except Exception as exc:
            raise SendMessage.retry(exc=exc)

        if not gateway_id:
            if hasattr(message.billee, 'sms_gateway'):
                gateway = message.billee.sms_gateway
            else:
                gateway = Gateway.objects.all()[0]
        else:
            gateway = Gateway.objects.get(pk=gateway_id)

        # Check we have credits to send the message
        account = Account.objects.get(user=message.sender)
        # I'm getting the non-cached version here, check performance!!!!!
        if account._balance() >= message.length:
            response = gateway._send(message)

            if response.status == 'Sent':
                # Take credit from the user's account.
                transaction = Transaction(
                    account=account,
                    amount=-message.charge,
                    description="Debit: SMS Sent",
                )
                transaction.save()
                message.billed = True
                message.save()
        else:
            pass

        logging.debug("Done sending message.")
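One way to cut round trips is to fetch the related rows in the initial query instead of triggering lazy loads later; a sketch under the assumption that billee and sender are ForeignKey (or OneToOne) relations, as the attribute access above suggests:

```python
# Sketch only: select_related() joins the related rows into the first query,
# so later access to message.billee and message.sender does not trigger
# extra queries. Field names come from the snippet above and are assumed to
# be ForeignKey/OneToOne relations.
try:
    message = (Message.objects
               .select_related('billee', 'sender')
               .get(pk=message_id))
except Exception as exc:
    raise SendMessage.retry(exc=exc)
# ... the rest of run() stays the same ...
```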

Tornado testing async requests

I need advice regarding testing a Tornado app. For now I'm just playing with the demo chat application, but it looks like a real-life problem.
In the handler I have:
class MessageUpdatesHandler(BaseHandler):
    @tornado.web.authenticated
    @tornado.web.asynchronous
    def post(self):
        cursor = self.get_argument("cursor", None)
        global_message_buffer.wait_for_messages(self.on_new_messages,
                                                cursor=cursor)

    def on_new_messages(self, messages):
        # Closed client connection
        if self.request.connection.stream.closed():
            return
        self.finish(dict(messages=messages))


class MessageBuffer(object):
    def __init__(self):
        ....

    def wait_for_messages(self, callback, cursor=None):
        if cursor:
            new_count = 0
            for msg in reversed(self.cache):
                if msg["id"] == cursor:
                    break
                new_count += 1
            if new_count:
                callback(self.cache[-new_count:])
                return
        self.waiters.add(callback)

    def cancel_wait(self, callback):
        .....

    def new_messages(self, messages):
        logging.info("Sending new message to %r listeners", len(self.waiters))
        for callback in self.waiters:
            try:
                callback(messages)
            except:
                logging.error("Error in waiter callback", exc_info=True)
        self.waiters = set()
        self.cache.extend(messages)
        if len(self.cache) > self.cache_size:
            self.cache = self.cache[-self.cache_size:]
As I mentioned, the full source code is in the Tornado demos.
In my test I have:
@wsgi_safe
class MessageUpdatesHandlerTest(LoginedUserHanldersTest):
    Handler = MessageUpdatesHandler

    def test_add_message(self):
        from chatdemo import global_message_buffer
        kwargs = dict(
            method="POST",
            body='',
        )
        future = self.http_client.fetch(self.get_url('/'), callback=self.stop, **kwargs)
        message = {
            "id": '123',
            "from": "first_name",
            "body": "hello",
            "html": "html"
        }
        global_message_buffer.new_messages([message])
        response = self.wait()
        self.assertEqual(response.code, 200)
        self.mox.VerifyAll()
What happens:
1. It creates a future object.
2. It sends a hello message; at this moment no waiter is registered in MessageBuffer, so the callback is not called.
3. self.wait() starts the IOLoop and performs the POST fetch, and the waiter becomes registered in MessageBuffer.
4. The callback is never called and my response remains empty, so everything fails with AssertionError: Async operation timed out after 5 seconds.
What I want it to do:
- On POST, register itself as a waiter
- Receive some messages
- Return a 200 response to me
Thank you for your help
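One way to get that ordering inside an AsyncHTTPTestCase is to schedule the new_messages() call on the IOLoop so it only runs once the POST has been processed far enough to register its waiter. A sketch keeping the existing callback/self.wait() pattern (the 0.2 s delay is an arbitrary assumption, not a value from the demo):

```python
def test_add_message(self):
    from chatdemo import global_message_buffer

    message = {
        "id": '123',
        "from": "first_name",
        "body": "hello",
        "html": "html"
    }

    # Start the POST; the handler registers itself as a waiter only once
    # the IOLoop is running inside self.wait().
    self.http_client.fetch(self.get_url('/'), callback=self.stop,
                           method="POST", body='')

    # Deliver the message on the IOLoop *after* the request has had a chance
    # to add its waiter, instead of calling new_messages() synchronously.
    self.io_loop.add_timeout(self.io_loop.time() + 0.2,
                             lambda: global_message_buffer.new_messages([message]))

    response = self.wait()
    self.assertEqual(response.code, 200)
```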
