Async call without waiting while using PubNub with Django - Python

This is the module I'm using to publish a message to a PubNub channel from an API. By design, I've made the PubNub object a singleton.
import logging

import yaml
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
from pubnub.exceptions import PubNubException

logger = logging.getLogger(__name__)


class Pubnub:
    instance = None

    @classmethod
    def get(cls):
        if cls.instance is None:
            cls.instance = cls()
        return cls.instance

    def __init__(self):
        with open('config/config.yaml', 'r') as stream:
            try:
                conf = yaml.safe_load(stream)
                pnconfig = PNConfiguration()
                pnconfig.subscribe_key = conf['pubnub']['subscribe_key']
                pnconfig.publish_key = conf['pubnub']['publish_key']
                pnconfig.ssl = False
                self.pubnub = PubNub(pnconfig)
            except yaml.YAMLError as e:
                logger.error(str(e))

    def publish(self, channel):
        try:
            envelope = self.pubnub.publish().channel(channel).message({
                'message': True
            }).sync()
            print("publish timetoken: %d" % envelope.result.timetoken)
        except PubNubException as e:
            logger.error(str(e))
This is how I'm calling it,
from rest_framework import status, views
from rest_framework.response import Response


class SendCommunityTextMessage(views.APIView):

    def post(self, request, **kwargs):
        try:
            client_id = request.GET['client_id']
            client_secret = request.GET['client_secret']
            if Authenticator.authenticate_client(client_id, client_secret):
                try:
                    # do something
                    try:
                        # do something more
                        pubnub = Pubnub.get()
                        pubnub.publish(receiver.hex_code)
                        return Response({"Success": CommunityTextMessageSerializer(message).data},
                                        status=status.HTTP_200_OK)
                    except KeyError as e:
                        return Response({"Failure": str(e)}, status=status.HTTP_400_BAD_REQUEST)
                except (User.DoesNotExist, CommunityRoom.DoesNotExist) as e:
                    return Response({"Failure": str(e)}, status=status.HTTP_404_NOT_FOUND)
            else:
                return Response({"Failure": "Invalid client"}, status=status.HTTP_403_FORBIDDEN)
        except KeyError as _:
            return Response({"Failure": "Probably a typo, read the docs to use this API."},
                            status=status.HTTP_400_BAD_REQUEST)
The issue is that this slows the API down by minutes. How can I call these two lines,
pubnub = Pubnub.get()
pubnub.publish(receiver.hex_code)
asynchronously, returning from the view without waiting for the call to finish?
Thanks in anticipation.
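One option is to hand the publish off to a background worker so the view can return immediately. Below is a minimal fire-and-forget sketch using a module-level thread pool; it assumes that losing a publish on a process restart is acceptable (a task queue such as Celery would be more robust for production). The PubNub SDK also offers a callback-based pn_async() alternative to sync() that may avoid the extra thread pool.

from concurrent.futures import ThreadPoolExecutor

# Module-level pool, shared across requests; a couple of workers is plenty
# for fire-and-forget publishes.
_executor = ThreadPoolExecutor(max_workers=2)


def publish_async(channel):
    pubnub = Pubnub.get()
    # submit() returns immediately; the blocking .sync() publish runs on a worker thread.
    _executor.submit(pubnub.publish, channel)

In the view, replace the two lines with publish_async(receiver.hex_code) and return the response right away; errors are still logged inside Pubnub.publish() as before.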

Related

Change "Error: response status" details in FastAPI / OpenAPI

I implemented some custom exceptions, and now I want to display the exception name in the red box (in the generated OpenAPI docs). Where can I set this value?
from fastapi import FastAPI, Request
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse

app = FastAPI()


class CustomExceptionHandler(Exception):
    def __init__(self, exception):
        self.message = exception.message
        self.status_code_number = exception.status_code_number


@app.exception_handler(CustomExceptionHandler)
async def data_processing_error_handler(request: Request, exc: CustomExceptionHandler):
    return JSONResponse(
        status_code=exc.status_code_number,
        content=jsonable_encoder({exc.message}),
    )
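If the goal is to change the text shown next to a status code in the generated OpenAPI docs, that description normally comes from the responses parameter of the path operation rather than from the exception class. A minimal sketch follows; the route, the 418 status code and the "MyCustomError" string are placeholders, not the asker's app code.

from fastapi import FastAPI, HTTPException

app = FastAPI()


@app.get(
    "/items/{item_id}",
    responses={418: {"description": "MyCustomError"}},  # shown as the description for 418 in the docs
)
async def read_item(item_id: int):
    if item_id == 0:
        raise HTTPException(status_code=418, detail="MyCustomError")
    return {"item_id": item_id}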

Want to generate databricks notebook URL to send alerts

import os
import subprocess

# Note: _get_active_spark_session() is assumed to be defined elsewhere in the same project.


def _get_dbutils():
    try:
        import IPython
        ip_shell = IPython.get_ipython()
        if ip_shell is None:
            raise _NoDbutilsError
        return ip_shell.ns_table["user_global"]["dbutils"]
    except ImportError:
        raise _NoDbutilsError
    except KeyError:
        raise _NoDbutilsError


class _NoDbutilsError(Exception):
    pass


def _get_java_dbutils():
    dbutils = _get_dbutils()
    return dbutils.notebook.entry_point.getDbutils()


def _get_command_context():
    return _get_java_dbutils().notebook().getContext()


def _get_extra_context(context_key):
    return _get_command_context().extraContext().get(context_key).get()


def _get_context_tag(context_tag_key):
    tag_opt = _get_command_context().tags().get(context_tag_key)
    if tag_opt.isDefined():
        return tag_opt.get()
    else:
        return None


def acl_path_of_acl_root():
    try:
        return _get_command_context().aclPathOfAclRoot().get()
    except Exception:
        return _get_extra_context("aclPathOfAclRoot")


def _get_property_from_spark_context(key):
    try:
        from pyspark import TaskContext  # pylint: disable=import-error
        task_context = TaskContext.get()
        if task_context:
            return task_context.getLocalProperty(key)
    except Exception:
        return None


def is_databricks_default_tracking_uri(tracking_uri):
    return tracking_uri.lower().strip() == "databricks"


def is_in_databricks_notebook():
    if _get_property_from_spark_context("spark.databricks.notebook.id") is not None:
        return True
    try:
        return acl_path_of_acl_root().startswith("/workspace")
    except Exception:
        return False


def is_in_databricks_job():
    try:
        return get_job_id() is not None and get_job_run_id() is not None
    except Exception:
        return False


def is_in_databricks_runtime():
    try:
        # pylint: disable=unused-import,import-error,no-name-in-module,unused-variable
        import pyspark.databricks
        return True
    except ModuleNotFoundError:
        return False


def is_dbfs_fuse_available():
    with open(os.devnull, "w") as devnull_stderr, open(os.devnull, "w") as devnull_stdout:
        try:
            return (
                subprocess.call(
                    ["mountpoint", "/dbfs"], stderr=devnull_stderr, stdout=devnull_stdout
                )
                == 0
            )
        except Exception:
            return False


def is_in_cluster():
    try:
        spark_session = _get_active_spark_session()
        return (
            spark_session is not None
            and spark_session.conf.get("spark.databricks.clusterUsageTags.clusterId") is not None
        )
    except Exception:
        return False


def get_notebook_id():
    """Should only be called if is_in_databricks_notebook is true"""
    notebook_id = _get_property_from_spark_context("spark.databricks.notebook.id")
    if notebook_id is not None:
        return notebook_id
    acl_path = acl_path_of_acl_root()
    if acl_path.startswith("/workspace"):
        return acl_path.split("/")[-1]
    return None


def get_notebook_path():
    """Should only be called if is_in_databricks_notebook is true"""
    path = _get_property_from_spark_context("spark.databricks.notebook.path")
    if path is not None:
        return path
    try:
        return _get_command_context().notebookPath().get()
    except Exception:
        return _get_extra_context("notebook_path")


def get_databricks_runtime():
    if is_in_databricks_runtime():
        spark_session = _get_active_spark_session()
        if spark_session is not None:
            return spark_session.conf.get(
                "spark.databricks.clusterUsageTags.sparkVersion", default=None
            )
    return None


def get_cluster_id():
    spark_session = _get_active_spark_session()
    if spark_session is None:
        return None
    return spark_session.conf.get("spark.databricks.clusterUsageTags.clusterId")


def get_job_group_id():
    try:
        dbutils = _get_dbutils()
        job_group_id = dbutils.entry_point.getJobGroupId()
        if job_group_id is not None:
            return job_group_id
    except Exception:
        return None


def get_job_id():
    try:
        return _get_command_context().jobId().get()
    except Exception:
        return _get_context_tag("jobId")


def get_job_run_id():
    try:
        return _get_command_context().idInJob().get()
    except Exception:
        return _get_context_tag("idInJob")


def get_job_type():
    """Should only be called if is_in_databricks_job is true"""
    try:
        return _get_command_context().jobTaskType().get()
    except Exception:
        return _get_context_tag("jobTaskType")


def get_command_run_id():
    try:
        return _get_command_context().commandRunId().get()
    except Exception:
        # Older runtimes may not have the commandRunId available
        return None


def get_webapp_url():
    """Should only be called if is_in_databricks_notebook or is_in_databricks_job is true"""
    url = _get_property_from_spark_context("spark.databricks.api.url")
    if url is not None:
        return url
    try:
        return _get_command_context().apiUrl().get()
    except Exception:
        return _get_extra_context("api_url")


def get_workspace_id():
    try:
        return _get_command_context().workspaceId().get()
    except Exception:
        return _get_context_tag("orgId")


def get_browser_hostname():
    try:
        return _get_command_context().browserHostName().get()
    except Exception:
        return _get_context_tag("browserHostName")


def get_workspace_info_from_dbutils():
    dbutils = _get_dbutils()
    if dbutils:
        browser_hostname = get_browser_hostname()
        workspace_host = "https://" + browser_hostname if browser_hostname else get_webapp_url()
        workspace_id = get_workspace_id()
        browser_hash = _get_context_tag("browserHash")
        return workspace_host + "/?o=" + workspace_id + browser_hash
    return None, None
This code helps me generate the notebook URL. When I call get_workspace_info_from_dbutils() I get
https://odyssey-lakehouse-dev-bronze.cloud.databricks.com/?o=7808874896028593#notebook/3018684734636397/command/3018684734636399
But when I run the same notebook as a job in Databricks, the browserHostName and browserHash don't get generated,
and I get something like this:
'https://ireland.cloud.databricks.com/?o=7808874896028593#/api/2.0/workspace/get-notebook-snapshot'
You are not getting browserHostName and browserHash probably because, when the notebook runs as a job, there is no notebook interface open in a browser; the code just executes on the cluster (which is where the URL you are getting comes from).
Since notebooks generally reside inside a single workspace/Databricks account, you can keep the hostname and the workspace ID as constants. You can try getting the notebook information for a job run using the Jobs API and then use the Workspace API to get the rest of the information.
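If the constants approach works for you, a sketch along these lines could build a job-run link from the helpers in the question. The URL pattern https://<host>/?o=<workspace_id>#job/<job_id>/run/<run_id> and the WORKSPACE_HOST constant are assumptions for your deployment, not something taken from the question, and may differ between Databricks releases.

WORKSPACE_HOST = "https://ireland.cloud.databricks.com"  # constant per workspace (assumption)


def get_job_run_url():
    # Reuses get_workspace_id(), get_job_id() and get_job_run_id() from the code above.
    workspace_id = get_workspace_id()
    job_id = get_job_id()
    run_id = get_job_run_id()
    if job_id is None or run_id is None:
        return None
    return "%s/?o=%s#job/%s/run/%s" % (WORKSPACE_HOST, workspace_id, job_id, run_id)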

Python 3: Thread still alive after being joined

This is my code:
import functools
from threading import Thread


def timeout(seconds_before_timeout):
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            res = [
                Exception("function [%s] timeout [%s seconds] exceeded!"
                          % (func.__name__, seconds_before_timeout))
            ]

            def new_func():
                try:
                    res[0] = func(*args, **kwargs)
                except Exception as ex:
                    res[0] = ex

            thread = Thread(target=new_func)
            thread.daemon = True
            try:
                thread.start()
                thread.join(seconds_before_timeout)
            except Exception as ex:
                print("error starting thread")
                raise ex
            ret = res[0]
            if isinstance(ret, BaseException):
                raise ret
            return ret
        return wrapper
    return deco
And this is how the timeout decorator is used:

@timeout(2)
def listen_for_a_new_campaign(self):
    """
    Start listening for a new campaign in the list_campaign queue.
    """
    while True:
        try:
            for method_frame, properties, body \
                    in self.obj_requester_channel.consume(LIST_CAMPAIGN_QUEUE):
                body_dict = literal_eval(body.decode("utf-8"))
                message_number = body_dict["Msg_Count"]
                n_message = min(message_number, BATCH_SIZE)
                identify(n_message)
                a_request = {
                    "campaign_request": body_dict,
                    "campaign_ack": method_frame.delivery_tag,
                    "n_message": n_message
                }
                identify(a_request)
                return a_request
            # Acknowledge the message
            n_requeued_messages = self.obj_requester_channel.cancel()
            print("Requeued %i messages" % n_requeued_messages)
            break
        except pika.exceptions.ConnectionWrongStateError:
            print("Create connection ...")
            self.create_connection()
            continue
        except pika.exceptions.ChannelWrongStateError:
            print("Create connection ...")
            self.create_connection()
            self.obj_requester_channel = self.obj_connection.channel()
            self.obj_requester_channel.queue_declare(queue=LIST_CAMPAIGN_QUEUE)
            self.obj_campaign_channel = self.obj_connection.channel()
            continue
When I run my program, I checked all processes with htop, and the result (screenshot not included here) shows that all the threads are still alive.
I don't know what's wrong with that.
When I run this code on my laptop everything is OK, but when I deploy it to an EC2 instance I see this problem.
Help me!!
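For reference, Thread.join(timeout) only waits; it never stops the target thread, so a daemon worker that is still inside func keeps running after the decorator returns, which matches what htop shows. A small, self-contained illustration (standard library only, unrelated to pika):

import time
from threading import Thread


def slow():
    time.sleep(60)  # keeps running long after join() returns


t = Thread(target=slow, daemon=True)
t.start()
t.join(2)            # waits at most 2 seconds; it does NOT stop the thread
print(t.is_alive())  # True: the worker is still alive, which is what htop reports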

Exception object and missing __context__ and __cause__ attributes in IDE

The python docs https://docs.python.org/3/library/exceptions.html#built-in-exceptions
and this SO question mention the __cause__ and __context__ attributes on an exception object.
Python 3.x (beazley): __context__ vs __cause__ attributes in exception handling
However, when using a debugger to inspect an exception object (with the debugger set to break on raise), the exception object doesn't appear to have these attributes and just appears as a tuple.
(e.g. from my debugger, PyCharm) https://imgur.com/a/63oW1fV
This occurred with the debugger triggered by the raise on the last line.
import json

import requests

# `errors` is the module shown below (errors.py); method, url, payload, HEADERS and
# logger are defined elsewhere in the application.
import errors

try:
    response = requests.request(method, url, json=payload, headers=HEADERS)
except (requests.ConnectionError, requests.Timeout) as e:
    logger.exception("Api unavailable")
    raise errors.Unavailable('Api unavailable') from e

try:
    response.raise_for_status()
except requests.exceptions.HTTPError as e:
    logger.exception("Api HTTP error")
    try:
        raw_data = response.json()
    except json.JSONDecodeError as e:
        raise errors.ApiHTTPError(f'{response.status_code}, {response.text}',
                                  text=response.text) from e
    api_errors = raw_data.get('errors')
    message = raw_data.get('message')
    raise errors.ApiHTTPError(f'HTTP Error {response.status_code}, {message}, {api_errors}',
                              text=response.text, api_errors=api_errors) from e
errors.py
class SwitcherError(Exception):
    pass


class Unavailable(SwitcherError):
    pass


class ApiHTTPError(SwitcherError):
    def __init__(self, message=None, text=None, api_errors=None):
        self.text = text
        self.message = message
        self.errors = api_errors

    def __str__(self):
        return self.message


class ApiJsonError(SwitcherError):
    def __init__(self, message=None, text=None):
        self.text = text
        self.message = message

    def __str__(self):
        return self.message


class ApiError(SwitcherError):
    def __init__(self, message, status_code, data, status=None):
        self.message = message
        self.status_code = status_code
        self.data = data
        self.status = status

    def __str__(self):
        return self.message
That __exception__ thing isn't an exception. It looks like PyCharm has taken sys.exc_info() and stuffed it into an __exception__ variable. The exception is the second tuple element, and that's where you should be looking for __cause__ and __context__.
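A quick way to see where the attributes live without a debugger (plain Python, not the asker's API code):

try:
    try:
        1 / 0
    except ZeroDivisionError as inner:
        raise ValueError("wrapper") from inner
except ValueError as e:
    print(type(e.__cause__))    # <class 'ZeroDivisionError'>, set by "raise ... from"
    print(type(e.__context__))  # <class 'ZeroDivisionError'>, implicit chaining

In PyCharm, expand the second element of the __exception__ tuple (the exception instance) and the __cause__ and __context__ attributes should be visible there.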

Exception in thread StompReceiverThread-1

I'm having trouble with this error:
Exception in thread StompReceiverThread-1 (most likely raised during
interpreter shutdown):
There is no traceback at all, just that.
Usually everything works fine, but occasionally this happens and then the action does not complete.
Any tips?
My code:
class Listener(stomp.ConnectionListener):
def __init__(self, conn, request):
self.conn = conn
self.request = request
def on_error(self, headers, message):
global WAITING_RESPONSE
print('received an error: ' + message)
WAITING_RESPONSE = False
def on_message(self, headers, message):
global WAITING_RESPONSE
try:
msg = json.loads(message)
if str(msg.get('transaction_id','')) == str(CURRENT_ID):
printDebugLine('Queue response:'+str(message))
manageQueueResponse(message,self.request)
WAITING_RESPONSE = False
self.conn.ack(headers['message-id'], '11')
except stomp.exception.ConnectFailedException:
print('Stomp error on message')
sys.exit(3)
except Exception as e:
print('ERROR: %s' % str(e))
sys.exit(3)
class Queue(object):
def __init__(self):
self.host = xx
self.port = xx
self.login = xx
self.passwd = xx
self.request = {}
self.start()
def start(self):
try:
self.conn = stomp.Connection(host_and_ports=[(self.host, self.port)])
self.conn.start()
self.conn.connect(self.login, self.passwd, wait=True)
self.conn.set_listener('xx', Listener(self.conn, self.request))
self.conn.subscribe(destination='xx', id='xx', ack='xx')
except stomp.exception.ConnectFailedException:
print('ERROR: unable to connect')
sys.exit(3)
except Exception as e:
print('ERROR: %s' % str(e))
sys.exit(3)
def send(self, data):
global CURRENT_ID
while WAITING_RESPONSE:
time.time(0.1)
try:
CURRENT_ID = str(uuid.uuid4())
data.update({'transaction_id': CURRENT_ID})
b = json.dumps(data)
self.request.update(data)
printDebugLine('Queue request:'+str(data))
self.conn.send(body=b, destination='xx')
timeout(data,self.request,29)
except stomp.exception.ConnectFailedException:
print('ERROR: unable to connect')
except Exception as e:
print('ERROR: %s' % str(e))
It looks like your main program is exiting and the interpreter is cleaning things up, but the STOMP receiver thread was not shut down first. The receiver thread goes to do something, but basic modules are no longer available, so it reports an exception; it cannot print a traceback because that functionality is also gone while the program is shutting down.
Look at why the main program would be exiting.
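One way to rule out a shutdown-ordering problem is to disconnect the STOMP connection explicitly before the interpreter starts tearing modules down, for example with an atexit hook. A minimal sketch, assuming the Queue class from the question; whether atexit is the right place depends on how your program terminates:

import atexit

queue = Queue()

# conn.disconnect() stops the receiver thread cleanly, so it is not left running
# while modules are being cleaned up at interpreter shutdown.
atexit.register(queue.conn.disconnect)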
