How to send email notifications using Cloud Pub/Sub - Python

How can I send an email notification from Pub/Sub using a Python script when files are uploaded to Google Cloud (Compute Engine)?

The following example illustrates the creation of notification channels with Python:
import json

import google.api_core.exceptions
from google.cloud import monitoring_v3

def restore(project_name, backup_filename):
    print(
        "Loading alert policies and notification channels from {}.".format(
            backup_filename
        )
    )
    record = json.load(open(backup_filename, "rt"))
    is_same_project = project_name == record["project_name"]
    # Convert dicts to AlertPolicies.
    policies_json = [json.dumps(policy) for policy in record["policies"]]
    policies = [
        monitoring_v3.AlertPolicy.from_json(policy_json)
        for policy_json in policies_json
    ]
    # Convert dicts to NotificationChannels
    channels_json = [json.dumps(channel) for channel in record["channels"]]
    channels = [
        monitoring_v3.NotificationChannel.from_json(channel_json)
        for channel_json in channels_json
    ]

    # Restore the channels.
    channel_client = monitoring_v3.NotificationChannelServiceClient()
    channel_name_map = {}

    for channel in channels:
        updated = False
        print("Updating channel", channel.display_name)
        # This field is immutable and it is illegal to specify a
        # non-default value (UNVERIFIED or VERIFIED) in the
        # Create() or Update() operations.
        channel.verification_status = (
            monitoring_v3.NotificationChannel.VerificationStatus.VERIFICATION_STATUS_UNSPECIFIED
        )

        if is_same_project:
            try:
                channel_client.update_notification_channel(notification_channel=channel)
                updated = True
            except google.api_core.exceptions.NotFound:
                pass  # The channel was deleted. Create it below.

        if not updated:
            # The channel no longer exists. Recreate it.
            old_name = channel.name
            del channel.name
            new_channel = channel_client.create_notification_channel(
                name=project_name, notification_channel=channel
            )
            channel_name_map[old_name] = new_channel.name

    # Restore the alerts
    alert_client = monitoring_v3.AlertPolicyServiceClient()

    for policy in policies:
        print("Updating policy", policy.display_name)
        # These two fields cannot be set directly, so clear them.
        del policy.creation_record
        del policy.mutation_record

        # Update old channel names with new channel names.
        for i, channel in enumerate(policy.notification_channels):
            new_channel = channel_name_map.get(channel)
            if new_channel:
                policy.notification_channels[i] = new_channel

        updated = False
        if is_same_project:
            try:
                alert_client.update_alert_policy(alert_policy=policy)
                updated = True
            except google.api_core.exceptions.NotFound:
                pass  # The policy was deleted. Create it below.
            except google.api_core.exceptions.InvalidArgument:
                # Annoying that API throws InvalidArgument when the policy
                # does not exist. Seems like it should throw NotFound.
                pass  # The policy was deleted. Create it below.

        if not updated:
            # The policy no longer exists. Recreate it.
            old_name = policy.name
            del policy.name
            for condition in policy.conditions:
                del condition.name
            policy = alert_client.create_alert_policy(
                name=project_name, alert_policy=policy
            )
        print("Updated", policy.name)
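A hypothetical usage sketch (the project ID and backup file name below are placeholders; the create calls expect the project resource name "projects/PROJECT_ID"):

# "projects/my-project-id" and "alert_backup.json" are placeholders; the backup
# file is assumed to have been produced by the matching backup sample.
restore("projects/my-project-id", "alert_backup.json")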
For more information, I recommend reading the documentation on managing notification channels by API.
This product or feature is covered by the Pre-GA Offerings Terms of the Google Cloud Terms of Service. Pre-GA products and features might have limited support, and changes to pre-GA products and features might not be compatible with other pre-GA versions. For more information, see the launch stage descriptions.
Additionally, you may want to check the information about setting up a messaging app; it describes how to use Pub/Sub and Cloud Functions to enable near-real-time notifications through the SendGrid Email API, Slack, and Webex Teams.
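Since the original question is about emailing when files are uploaded, here is a minimal sketch of that Pub/Sub-to-email pattern. It assumes a notification topic already publishes a message per upload; it is not the code from the linked guide, and the sender/recipient addresses and the SENDGRID_API_KEY environment variable are placeholders.

# main.py - deployed as a Pub/Sub-triggered Cloud Function (background function signature)
import base64
import os

from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail

def notify_upload(event, context):
    """Sends an email for each Pub/Sub message published on the upload topic."""
    # Pub/Sub delivers the payload base64-encoded in event["data"].
    payload = base64.b64decode(event["data"]).decode("utf-8") if "data" in event else "(no payload)"
    message = Mail(
        from_email="alerts@example.com",   # placeholder sender
        to_emails="me@example.com",        # placeholder recipient
        subject="New file uploaded",
        plain_text_content="Upload notification: " + payload,
    )
    SendGridAPIClient(os.environ["SENDGRID_API_KEY"]).send(message)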

Related

Automatically subscribe to YouTube channels via Channel_ID

I'm fairly new to Python and I'm trying to migrate subscriptions from an older YouTube account to a newer one that I'll use going forward. I pulled my subscriptions export from the old one and have around 470+ subs that I'll need to migrate over.
I found this article, which works for automatically subscribing to a YouTube channel via its channel_id, but it seems that with the key-value pair I can only run the .py script once per value.
I tried all sorts of googling to see how I can include multiple values in the key (channelId), but it always only auto-subs to the last one in the dictionary.
Can someone please show me what I'm missing? I feel like there has to be a way to add multiple channelId values in that key dictionary, right?!
Here's what my code looks like:
import os

import google.oauth2.credentials
import google_auth_oauthlib.flow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from google_auth_oauthlib.flow import InstalledAppFlow

# The CLIENT_SECRETS_FILE variable specifies
# the name of a file that contains
# client_id and client_secret.
CLIENT_SECRETS_FILE = "client_secret.json"

# This scope allows for full read/write access
# to the authenticated user's account and
# requires requests to use an SSL connection.
SCOPES = ['https://www.googleapis.com/auth/youtube.force-ssl']
API_SERVICE_NAME = 'youtube'
API_VERSION = 'v3'

def get_authenticated_service():
    flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS_FILE, SCOPES)
    credentials = flow.run_console()
    return build(API_SERVICE_NAME, API_VERSION, credentials=credentials)

def print_response(response):
    print(response)

# Build a resource based on a list of
# properties given as key-value pairs.
# Leave properties with empty values out
# of the inserted resource.
def build_resource(properties):
    resource = {}
    for p in properties:
        # Given a key like "snippet.title", split into
        # "snippet" and "title", where "snippet" will be
        # an object and "title" will be a property in that object.
        prop_array = p.split('.')
        ref = resource
        for pa in range(0, len(prop_array)):
            is_array = False
            key = prop_array[pa]
            # For properties that have array values, convert a name like
            # "snippet.tags[]" to snippet.tags, and set a flag to handle
            # the value as an array.
            if key[-2:] == '[]':
                key = key[0:len(key)-2:]
                is_array = True
            if pa == (len(prop_array) - 1):
                # Leave properties without values
                # out of inserted resource.
                if properties[p]:
                    if is_array:
                        ref[key] = properties[p].split(', ')
                    else:
                        ref[key] = properties[p]
            elif key not in ref:
                # For example, the property is "snippet.title",
                # but the resource does not yet have a "snippet"
                # object. Create the snippet object here.
                # Setting "ref = ref[key]" means that in the
                # next time through the "for pa in range ..." loop,
                # we will be setting a property in the
                # resource's "snippet" object.
                ref[key] = {}
                ref = ref[key]
            else:
                # For example, the property is "snippet.description",
                # and the resource already has a "snippet" object.
                ref = ref[key]
    return resource

# Remove keyword arguments that are not set
def remove_empty_kwargs(**kwargs):
    good_kwargs = {}
    if kwargs is not None:
        for key, value in kwargs.items():
            if value:
                good_kwargs[key] = value
    return good_kwargs

def subscriptions_insert(client, properties, **kwargs):
    resource = build_resource(properties)
    kwargs = remove_empty_kwargs(**kwargs)
    response = client.subscriptions().insert(
        body=resource, **kwargs).execute()
    return print_response(response)

if __name__ == '__main__':
    # When running locally, disable OAuthlib's
    # HTTPs verification. When running in production
    # * do not * leave this option enabled.
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
    client = get_authenticated_service()
    subscriptions_insert(client,
        {'snippet.resourceId.kind': 'youtube#channel',
         'snippet.resourceId.channelId': 'UC09fL42MpkktKZWmWxYiDhw', 'UC0Q7Hlz75NYhYAuq6O0fqHw'},
        part='snippet')
According to the YouTube Data API v3 documentation (the Subscriptions: insert endpoint and the Subscriptions resource), it seems that you can only subscribe to one channel per request. Since you have 10,000 units of quota per day by default (unless you request extended quota), and Subscriptions: insert costs 50 units, subscribing to 470+ channels would take about 3 days.
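A minimal sketch of that one-request-per-channel loop, reusing the subscriptions_insert helper from the question (the channel IDs below are placeholders):

# Hypothetical list of channel IDs exported from the old account.
channel_ids = ['UC09fL42MpkktKZWmWxYiDhw', 'UC0Q7Hlz75NYhYAuq6O0fqHw']

for channel_id in channel_ids:
    # One Subscriptions.insert call (50 quota units) per channel.
    subscriptions_insert(client,
        {'snippet.resourceId.kind': 'youtube#channel',
         'snippet.resourceId.channelId': channel_id},
        part='snippet')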
Otherwise you can proceed as follows. It seems that the first time I tried this with ~500 channels I got subscribed to ~290 of them, but now I mostly only receive the following (when removing -H 'Accept-Encoding: gzip, deflate, br' from the cURL request):
{
  "error": {
    "code": 429,
    "message": "Resource has been exhausted (e.g. check quota).",
    "errors": [
      {
        "message": "Resource has been exhausted (e.g. check quota).",
        "domain": "global",
        "reason": "rateLimitExceeded"
      }
    ],
    "status": "RESOURCE_EXHAUSTED"
  }
}
So it's an unreliable method that you can experiment with further.
Ever wondered how to do that in a single request without using any quota?
Go to a YouTube channel YOUR_CHANNEL that you want to subscribe to: https://www.youtube.com/channel/YOUR_CHANNEL_ID
Open the Network tab of your web-browser by using Ctrl + Shift + E (on Firefox) and filter XHR requests.
Now click on Subscribe.
You should see a request to subscribe, copy it as cURL (by right-clicking).
Change at the end
"channelIds":["YOUR_CHANNEL_ID"]
to:
"channelIds":["YOUR_CHANNEL_ID_0, YOUR_CHANNEL_ID_1, ..., YOUR_CHANNEL_ID_499"]
Where YOUR_CHANNEL_ID_0 is your YOUR_CHANNEL_ID, YOUR_CHANNEL_ID_1 is the second channel you want to subscribe to, and so forth.
Execute the modified cURL request in a terminal and that's it!
Note that this webpage contains a subscriptions count and this one contains all your subscriptions.
To get more than 249 different channels, I used:
import requests, json

channelIds = set()
pageToken = ''
API_KEY = 'AIzaSy...'
i = 0

while len(channelIds) < 250:
    url = f'https://www.googleapis.com/youtube/v3/search?q={i}&type=channel&maxResults=50&key={API_KEY}'
    if pageToken != '':
        url += f"&pageToken={pageToken}"
    content = requests.get(url).text
    data = json.loads(content)
    for item in data['items']:
        channelIds.add(item['id']['channelId'])
    print(len(channelIds))
    if 'nextPageToken' in data:
        pageToken = data['nextPageToken']
    else:
        break
    i += 1

print('["' + '","'.join(channelIds) + '"]', len(channelIds))
As @Benjamin Loison has mentioned, there is a quota limit on the usage of the API. If you'd like to raise the limit, I think there is a form you can fill out to request more. However, I don't recommend doing so, since the form is mainly intended for large applications that will be used for a long time, and it involves a long process of human review of what you're trying to build (this is based on my personal experience and might not be entirely accurate).
My suggestion would be to use the script you have to print out a list of channel links, as sketched below, and then click into each of them and press the subscribe button. 470-ish channels should not take you very long.
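A minimal sketch of printing those links, assuming channel_ids holds the IDs exported from the old account:

# Hypothetical list of exported channel IDs.
channel_ids = ['UC09fL42MpkktKZWmWxYiDhw', 'UC0Q7Hlz75NYhYAuq6O0fqHw']

for channel_id in channel_ids:
    print(f'https://www.youtube.com/channel/{channel_id}')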

Power BI Embedding Paginated Reports

I am having difficulties setting up my Python application to embed a paginated Power BI report. I can get it working fine with a normal report/visual, but I cannot with a paginated report.
Does anyone have an example of Python code they use to render a paginated report inside their application?
I have been trawling the web and the documentation, and I must be missing some simple config.
def get_embed_params_for_multiple_reports(self, workspace_id, report_ids, additional_dataset_ids=None):
    '''Get embed params for multiple reports for a single workspace

    Args:
        workspace_id (str): Workspace Id
        report_ids (list): Report Ids
        additional_dataset_ids (list, optional): Dataset Ids which are different than the ones bound to the reports. Defaults to None.

    Returns:
        EmbedConfig: Embed token and Embed URLs
    '''
    # Note: This method is an example and is not consumed in this sample app
    dataset_ids = []
    # To store multiple report info
    reports = []

    for report_id in report_ids:
        report_url = f'https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/reports/{report_id}'
        api_response = requests.get(report_url, headers=self.get_request_header())
        if api_response.status_code != 200:
            abort(api_response.status_code, description=f'Error while retrieving Embed URL\n{api_response.reason}:\t{api_response.text}\nRequestId:\t{api_response.headers.get("RequestId")}')
        api_response = json.loads(api_response.text)
        report_config = ReportConfig(api_response['id'], api_response['name'], api_response['embedUrl'])
        reports.append(report_config.__dict__)
        dataset_ids.append(api_response['datasetId'])

    # Append additional dataset to the list to achieve dynamic binding later
    if additional_dataset_ids is not None:
        dataset_ids.extend(additional_dataset_ids)

    embed_token = self.get_embed_token_for_multiple_reports_single_workspace(report_ids, dataset_ids, workspace_id)
    embed_config = EmbedConfig(embed_token.tokenId, embed_token.token, embed_token.tokenExpiry, reports)
    return json.dumps(embed_config.__dict__)
def get_embed_token_for_multiple_reports_multiple_workspaces(self, report_ids, dataset_ids, target_workspace_ids=None):
    '''Get Embed token for multiple reports, multiple datasets, and optional target workspaces

    Args:
        report_ids (list): Report Ids
        dataset_ids (list): Dataset Ids
        target_workspace_ids (list, optional): Workspace Ids. Defaults to None.

    Returns:
        EmbedToken: Embed token
    '''
    # Note: This method is an example and is not consumed in this sample app
    request_body = EmbedTokenRequestBody()

    for dataset_id in dataset_ids:
        request_body.datasets.append({'id': dataset_id})

    for report_id in report_ids:
        request_body.reports.append({'id': report_id})

    if target_workspace_ids is not None:
        for target_workspace_id in target_workspace_ids:
            request_body.targetWorkspaces.append({'id': target_workspace_id})

    # Generate Embed token for multiple workspaces, datasets, and reports. Refer https://aka.ms/MultiResourceEmbedToken
    embed_token_api = 'https://api.powerbi.com/v1.0/myorg/GenerateToken'
    api_response = requests.post(embed_token_api, data=json.dumps(request_body.__dict__), headers=self.get_request_header())

    if api_response.status_code != 200:
        abort(api_response.status_code, description=f'Error while retrieving Embed token\n{api_response.reason}:\t{api_response.text}\nRequestId:\t{api_response.headers.get("RequestId")}')

    api_response = json.loads(api_response.text)
    embed_token = EmbedToken(api_response['tokenId'], api_response['token'], api_response['expiration'])
    return embed_token
As you have mentioned that the normal report and visuals are getting embedded, we can assume that the access part with embed URLs and the embed tokens is working as intended.
For paginated reports, since you have not mentioned what configuration you used, make sure that the embed config contains IPaginatedReportSettings:
export interface IEmbedConfigurationBase {
    ...
    settings?: IPaginatedReportSettings;
}
For embedding Paginated Reports, we need to enable the paginated report workload on your capacity and then assign the workspace containing the paginated report to it.
Note: This is only necessary for Embedded Gen1.
To enable the paginated report workload, sign in to Power BI > Admin portal > Capacity settings.
References:
https://learn.microsoft.com/power-bi/developer/embedded/embed-paginated-reports?tabs=customers#step-4---enable-paginated-reports-workload
https://learn.microsoft.com/javascript/api/powerbi/powerbi-models/ipaginatedreportsettings
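As a rough, hypothetical sketch (not from the Microsoft sample app), the embed-params helper could be adapted for a single paginated report roughly as follows. Note that the GET report response for an RDL (paginated) report has no datasetId to collect, so only the report and the target workspace are sent to GenerateToken here:

def get_embed_params_for_paginated_report(self, workspace_id, report_id):
    # Hypothetical sketch: fetch the paginated report's embed URL.
    report_url = f'https://api.powerbi.com/v1.0/myorg/groups/{workspace_id}/reports/{report_id}'
    api_response = requests.get(report_url, headers=self.get_request_header())
    if api_response.status_code != 200:
        abort(api_response.status_code, description='Error while retrieving Embed URL')
    report = json.loads(api_response.text)

    # No 'datasetId' for an RDL report; request a token for the report only.
    request_body = {'reports': [{'id': report_id}],
                    'targetWorkspaces': [{'id': workspace_id}]}
    token_response = requests.post('https://api.powerbi.com/v1.0/myorg/GenerateToken',
                                   data=json.dumps(request_body),
                                   headers=self.get_request_header())
    if token_response.status_code != 200:
        abort(token_response.status_code, description='Error while retrieving Embed token')
    token = json.loads(token_response.text)

    return {'reportId': report['id'],
            'embedUrl': report['embedUrl'],
            'accessToken': token['token']}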

Cannot keep a stable session open with Bloomberg blpapi Python

I recently encountered an issue that I have not been able to solve, despite calling the Bloomberg helpdesk and thoroughly researching the internet for similar cases.
In short, I am using the official Python blpapi from Bloomberg (https://github.com/msitt/blpapi-python) and am now experiencing a connectivity issue: I cannot keep a session open.
Here is the code I am running: https://github.com/msitt/blpapi-python/blob/master/examples/SimpleHistoryExample.py
I simply added a "while True loop" and a "time.sleep" in it so that I can keep the session open and refresh my data every 30 seconds (this is my use case).
This used to run perfectly fine for days; however, since last Friday, I have been getting these log messages:
22FEB2021_08:54:18.870 29336:26880 WARN blpapi_subscriptionmanager.cpp:7437 blpapi.session.subscriptionmanager.{1} Could not find a service for serviceCode: 90.
22FEB2021_08:54:23.755 29336:26880 WARN blpapi_platformcontroller.cpp:377 blpapi.session.platformcontroller.{1} Connectivity lost, no connected endpoints.
22FEB2021_08:54:31.867 29336:26880 WARN blpapi_platformcontroller.cpp:344 blpapi.session.platformcontroller.{1} Connectivity restored.
22FEB2021_08:54:32.731 29336:26880 WARN blpapi_subscriptionmanager.cpp:7437 blpapi.session.subscriptionmanager.{1} Could not find a service for serviceCode: 90.
which goes on and on, along with these responses as well:
SessionConnectionDown = {
    server = "localhost:8194"
}
ServiceDown = {
    serviceName = "//blp/refdata"
    servicePart = {
        publishing = {
        }
    }
}
SessionConnectionUp = {
    server = "localhost:8194"
    encryptionStatus = "Clear"
    compressionStatus = "Uncompressed"
}
ServiceUp = {
    serviceName = "//blp/refdata"
    servicePart = {
        publishing = {
        }
    }
}
I can still pull data from the Bloomberg API: I see the historical data request results just fine. However:
Those service/session status messages mess up my code (though I could ignore them).
For some reason the connect/reconnect also messes up my Excel BBG session in the background and prevents me from using the BBG Excel add-in at all! I now get "#N/A Connection" outputs in all of my workbooks that use Bloomberg formulas.
screenshot from excel
Has anyone ever encountered such cases? If yes, please do not hesitate to share your experience, any help is more than appreciated!
Wishing you all a great day,
Adrien
I cannot comment yet, so I will try to "answer" it. I use blpapi every day and pull data all day. I am a BBG Anywhere user and never have any session issues. If you log in from a different device, it will kill the session for your Python app. Once you log back in on the machine where the Python app is running, it will connect again.
Why do you have another while loop and sleep to keep the session alive? You should create a separate session and always call it to run your requests. You should not need any "keep alive" code inside the request; just don't call session.stop(). This is what I ended up doing after much trial and error from not knowing what to do.
I run my model using Excel, trying to move away from any substantial code in Excel and use it as a GUI until I can migrate to a custom GUI. I also have BDP functions in my Excel and they work fine.
import blpapi
# removed optparse because it is deprecated.
from argparse import ArgumentParser

SERVICES = {}

def parseCmdLine():
    parser = ArgumentParser(description='Retrieve reference data.')
    parser.add_argument('-a',
                        '--ip',
                        dest='host',
                        help='server name or IP (default: %(default)s)',
                        metavar='ipAddress',
                        default='localhost')
    parser.add_argument('-p',
                        dest='port',
                        type=int,
                        help='server port (default: %(default)s)',
                        metavar='tcpPort',
                        default=8194)
    args = parser.parse_args()
    return args

def start_session():
    """Standard session for synchronous refdata requests. Upon creation
    the obj is held in SERVICES['session'].

    Returns:
        obj: A session object.
    """
    args = parseCmdLine()
    # Fill SessionOptions
    sessionOptions = blpapi.SessionOptions()
    sessionOptions.setServerHost(args.host)
    sessionOptions.setServerPort(args.port)
    # Create a Session
    session = blpapi.Session(sessionOptions)
    # Start a Session
    session.start()
    SERVICES['session'] = session
    return SERVICES['session']

def get_refDataService():
    """Create a refDataService object for functions to use. Upon creation
    it is held in SERVICES['refDataService'].

    Returns:
        obj: refDataService object.
    """
    # return the session and request because requests need session.send()
    global SERVICES
    if 'session' not in SERVICES:
        start_session()
    session = SERVICES['session']
    # Check for SERVICES['refdata'] not needed because start_session()
    # is called by this function and start_session() is never called on its own.
    session.openService("//blp/refdata")
    refDataService = session.getService("//blp/refdata")
    SERVICES['refDataService'] = refDataService
    session = SERVICES['session']
    refDataService = SERVICES['refDataService']
    print('get_refDataService called. Curious when this is called.')
    return session, refDataService

# sample override request
def ytw_oride_muni(cusip_dict):
    """Requests the Price To Worst for a dict of cusips and Yield To Worst values.
    The dict must be {'cusip' : YTW}. Overrides apply to each request, so this
    function is designed for different overrides for each cusip. Although they could
    all be the same as that is not a restriction.

    Returns: Single level nested dict
        {'cusip Muni': {'ID_BB_SEC_NUM_DES': 'val', 'PX_ASK': 'val1', 'YLD_CNV_ASK': 'val2'}}
    """
    session, refDataService = get_refDataService()
    fields1 = ["ID_BB_SEC_NUM_DES", "PX_ASK", "YLD_CNV_ASK"]
    try:
        values_dict = {}
        # For different overrides you must send separate requests.
        # This loops and creates separate messages.
        for cusip, value in cusip_dict.items():
            request = refDataService.createRequest("ReferenceDataRequest")
            # append security to request
            request.getElement("securities").appendValue(f"{cusip} Muni")
            # append fields to request
            request.getElement("fields").appendValue(fields1[0])
            request.getElement("fields").appendValue(fields1[1])
            request.getElement("fields").appendValue(fields1[2])
            # add overrides
            overrides = request.getElement("overrides")
            override1 = overrides.appendElement()
            override1.setElement("fieldId", "YLD_CNV_ASK")
            override1.setElement("value", f"{value}")
            session.sendRequest(request)
            # Process received events
            while(True):
                # We provide timeout to give the chance to Ctrl+C handling:
                ev = session.nextEvent(500)
                # below msg.messageType == ReferenceDataResponse
                for msg in ev:
                    if msg.messageType() == "ReferenceDataResponse":
                        if msg.hasElement("responseError"):
                            print(msg)
                        if msg.hasElement("securityData"):
                            data = msg.getElement("securityData")
                            num_cusips = data.numValues()
                            for i in range(num_cusips):
                                sec = data.getValue(i).getElement("security").getValue()
                                try:
                                    des = data.getValue(i).getElement("fieldData").getElement("ID_BB_SEC_NUM_DES").getValue()
                                except:
                                    des = None
                                try:
                                    ptw = data.getValue(i).getElement("fieldData").getElement("PX_ASK").getValue()
                                except:
                                    ptw = None
                                try:
                                    ytw = data.getValue(i).getElement("fieldData").getElement("YLD_CNV_ASK").getValue()
                                except:
                                    ytw = None
                                values = {'des': des, 'ptw': ptw, 'ytw': ytw}
                # Response completely received, so we could exit
                if ev.eventType() == blpapi.Event.RESPONSE:
                    values_dict.update({sec: values})
                    break
    finally:
        # Stop the session
        # session.stop()
        return values_dict
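A hypothetical usage sketch (the CUSIPs and Yield-To-Worst overrides below are placeholders): the session is created once on the first call, reused by later calls, and never stopped.

# Placeholder CUSIPs mapped to Yield-To-Worst override values.
cusips = {'64971QJ38': 3.25, '13063DGA0': 2.90}

# The first call opens the session and the //blp/refdata service; later calls reuse them.
print(ytw_oride_muni(cusips))

# To refresh every 30 seconds, keep calling the function without stopping the session:
# import time
# while True:
#     print(ytw_oride_muni(cusips))
#     time.sleep(30)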

How do I get the currently registered channel in notify_run, Python

I am using the notify_run module to create a new channel and then push notifications through it. I can successfully create a channel from within the code and use it as well, but I would like to get the details of the channel that is registered/currently in use. I have tried to store the output from the command prompt and use that later.
notify = Notify()
k = str(notify.register())
l = str(k.split('\n')[0])
channel_link = l.split(': ')[1]
I would like to know if there is any other method to do so. The documentation does not specify much.
It looks like you're parsing the response for the endpoint URL; if that's the case, you can do the following to get it:
notify = Notify()
channel = notify.register()
endpoint = channel.endpoint
print(endpoint) # https://notify.run/<channel_code>
You can also get the channel_page, which is a page that has the channel info, QR code, and link to subscribe.
channel_page = channel.channel_page
print(channel_page) # https://notify.run/c/<channel_page_code>
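Once a channel is registered, pushing a notification through it is just a send call (a minimal sketch; the message text is arbitrary):

# The same Notify instance can push to the channel it registered.
notify.send('Files uploaded')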

How to delete only the first occurrence of a recurring event using the Google Calendar API

I'm using Google API v3 for Google Calendar. I'm able to add a recurring event to Google Calendar, but when I try to delete only the first occurrence of the series, the whole series gets deleted.
I'm using Python and the authomatic_inst library, and I'm passing the event_id as follows:
baseID_yyyymmddThhmmssZ
The delete request acts as if I had only passed the baseID, but I've checked that the whole ID (ID with date and time) is being passed.
I've used this reference (https://developers.google.com/google-apps/calendar/v3/reference/events) to retrieve the instances of the series and make sure the id of the first event is correct, but although deleting it from the try it! tool works, it does not work for me from Python.
def delete_gcal_event(google_event_id):
    # google_event_id in the form baseID_yyyymmddThhmmssZ
    # Deserialize the user's API credentials from the session storage
    credentials = authomatic_inst.credentials(session['google_credentials'])
    if not credentials.valid:
        credentials.refresh()
        session['google_credentials'] = credentials.serialize()
    response = authomatic_inst.access(
        credentials,
        api_url('calendars/{}/events/{}', 'primary', google_event_id),
        method='DELETE',
        headers=JSON_HEADERS)
    if (response.status == 204):
        return dict(success=True,
                    message="Event deleted successfully",
                    status=response.status)
    else:
        return dict(success=False,
                    message="Error when deleting event from google",
                    status=response.status)
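For reference, here is a minimal, hypothetical sketch of the instance lookup described above, reusing the same authomatic_inst / api_url helpers from the question and assuming the response object exposes the parsed body as response.data; this is illustrative, not a confirmed fix:

def get_first_instance_id(recurring_event_id):
    # List the instances of the recurring event (Events: instances) and return
    # the id of the first one, assuming the API returns them in start-time order.
    credentials = authomatic_inst.credentials(session['google_credentials'])
    response = authomatic_inst.access(
        credentials,
        api_url('calendars/{}/events/{}/instances', 'primary', recurring_event_id),
        method='GET',
        headers=JSON_HEADERS)
    instances = response.data.get('items', [])
    return instances[0]['id'] if instances else None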
Has anyone come across this issue before?
Many thanks,
Javier
