Label all SQL instances in GCP that match a specific label - Python

I am using the instances.list method from the Cloud SQL Admin API to retrieve information about the instances in a project. I am using the example code provided by Google, and it returns information about each instance. I retrieve the name of the instance, and then I need to update the instance labels if a label matches the provided 'RC_PlatformCode'. This needs to be done for all SQL instances in the project matching the specific label. How can this be achieved, given that my code is not working? Or is there an easier way to do this in Python?
from config import Config, log, get_secret
from botocore.exceptions import ClientError
from typing import Dict, Iterable
from pprint import pprint
from googleapiclient import discovery
import json
import os
config = Config()
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="creds.json"
def updateSqlLabels(data, account):
    log.info("-----")
    log.info("updating Cloud Storage labels")
    RC_PlatformCode = data['NewImage']['PlatformCode']['S']
    platformcode_gcp = 'rc_platformcode'
    tagKey = data['NewImage']['Key']['S']
    tagValue = data['NewImage']['Value']['S']
    char_to_replace = {
        '#': '_at_',
        '.': '_'
    }
    tagKey = tagKey.lower()
    for key, value in char_to_replace.items():
        tagValue = tagValue.replace(key, value)
    service = discovery.build('sqladmin', 'v1beta4')
    project = account  # TODO: Update placeholder value.
    request = service.instances().list(project=project)
    while request is not None:
        response = request.execute()
        for database_instance in response['items']:
            # TODO: Change code below to process each `database_instance` resource:
            log.info("db_name = " + database_instance['name'])
            update_tag = False
            try:
                labels = database_instance['settings']['userLabels']
                log.info("tags -> " + str(labels))
            except ClientError:
                continue
            for label in labels:
                if labels["rc_platformcode"] == RC_PlatformCode:
                    log.info(
                        f"RC_PlatformCode [{RC_PlatformCode}] present for instance [{database_instance['name']}]")
                    update_tag = True
                    break
            if update_tag:
                create_tag = True
                log.info("processing instance -> " + database_instance['name'])
                log.info("setting tag Key -> " + tagKey)
                log.info("setting tag Value -> " + tagValue)
                for label in labels:
                    log.info("checking tag -> " + label)
                labels[tagKey] = tagValue
                instance_labels = labels
                database_instance_body = {
                    'settings': {
                        'userLabels': instance_labels
                    }
                }
                log.info("project = " + project)
                log.info("instance = " + database_instance['name'])
                log.info("labels = " + str(database_instance_body))
                request = service.instances().patch(project=project, instance=database_instance['name'], body=database_instance_body)
                response = request.execute
                break
I am receiving the following error:
Response
{
    "errorMessage": "'items'",
    "errorType": "KeyError",
    "stackTrace": [
        " File \"/var/task/handler.py\", line 63, in handler\n updateSqlLabels(data, account)\n",
        " File \"/var/task/cloudSql.py\", line 32, in updateSqlLabels\n for database_instance in response['items']:\n"
    ]
}
Any tips and help would be appreciated.
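The KeyError comes from response['items']: the Cloud SQL Admin API omits the items key entirely when a project has no instances (or a page is empty), so it should be read with a default instead of indexed directly. Two further issues in the snippet above: request.execute on the last line is missing its parentheses, so the patch is never actually sent, and the while loop reassigns request to the patch request instead of paginating. A minimal sketch of the loop with those fixes, reusing the names from the question (project, RC_PlatformCode, tagKey, and tagValue are assumed to be set as above):

request = service.instances().list(project=project)
while request is not None:
    response = request.execute()
    # 'items' is absent when there are no instances, so default to [].
    for database_instance in response.get('items', []):
        labels = database_instance.get('settings', {}).get('userLabels', {})
        if labels.get('rc_platformcode') != RC_PlatformCode:
            continue
        labels[tagKey] = tagValue
        body = {'settings': {'userLabels': labels}}
        # Note the parentheses: execute() is what actually sends the patch.
        service.instances().patch(
            project=project,
            instance=database_instance['name'],
            body=body,
        ).execute()
    # Advance to the next page rather than overwriting `request` with the patch.
    request = service.instances().list_next(previous_request=request,
                                            previous_response=response)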

Related

Create Lambda - DynamoDB Count Function

I am creating a SAM web app, with the backend being an API in front of a Python Lambda function, with a DynamoDB table that maintains a count of the number of HTTP calls to the API. The API must also return this number. The YAML itself loads normally. My problem is writing the Lambda function to iterate and return the count. Here is my code:
def lambda_handler(event, context):
    dynamodb = boto3.resource("dynamodb")
    ddbTableName = os.environ["databaseName"]
    table = dynamodb.Table(ddbTableName)
    # Update item in table or add if doesn't exist
    ddbResponse = table.update_item(
        Key={"id": "VisitorCount"},
        UpdateExpression="SET count = count + :value",
        ExpressionAttributeValues={":value": Decimal(context)},
        ReturnValues="UPDATED_NEW",
    )
    # Format dynamodb response into variable
    responseBody = json.dumps({"VisitorCount": ddbResponse["Attributes"]["count"]})
    # Create api response object
    apiResponse = {"isBase64Encoded": False, "statusCode": 200, "body": responseBody}
    # Return api response object
    return apiResponse
I can get VisitorCount to be a string, but not a number. I get this error (the log line is truncated and overlaps the runtime's own trace):
[ERROR] TypeError: lambda_handler() missing 1 required positional argument: 'cou...
    response = request_handler(event, lambda_context)  # from the runtime's handle_event_request
What is going on?
[UPDATE] I found the original error, which was that the function was not properly received by the SAM app. Changing the name fixed this, and it is now being read. Now I have to troubleshoot the actual Python. New Code:
import json
import boto3
import os

dynamodb = boto3.resource("dynamodb")
ddbTableName = os.environ["databaseName"]
table = dynamodb.Table(ddbTableName)
Key = {"VisitorCount": {"N": "0"}}

def handler(event, context):
    # Update item in table or add if doesn't exist
    ddbResponse = table.update_item(
        UpdateExpression="set VisitorCount = VisitorCount + :val",
        ExpressionAttributeValues={":val": {"N": "1"}},
        ReturnValues="UPDATED_NEW",
    )
    # Format dynamodb response into variable
    responseBody = json.dumps({"VisitorCount": ddbResponse["Attributes"]["count"]})
    # Create api response object
    apiResponse = {"isBase64Encoded": False, "statusCode": 200, "body": responseBody}
    # Return api response object
    return apiResponse
I am getting a syntax error on Line 13, which is
UpdateExpression= "set VisitorCount = VisitorCount + :val",
But I can't tell where I am going wrong on this. It should update the DynamoDB table to increase the count by 1. Looking at the AWS guide it appears to be the correct syntax.
Not sure what the exact error is, but ddbResponse should look like this:
ddbResponse = table.update_item(
    Key={
        'key1': aaa,
        'key2': bbb
    },
    UpdateExpression="set VisitorCount = VisitorCount + :val",
    ExpressionAttributeValues={":val": Decimal(1)},
    ReturnValues="UPDATED_NEW",
)
Specify the item to be updated with Key (one item per Lambda call).
Set Decimal(1) for ExpressionAttributeValues.
https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GettingStarted.Python.03.html#GettingStarted.Python.03.04
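Folding those two fixes back into the handler from the update, a minimal corrected sketch could look like this (the partition key name "id" and the environment variable come from the question's first attempt; if_not_exists is added so the very first call starts the counter at zero, and note that ReturnValues exposes the attribute as VisitorCount, not count):

import json
import os
from decimal import Decimal

import boto3

dynamodb = boto3.resource("dynamodb")
table = dynamodb.Table(os.environ["databaseName"])

def handler(event, context):
    # Key selects the single item to update; the resource-level Table API
    # takes plain Python values, not the low-level {"N": "1"} format.
    ddbResponse = table.update_item(
        Key={"id": "VisitorCount"},
        UpdateExpression="SET VisitorCount = if_not_exists(VisitorCount, :zero) + :val",
        ExpressionAttributeValues={":val": Decimal(1), ":zero": Decimal(0)},
        ReturnValues="UPDATED_NEW",
    )
    count = int(ddbResponse["Attributes"]["VisitorCount"])
    return {
        "isBase64Encoded": False,
        "statusCode": 200,
        "body": json.dumps({"VisitorCount": count}),
    }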

Unable to import module - Lambda handler Error

When I try to run my Python code in Lambda, passing the handler as function.module, I get the error below. Any suggestions on how I could resolve this?
The file test_client_visitor below is triggered to call client_visitor and send an email to the clients accordingly. When I run the Python file test_client_visitor locally, the email is triggered successfully, but in Lambda I face this issue.
file_name: test_client_visitor
import unittest
import jsonpickle

function = __import__('client_visitor')
handler = function.scan_clients

class TestFunction(unittest.TestCase):
    def test_function(self):
        file = open('event.json', 'rb')
        try:
            ba = bytearray(file.read())
            event = jsonpickle.decode(ba)
            print('## EVENT')
            print(jsonpickle.encode(event))
            context = {'requestid': '1234'}
            result = handler(event, context)
            print(result)
            self.assertTrue(result, 'Emails could not be sent!')
        finally:
            file.close()

if __name__ == '__main__':
    unittest.main()
file_name: client_visitor.py
import datetime
import boto3
from aws_ses import send_bulk_templated_email
# boto3.set_stream_logger('botocore', level='DEBUG')
from mongodb import get_mongo_db

def process_clients(clients, developers, clients_to_be_notified, days):
    if not clients:
        pass
    check_date = datetime.datetime.now() + datetime.timedelta(days)
    for client in clients:
        client_id_ = client['client_id']
        if 'developer_id' in client:
            developers[client_id_] = client['developer_id']
        else:
            if 'secrets' in client:
                secrets = client['secrets']
                for secret in secrets:
                    if 'not_on_or_after' in secret and secret['not_on_or_after'] < check_date.timestamp():
                        clients_to_be_notified.append({'client_id': client_id_,
                                                       'expiration_date': datetime.datetime.fromtimestamp(
                                                           secret['not_on_or_after']).strftime('%m/%d/%Y')})
                        print("adding client to notify List", client_id_, ":", client['sort'])

def notify_clients(clients_to_be_notified, developers):
    developer_id_list = []
    for client_secret in clients_to_be_notified:
        developer_id_list.append(developers[client_secret['client_id']])
    if developer_id_list:
        db = get_mongo_db()
        if db:
            users = list(db.users.find({'guid': {'$in': developer_id_list}}, {'email', 'guid'}))
            need_to_send_email = False
            for user in users:
                for client_secret in clients_to_be_notified:
                    if developers[client_secret['client_id']] == user['guid']:
                        client_secret['email'] = user['email']
                        need_to_send_email = True
                        break
            if need_to_send_email:
                return send_bulk_templated_email(clients_to_be_notified)
            else:
                return False
    return True

def scan_clients(event, context):
    local = False
    if 'local' in event:
        local = event['local'] == 'True'
    if local:
        dynamodb = boto3.resource('dynamodb', endpoint_url="http://localhost:8000")
    else:
        dynamodb = boto3.resource('dynamodb')
    days = 30
    if 'days' in event:
        days = int(event['days'])
    print(f"Scanning Clients with {days} or less to secret expiration")
    table = dynamodb.Table('****')
    scan_kwargs = {
        'ProjectionExpression': 'client_id, sort, developer_id, secrets, approved'
    }
    test = False
    if 'test' in event:
        test = event['test'] == 'True'
    done = False
    start_key = None
    developers = {}
    clients_to_be_notified = []
    if test:
        developers['idm-portal1'] = '***'
        clients_to_be_notified = [{'client_id': 'idm-portal1', 'expiration_date': '04/17/2021'}]
    while not done:
        if start_key:
            scan_kwargs['ExclusiveStartKey'] = start_key
        response = table.scan(**scan_kwargs)
        process_clients(response.get('Items', []), developers, clients_to_be_notified, days)
        start_key = response.get('LastEvaluatedKey', None)
        done = start_key is None
    print("total developers ", len(developers), " total clients_to_be_notified ", len(clients_to_be_notified))
    return notify_clients(clients_to_be_notified, developers)

if __name__ == '__main__':
    scan_clients(event={'days': 30, 'local': False, 'test': True}, context=None)
Response
{
    "errorMessage": "Unable to import module 'test_client_visitor': No module named 'test_client_visitor'",
    "errorType": "Runtime.ImportModuleError",
    "stackTrace": []
}
Your file must be named test_client_visitor.py. The way Lambda runs the code is by trying to import the main file and call the handler function. See the AWS docs on setting up a handler for Python.
The reason you didn't run into this issue locally is because I assume you are calling python directly on the command line — python test_client_visitor. When you import a module in Python, the file has to end in the .py extension.
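For reference, the handler string and the archive layout have to line up; here is a sketch matching the names in the question (the exact Handler value is an assumption, pointing the entry straight at scan_clients):

# Zip layout: modules at the top level of the archive, not inside a subfolder.
#   filename.zip
#   ├── client_visitor.py   contains scan_clients(event, context)
#   ├── aws_ses.py
#   └── mongodb.py
#
# Lambda / SAM configuration (hypothetical):
#   Handler: client_visitor.scan_clients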
I was able to fix this issue with the right packaging of the contents into the zip, avoiding the creation of an extra folder, with the command below.
Command:
cd folder; zip -r ../filename.zip *
Thank you, everyone, for your inputs.

AWS Lambda function failing - Python

This is a very weird problem that I have gotten stuck on; I would really appreciate it if someone could provide some direction. I am trying to access the value of request_url from the web_token.py module.
When I run web_token.py separately in PyCharm and print request_url, it works fine and generates the URL. I zip both of these files and upload them to the Lambda function, but when testing it I get the error "Unable to import module 'retrieve_accounts': No module named boto.sts". I even tried putting the code of web_token.py inside retrieve_accounts.py, but I get the same error. I am sure I am missing something very basic; it looks like boto.sts is not being recognized while running the Python script. Can somebody please give some guidance? Thank you!
retrieve_accounts.py
import boto3
import web_token

def get_account(event, context):
    client = boto3.client('dynamodb')
    NameID = "testname#org.com"
    ManagerEmail = "test1#eorg.com"
    response = client.scan(
        TableName='Sandbox-Users',
        ScanFilter={
            'NameID': {
                'AttributeValueList': [
                    {
                        'S': NameID,
                    },
                ],
                'ComparisonOperator': 'EQ'
            }
        }
    )
    if response["Count"] > 0:
        client = boto3.client('dynamodb')
        response = client.get_item(
            Key={
                'NameID': {
                    'S': NameID,
                },
                'ManagerEmail': {
                    'S': ManagerEmail,
                },
            },
            TableName='Sandbox-Users',
        )
        return web_token.request_url  # -----------> here
    else:
        response = client.put_item(
            Item={
                'NameID': {
                    'S': NameID,
                },
                'ManagerEmail': {
                    'S': ManagerEmail,
                }
            },
            TableName='Sandbox-Users'
        )
        return "Create Account"
web_token.py
import httplib
import urllib, json
from boto.sts import STSConnection  # --------> Error here

sts_connection = STSConnection()
assumed_role_object = sts_connection.assume_role(
    role_arn="arn:aws:iam::454084028794:role/AMPSandboxRole",
    role_session_name="AssumeRoleSession"
)
# Step 3: Format resulting temporary credentials into JSON
json_string_with_temp_credentials = '{'
json_string_with_temp_credentials += '"sessionId":"' + assumed_role_object.credentials.access_key + '",'
json_string_with_temp_credentials += '"sessionKey":"' + assumed_role_object.credentials.secret_key + '",'
json_string_with_temp_credentials += '"sessionToken":"' + assumed_role_object.credentials.session_token + '"'
json_string_with_temp_credentials += '}'
# Step 4: Make request to AWS federation endpoint to get sign-in token.
# Construct the parameter string with the sign-in action request, a 12-hour
# session duration, and the JSON document with temporary credentials as parameters.
request_parameters = "?Action=getSigninToken"
request_parameters += "&SessionDuration=43200"
request_parameters += "&Session=" + urllib.quote_plus(json_string_with_temp_credentials)
request_url = "/federation" + request_parameters
conn = httplib.HTTPSConnection("signin.aws.amazon.com")
conn.request("GET", request_url)
r = conn.getresponse()
# Returns a JSON document with a single element named SigninToken.
signin_token = json.loads(r.read())
request_parameters = "?Action=login"
request_parameters += "&Issuer=sandbox.com"
request_parameters += "&Destination=" + urllib.quote_plus("https://console.aws.amazon.com/")
request_parameters += "&SigninToken=" + signin_token["SigninToken"]
request_url = "https://signin.aws.amazon.com/federation" + request_parameters
AWS Lambda Python environments include boto3 (and botocore). They don't include the older boto (a precursor to boto3), hence the import failure.
You could potentially include boto in your upload but it's not advisable to mix boto and boto3 if you can avoid it. Use one or the other, preferably boto3.
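For the STS call itself, a boto3 equivalent of the failing boto.sts usage might look like the sketch below (the role ARN is the one from web_token.py; the federation-URL steps that follow would consume these fields the same way):

import boto3

# boto3 ships with the Lambda Python runtimes, unlike the legacy boto.
sts_client = boto3.client('sts')
assumed_role_object = sts_client.assume_role(
    RoleArn="arn:aws:iam::454084028794:role/AMPSandboxRole",
    RoleSessionName="AssumeRoleSession",
)
credentials = assumed_role_object['Credentials']
access_key = credentials['AccessKeyId']        # was credentials.access_key
secret_key = credentials['SecretAccessKey']    # was credentials.secret_key
session_token = credentials['SessionToken']    # was credentials.session_token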

How to detect changes in firebase child with python?

I am having some trouble with this application. What I need is this: if I detect a change in the database (Firebase), particularly in the 'sala' and 'ventilacion' nodes, the function does what it has to do; if there isn't any change in the database, it does nothing. I am using Python and the pyrebase library. Here is the code. Thank you very much for your help.
import pyrebase
import serial
import time

config = {
    # firebase configurations
}
firebase = pyrebase.initialize_app(config)
db = firebase.database()

def ReconfiguracionFabrica():
    ser.write('AT')
    time.sleep(0.2)
    ser.write('AT+RENEW')
    time.sleep(0.3)

def ConfiguracionMaster():
    time.sleep(0.5)
    ser.write('AT+IMME1')
    time.sleep(0.350)
    ser.write('AT+ROLE1')
    time.sleep(0.2)

ser = serial.Serial(port="/dev/ttyAMA0", baudrate=9600, timeout=1)
ReconfiguracionFabrica()
time.sleep(0.1)
ConfiguracionMaster()
time.sleep(0.1)
print "********** INICIO *************"
ser.flushInput()
contador = 0
prender = ''
ventilacion1 = ''
checkeo = ''
while True:
    # if db.child("sala").:  # This is where the conditional that lets me detect a change only in the sala node would go.
    salidaLed1 = db.child("sala").get()
    ser.write('AT')
    time.sleep(0.1)
    ser.write('AT+CON508CB16A7014')
    time.sleep(0.1)
    if salidaLed1.val() == True:
        prender = ";"
    if salidaLed1.val() == False:
        prender = ","
    ser.write('luz: %s \n' % (prender))
    print ('luz: %s \n' % (prender))
    time.sleep(1)
    ser.read(checkeo)
    if checkeo == 'j':
        ReconfiguracionFabrica()
        time.sleep(0.1)
        ConfiguracionMaster()
Question: How to detect changes in a Firebase child
Note: All examples use public access.
Set up example data and verify it's readable. This has to be done once!
temperature_c = 30
data = {'date': time.strftime('%Y-%m-%d'),
        'time': time.strftime('%H:%M:%S'),
        'temperature': temperature_c}
db.child('public').child('Device_1').set(data)
response = db.child('public').child('Device_1').get()
print(response.val())
Create First Script doing Updates:
for t in [25, 26, 27, 28, 29, 30, 31, 32, 33, 35]:
    temperature_c = t
    data = {'date': time.strftime('%Y-%m-%d'),
            'time': time.strftime('%H:%M:%S'),
            'temperature': temperature_c}
    db.child('public').child('Device_1').update(data)
    time.sleep(60)
Create Second Script with Stream Handler
def stream_handler(message):
    print('event={m[event]}; path={m[path]}; data={m[data]}'
          .format(m=message))

my_stream = db.child('public').child('Device_1').stream(stream_handler)

# Run Stream Handler forever
while True:
    data = input("[{}] Type exit to disconnect: ".format('?'))
    if data.strip().lower() == 'exit':
        print('Stop Stream Handler')
        if my_stream: my_stream.close()
        break
Run Stream Handler Script:
Response Output from def stream_handler after startup (Initial Data):
event="put"; path=/; data={'Device_1': {'temperature': 30, 'time': '13:34:24', 'date': '2017-07-20'}}
Run Updater Script:
Watch Output from Stream Handler Script
Response Output from def stream_handler after First Update Data:
event=patch; path=/Device_1; data={'temperature': 25, 'time': '13:49:12'}
Tested with Python 3.4.2.
From the Pyrebase documentation on streaming:
You can listen to live changes to your data with the stream() method.
def stream_handler(message):
    print(message["event"])  # put
    print(message["path"])   # /-K7yGTTEp7O549EzTYtI
    print(message["data"])   # {'title': 'Pyrebase', "body": "etc..."}

my_stream = db.child("posts").stream(stream_handler)
You should at least handle put and patch events. Refer to "Streaming from the REST API" for details.
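Applied to the nodes from the question, a minimal handler might look like this sketch (node names come from the question; the print calls are placeholders for the serial-port actions, and streaming the root reference so both nodes arrive in one handler is my assumption about the intended layout):

def stream_handler(message):
    # message["path"] is relative to the streamed reference, e.g. "/sala".
    node = str(message["path"]).lstrip("/")
    if node == "sala":
        print("sala changed to", message["data"])         # e.g. toggle the LED here
    elif node == "ventilacion":
        print("ventilacion changed to", message["data"])  # e.g. switch the fan here

# Stream from the root so changes to both nodes reach the same handler.
my_stream = db.stream(stream_handler)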
I know this post is 2 years old, but I hope this helps. Try using the firebase_admin module.
Use this command: pip install firebase-admin
I too had a requirement where I needed to check for changes made to a Firebase database. I referred here.
The following is sample code based on your question, which you can refer to and try out.
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db

cred = credentials.Certificate("path/to/serviceAccountKey.json")
firebase_admin.initialize_app(cred, {
    'databaseURL': 'https://example.firebaseio.com',
    'databaseAuthVariableOverride': None
})

def ignore_first_call(fn):
    called = False
    def wrapper(*args, **kwargs):
        nonlocal called
        if called:
            return fn(*args, **kwargs)
        else:
            called = True
            return None
    return wrapper

@ignore_first_call
def listener(event):
    print(event.event_type)  # can be 'put' or 'patch'
    print(event.path)        # relative to the reference, it seems
    print(event.data)        # new data at /reference/event.path. None if deleted
    node = str(event.path).split('/')[-2]  # you can slice the path according to your requirement
    property = str(event.path).split('/')[-1]
    value = event.data
    if node == 'sala':
        pass  # do something
    elif node == 'ventilacion':
        pass  # do something
    else:
        pass  # do something else

db.reference('/').listen(listener)
I was working on the same thing, so going by the current updates to pyrebase and learning from the answers posted above, I got this running perfectly. (Please make sure your Python is upgraded from Python 2 to Python 3 to run pyrebase and firebase-admin.)
import firebase_admin
import pyrebase
from firebase_admin import credentials

config = {
    "apiKey": "",
    "authDomain": "",
    "databaseURL": "",
    "projectId": "",
    "storageBucket": "",
    "serviceAccount": "path to the service account json file you downloaded",
    "messagingSenderId": "",
    "appId": "",
    "measurementId": ""
}
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()

cred = credentials.Certificate("path to downloaded json file")
firebase_admin.initialize_app(cred, {
    "databaseURL": "same as config",
    "databaseAuthVariableOverride": None
})
db = firebase.database()

def ignore_first_call(fn):
    called = False
    def wrapper(*args, **kwargs):
        nonlocal called
        if called:
            return fn(*args, **kwargs)
        else:
            called = True
            return None
    return wrapper

def stream_handler(message):
    ab = str(1)
    all_videos = storage.child("videos/").list_files()  # node where files are
    path_on_local = "local path to save the downloads"
    print(message["event"])  # put
    print(message["path"])   # /-K7yGTTEp7O549EzTYtI
    print(message["data"])   # {'title': 'Pyrebase', "body": "etc..."}
    node = str(message["path"]).split('/')[-2]
    property = str(message["path"]).split('/')[-1]
    value = message["data"]
    if message["event"] == "put":
        for videos in all_videos:
            try:
                print(videos.name)
                z = storage.child(videos.name).get_url(None)
                storage.child(videos.name).download(path_on_local + "/" + ab + ".mp4")
                x = int(ab)
                ab = str(x + 1)
            except:
                print('Download Failed')
    else:
        print("error")

my_stream = db.child("videos").stream(stream_handler)

How to automate Google PageSpeed Insights tests using Python

Is there a way to automate checking Google Page Speed scores?
So I figured out how to do this using the Google PageSpeed API, buried in the documentation.
The TL;DR explanation is that you can use the following URL template, replacing the bracketed values, after enabling the Google PageSpeed API in Cloud Console (and, if in a browser, you must also be signed in as an authenticated Google Cloud user).
https://www.googleapis.com/pagespeedonline/v2/runPagespeed?url={YOUR_SITE_URL}/&filter_third_party_resources=true&locale=en_US&screenshot=false&strategy=desktop&key={YOUR_API_KEY}
As you can see from the link above, you will need a Google PageSpeed API key. These are from-scratch setup instructions; if your project is already on Cloud Console, you can skip the first few steps.
1. Go to Cloud Console: https://console.developers.google.com
2. Sign up for an account if necessary.
3. Create a project.
4. Go to Menu > API Manager > Credentials.
5. Use the "Create credentials" button > API Key.
6. Copy the key, then hit close.
7. Menu > Dashboard > Enable API.
8. Use search to find the PageSpeed Insights API and click on it.
9. Use the "|> ENABLE" button near the title.
Once you have an API key, you can replace the values in the URL; it should look something like this:
https://www.googleapis.com/pagespeedonline/v2/runPagespeed?url=https://www.example.com/&filter_third_party_resources=true&locale=en_US&screenshot=false&strategy=desktop&key=FaKeApIKey29nS8U22mM
The parameter strategy=desktop in the URL can be changed to strategy=mobile. For mobile you get both a speed and a usability score. Here's what the start of the JSON looks like:
{
    "kind": "pagespeedonline#result",
    "id": "https://www.example.com/fake/page",
    "responseCode": 200,
    "title": "Example Domain",
    "ruleGroups": {
        "SPEED": {
            "score": 100
        },
        "USABILITY": {
            "score": 100
        }
    },
    ....continued...
So I automated this using Python and a Python unit test.
import requests
import json
import unittest
from globes import *

api_key = ''  # Add API key. Found here: https://console.developers.google.com/apis/credentials/key/
base = 'http://example.com'
locale_code = 'en_US'

def get_insights_json(self, page_url, local, device_type, api_key, speed_or_useability, expected_score):
    url = 'https://www.googleapis.com/pagespeedonline/v2/runPagespeed?url=' + page_url + '&filter_third_party_resources=true&locale=' + local + '&screenshot=false&strategy=' + device_type + '&key=' + api_key
    # print("Getting :: " + url)
    r = requests.get(url)
    return_code = r.status_code
    try:
        self.assertEqual(return_code, 200)
    except AssertionError as e:
        self.verificationErrors.append(str(page_url) + " did not return 200")
    return_text = r.text
    return_json = json.loads(return_text)
    score = return_json['ruleGroups'][speed_or_useability]['score']
    print('Getting ' + speed_or_useability + ' for ' + page_url + ' and got a score of ' + str(score))
    try:
        self.assertTrue(int(score) >= expected_score)
    except AssertionError as e:
        self.verificationErrors.append(str(page_url) + ' expected ' + device_type + ' speed score to be greater than ' + str(expected_score) + ', instead got ' + str(score))

class TestAllAPIs(unittest.TestCase):
    def setUp(self):
        self.verificationErrors = []
        self.maxDiff = None

    def tearDown(self):
        self.assertEqual([], self.verificationErrors)

    def test_desktop_speed(self):
        current_page = base + ''  # You could add to the URL to test other pages; I tend to do this in a loop using a list I set up by base URL.
        device_type = 'desktop'
        target = 'SPEED'
        get_insights_json(self, current_page, locale_code, device_type, api_key, target, 80)

    def test_mobile_speed(self):
        current_page = base + ''
        device_type = 'mobile'
        target = 'SPEED'
        get_insights_json(self, current_page, locale_code, device_type, api_key, target, 80)

    def test_mobile_useability(self):
        current_page = base + ''
        device_type = 'mobile'
        target = 'USABILITY'
        get_insights_json(self, current_page, locale_code, device_type, api_key, target, 80)

if __name__ == "__main__":
    unittest.main()
One thing that is still a bit of a mystery to me: why do browsers need to be authenticated with Google to get the JSON from that URL, while Python requests does not?
Anybody using this guide (as of April 2022) will need to update to the following:
https://www.googleapis.com/pagespeedonline/v5/runPagespeed?url={YOUR_SITE_URL}/&filter_third_party_resources=true&locale=en_US&screenshot=false&strategy=desktop&key={YOUR_API_KEY}
The difference is that "/v2/" needs to be replaced with "/v5/".
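Note that v5 also changed the response shape: the ruleGroups block shown in the v2 JSON above is gone, and scores now live under lighthouseResult. A minimal sketch of reading the performance score from v5 (field names are to the best of my understanding; verify against the current PageSpeed Insights docs):

import requests

api_key = 'FaKeApIKey29nS8U22mM'  # placeholder key, as in the example above
r = requests.get(
    'https://www.googleapis.com/pagespeedonline/v5/runPagespeed',
    params={'url': 'https://www.example.com/', 'strategy': 'desktop', 'key': api_key})
result = r.json()
# v5 reports category scores as floats between 0 and 1.
performance = result['lighthouseResult']['categories']['performance']['score']
print('Performance score:', int(performance * 100))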
