New to cloud, Can anyone help to correct this code
This module is to list the regions and delete the complete default vpc via a lambda function.
Getting below error while testing this:
Syntax error in module 'lambda function': unindent does not match any outer indentation level
Please help on this
Removed other function like vpc, sc as the block looks very big here in the post just added the igw for understanding..
Need assistance
def lambda_handler(event, context):
# TODO implement
#for looping across the regions
regionList=[]
region=boto3.client('ec2')
regions=region.describe_regions()
#print('the total region in aws are : ',len(regions['Regions']))
for r in range(0,len(regions['Regions'])):
regionaws=regions['Regions'][r]['RegionName']
regionList.append(regionaws)
#print(regionList)
#regionsl=['us-east-1']
#sending regions as a parameter to the remove_default_vps function
res=remove_default_vpcs(regionList)
return {
'status':res
}
def get_default_vpcs(client):
vpc_list = []
vpcs = client.describe_vpcs(
Filters=[
{
'Name' : 'isDefault',
'Values' : [
'true',
],
},
]
)
vpcs_str = json.dumps(vpcs)
resp = json.loads(vpcs_str)
data = json.dumps(resp['Vpcs'])
vpcs = json.loads(data)
for vpc in vpcs:
vpc_list.append(vpc['VpcId'])
return vpc_list
def del_igw(ec2, vpcid):
""" Detach and delete the internet-gateway """
vpc_resource = ec2.Vpc(vpcid)
igws = vpc_resource.internet_gateways.all()
if igws:
for igw in igws:
try:
print("Detaching and Removing igw-id: ", igw.id) if (VERBOSE == 1) else ""
igw.detach_from_vpc(
VpcId=vpcid
)
igw.delete(
)
except boto3.exceptions.Boto3Error as e:
print(e)
def remove_default_vpcs():
for region in res:
try:
client = boto3.client('ec2', region_name = region)
ec2 = boto3.resource('ec2', region_name = region)
vpcs = get_default_vpcs(client)
except boto3.exceptions.Boto3Error as e:
print(e)
exit(1)
else:
for vpc in vpcs:
print("\n" + "\n" + "REGION:" + region + "\n" + "VPC Id:" + vpc)
del_igw(ec2, vpc)
print(completed)
It looks to me a code indentation issue. Please try with this
def lambda_handler(event, context):
# TODO implement
#for looping across the regions
regionList=[]
region=boto3.client('ec2')
regions=region.describe_regions()
#print('the total region in aws are : ',len(regions['Regions']))
for r in range(0,len(regions['Regions'])):
regionaws=regions['Regions'][r]['RegionName']
regionList.append(regionaws)
#print(regionList)
#regionsl=['us-east-1']
#sending regions as a parameter to the remove_default_vps function
res=remove_default_vpcs(regionList)
return {
'status':res
}
def get_default_vpcs(client):
vpc_list = []
vpcs = client.describe_vpcs(
Filters=[
{
'Name' : 'isDefault',
'Values' : [
'true',
],
},
]
)
vpcs_str = json.dumps(vpcs)
resp = json.loads(vpcs_str)
data = json.dumps(resp['Vpcs'])
vpcs = json.loads(data)
for vpc in vpcs:
vpc_list.append(vpc['VpcId'])
return vpc_list
def del_igw(ec2, vpcid):
""" Detach and delete the internet-gateway """
vpc_resource = ec2.Vpc(vpcid)
igws = vpc_resource.internet_gateways.all()
if igws:
for igw in igws:
try:
print("Detaching and Removing igw-id: ", igw.id) if (VERBOSE == 1) else ""
igw.detach_from_vpc(
VpcId=vpcid
)
igw.delete(
)
except boto3.exceptions.Boto3Error as e:
print(e)
def remove_default_vpcs():
for region in res:
try:
client = boto3.client('ec2', region_name = region)
ec2 = boto3.resource('ec2', region_name = region)
vpcs = get_default_vpcs(client)
except boto3.exceptions.Boto3Error as e:
print(e)
exit(1)
else:
for vpc in vpcs:
print("\n" + "\n" + "REGION:" + region + "\n" + "VPC Id:" + vpc)
del_igw(ec2, vpc)
print(completed)
Related
I have created a lambda function to send billing reports to certain email addresses, for that im using my email address and app password. I have saved the app password under secret manager as other api. When i try to retrieve it using the lambda function it shows error.
import boto3
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import ast
import os
import datetime
import base64
import logging
import collections
import json
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def get_secret():
secret_name = "email_app_password"
region_name = "ca-central-1"
# Create a Secrets Manager client
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name
)
# In this sample we only handle the specific exceptions for the 'GetSecretValue' API.
# See https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
# We rethrow the exception by default.
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
if e.response['Error']['Code'] == 'DecryptionFailureException':
# Secrets Manager can't decrypt the protected secret text using the provided KMS key.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InternalServiceErrorException':
# An error occurred on the server side.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidParameterException':
# You provided an invalid value for a parameter.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'InvalidRequestException':
# You provided a parameter value that is not valid for the current state of the resource.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
elif e.response['Error']['Code'] == 'ResourceNotFoundException':
# We can't find the resource that you asked for.
# Deal with the exception here, and/or rethrow at your discretion.
raise e
else:
# Decrypts secret using the associated KMS key.
# Depending on whether the secret is a string or binary, one of these fields will be populated.
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
else:
decoded_binary_secret = base64.b64decode(get_secret_value_response['SecretBinary'])
# Your code goes here.
def send_email(email_body):
"""
Sends email according to environment variables.
:param email_body: Body of email.
:return: None
"""
msg = MIMEMultipart('alternative')
email = os.environ['EMAIL_FROM']
try:
secret_value = get_secret()
print(secret_value[os.environ['SECRET_KEY_NAME']])
except Exception as e:
logger.exception("Exception while trying to get password from Secrets Manager")
return
sec_key = os.environ['SECRET_KEY_NAME']
password = ast.literal_eval(secret_value)[os.environ['SECRET_KEY_NAME']]
msg['Subject'] = os.environ["EMAIL_SUBJECT"]
msg['From'] = email
you = os.environ['EMAIL_TO'].split(',')
msg['To'] = ", ".join(you)
body = email_body
msg.attach(MIMEText(body, 'html'))
try:
smtpObj = smtplib.SMTP('smtp.gmail.com', 587)
smtpObj.starttls()
smtpObj.login(email, password)
smtpObj.sendmail(email, you, msg.as_string())
smtpObj.quit()
logger.info('Email sent')
except smtplib.SMTPException as e:
logger.info("Error: unable to send email due to %s", e)
class FileOpener:
"""
Class to cache file contents.
"""
file_cache = {}
#staticmethod
def open_file(filename):
if filename not in FileOpener.file_cache:
with open(filename) as fp:
FileOpener.file_cache[filename] = fp.read()
return FileOpener.file_cache[filename]
def get_account_cost(account_no, start_day, end_day):
client = boto3.client('ce')
response = client.get_cost_and_usage(
TimePeriod={
'Start': start_day,
'End': end_day
},
Granularity='MONTHLY',
Filter={
"And": [{
"Dimensions": {
"Key": "LINKED_ACCOUNT",
"Values": [account_no]
}
}, {
"Not": {
"Dimensions": {
"Key": "RECORD_TYPE",
"Values": ["Credit", "Refund"]
}
}
}]
},
Metrics=["BlendedCost"],
GroupBy=[
{
'Type': 'DIMENSION',
'Key': 'SERVICE'
},
]
)
cost_dictionary = collections.Counter()
for result in response['ResultsByTime']:
for group in result['Groups']:
servicename = group['Keys'][0]
amount = round(float(group['Metrics']['BlendedCost']['Amount']), 2)
if amount == 0.0:
continue
cost_dictionary[servicename] += amount
return cost_dictionary
def combine_cost_dictionaries(prev_cost_dict, curr_cost_dict):
combined_cost_dict = {}
prev_cost_total = 0.0
curr_cost_total = 0.0
for service, curr_cost in curr_cost_dict.items():
prev_cost = 0.0
if service in prev_cost_dict:
prev_cost = prev_cost_dict[service]
combined_cost_dict[service] = (prev_cost, curr_cost)
prev_cost_total += prev_cost
curr_cost_total += curr_cost
for service, prev_cost in prev_cost_dict.items():
curr_cost = 0.0
if service not in combined_cost_dict:
combined_cost_dict[service] = (prev_cost, curr_cost)
prev_cost_total += prev_cost
curr_cost_total += curr_cost
return combined_cost_dict, prev_cost_total, curr_cost_total
def generate_account_cost_html(account_name, combined_cost_dict, prev_cost_total, curr_cost_total):
prev_date = str((datetime.datetime.now().date() - datetime.timedelta(days=2)))
curr_date = str((datetime.datetime.now().date() - datetime.timedelta(days=1)))
table_rows = ""
sorted_combined_cost = sorted(combined_cost_dict.items(), key=lambda x: x[1][1], reverse=True)
for service, costs in sorted_combined_cost:
table_row = FileOpener.open_file("table_row.html")
prev_cost = round(float(costs[0]), 2)
curr_cost = round(float(costs[1]), 2)
if prev_cost < 0.01:
percentage_change = 'New Charge'
else:
percentage_change = ((curr_cost - prev_cost) / prev_cost) * 100
percentage_change = round(float(percentage_change), 2)
if percentage_change > 0:
percentage_change = "↑ {}%".format(percentage_change)
elif percentage_change == 0.0:
percentage_change = "{}%".format(percentage_change)
else:
percentage_change = "↓ {}%".format(percentage_change)
if percentage_change[0] == '↑':
percentage_css = "background-color: pink;border:darkblue solid thin;"
elif percentage_change == 'New Charge':
percentage_css = "background-color: LightGreen;border:darkblue solid thin;"
else:
percentage_css = "border:lightblue solid thin;"
table_rows += table_row.format(service, prev_cost, curr_cost, percentage_css, percentage_change)
prev_cost_total = round(float(prev_cost_total), 2)
curr_cost_total = round(float(curr_cost_total), 2)
table = FileOpener.open_file("table.html")
table = table.format(account_name, prev_cost_total, curr_cost_total, prev_date, curr_date, table_rows)
return table
def lambda_handler(event, context):
account_names = os.environ['ACCOUNT_NAMES'].split(",")
account_numbers = os.environ['ACCOUNT_NUMBERS'].split(",")
table_body_html = ''
for account_name, account_no in zip(account_names, account_numbers):
day_1 = str((datetime.datetime.now().date() - datetime.timedelta(days=2)))
day_2 = str((datetime.datetime.now().date() - datetime.timedelta(days=1)))
day_3 = str(datetime.datetime.now().date())
prev_cost_dict = get_account_cost(account_no, day_1, day_2)
curr_cost_dict = get_account_cost(account_no, day_2, day_3)
combined_cost_dict, prev_cost_total, curr_cost_total = combine_cost_dictionaries(prev_cost_dict, curr_cost_dict)
table_body = generate_account_cost_html(account_name, combined_cost_dict, prev_cost_total, curr_cost_total)
table_body_html += table_body
email_body = FileOpener.open_file("email_body.html").format(table_body_html)
send_email(email_body)
I get the following error when running the lambda function. I have even tried removing the print line. Then the error goes to password = ast.literal_eval(secret_value)[os.environ['SECRET_KEY_NAME']]
[ERROR] 2022-08-18T06:02:03.968Z 2ae88ceb-39a6-4feb-aa7c-2cbb17ec655c Exception while trying to get password from Secrets Manager
Traceback (most recent call last):
File "/var/task/lambda_function.py", line 88, in send_email
print(secret_value[os.environ['SECRET_KEY_NAME']])
TypeError: 'NoneType' object is not subscriptableEND RequestId: 2ae88ceb-39a6-4feb-aa7c-2cbb17ec655c
Your get_secret function does not return the value to the calling code. Please edit the else clause:
else:
# Decrypts secret using the associated KMS key.
# Depending on whether the secret is a string or binary, one of these fields will be populated.
if 'SecretString' in get_secret_value_response:
return get_secret_value_response['SecretString']
else:
return base64.b64decode(get_secret_value_response['SecretBinary'])
I am using the instances.list method from cloud sql admin-api to retrieve the information about instances in project. I am using the example code provided by google and it provides information about the instance. I am retrieving the name of the instance and then I need to update the instance labels if the label is matching the provided 'RC_PlatformCode'. This needs to be done to all sql instances in project matching the specific label. How can this be achieved as my code is not working. Or is there an easier way to do this in Python?
from config import Config, log, get_secret
from botocore.exceptions import ClientError
from typing import Dict, Iterable
from pprint import pprint
from googleapiclient import discovery
import json
import os
config = Config()
os.environ["GOOGLE_APPLICATION_CREDENTIALS"]="creds.json"
def updateSqlLabels(data, account):
log.info("-----")
log.info("updating Cloud Storage labels")
RC_PlatformCode = data['NewImage']['PlatformCode']['S']
platformcode_gcp = 'rc_platformcode'
tagKey = data['NewImage']['Key']['S']
tagValue = data['NewImage']['Value']['S']
char_to_replace = {
'#': '_at_',
'.': '_'
}
tagKey = tagKey.lower()
for key, value in char_to_replace.items():
tagValue = tagValue.replace(key, value)
service = discovery.build('sqladmin', 'v1beta4')
project = account # TODO: Update placeholder value.
request = service.instances().list(project=project)
while request is not None:
response = request.execute()
for database_instance in response['items']:
# TODO: Change code below to process each `database_instance` resource:
log.info("db_name = " + database_instance['name'])
update_tag = False
try:
labels = database_instance['settings']['userLabels']
log.info("tags -> " + str(labels))
except ClientError:
continue
for label in labels:
if labels["rc_platformcode"] == RC_PlatformCode:
log.info(
f"RC_PlatformCode [{RC_PlatformCode}] present for instance [{database_instance['name']}]")
update_tag = True
break
if update_tag:
create_tag = True
log.info("processing instance -> " + database_instance['name'])
log.info("setting tag Key -> " + tagKey)
log.info("setting tag Value -> " + tagValue)
for label in labels:
log.info("checking tag -> " + label)
labels[tagKey] = tagValue
instance_labels = labels
database_instance_body = {
'settings': {
'userLabels': instance_labels
}
}
log.info("project = " + project)
log.info("instance = " + database_instance['name'])
log.info("labels = " + str(database_instance_body))
request = service.instances().patch(project=project, instance=database_instance['name'], body=database_instance_body)
response = request.execute
break
I am receiving following error :
Response
{
"errorMessage": "'items'",
"errorType": "KeyError",
"stackTrace": [
" File \"/var/task/handler.py\", line 63, in handler\n updateSqlLabels(data, account)\n",
" File \"/var/task/cloudSql.py\", line 32, in updateSqlLabels\n for database_instance in response['items']:\n"
]
}
Any tips and help would be appreciated
I have a python application where a client retrieves csv data row by row from a server using a grpc stream. Data from each row is added to a dictionary which in turn is saved to a redis database. When I attempt to retrieve the data from the redis database in a seperate flask application, the data is not coming out in order and is duplicating much of the time. How can I retrieve the data in order of the key without duplicates?
Client
def run():
#Average number of comments metric
average_num_comments = 0
response_count = 0
comment_count = 0
try:
conn = redis.StrictRedis(host='redis', port=6379)
conn.flushdb()
except Exception as ex:
print('Error:', ex)
while True:
with grpc.insecure_channel('redditserver:50051') as channel:
stub = route_guide_pb2_grpc.RouteGuideStub(channel)
responses = stub.SendRedditPost(route_guide_pb2.PostRequestReddit(response='Recieved'))
#Single post with most letters in title
lg_post_title = ''
for response in responses:
response_count += 1
comment_count = int(response.num_comments) + comment_count
average_num_comments = avg(response_count, comment_count)
if (len(response.title) > len(lg_post_title)):
lg_post_title = response.title
redisdict = {"Largest Post Title": lg_post_title, "Comment Count": comment_count, "Average No. Comments": average_num_comments}
try:
conn = redis.StrictRedis(host='redis', port=6379)
conn.hmset(response_count, redisdict)
except Exception as ex:
print('Error:', ex)
time.sleep(2)
Flask Application
[![#app.route('/')
def get_page():
data = ''
try:
conn = redis.StrictRedis(host='redis', port=6379, decode_responses=True)
for key in conn.scan_iter():
value = conn.hgetall(key)
data = value
time.sleep(2)
print("KEY: " + key, file=sys.stderr)
print(data, file=sys.stderr)
except Exception as ex:
data = 'Error:' + str(ex)
return render_template('index.html', x=data)][1]][1]
I'm trying to use for the first time ElasticSearch 6.4 with an existing web application wrote in Python/Django. I have some issues and I would like to understand why and how I can solve these issues.
###########
# Existing : #
###########
In my application, it's possible to upload document files (.pdf or .doc for example). Then, I have a search function in my application which let to search over documents indexed by ElasticSearch when they are uploaded.
Document title is always written through the same way :
YEAR - DOC_TYPE - ORGANISATION - document_title.extension
For example :
1970_ANNUAL_REPORT_APP-TEST_1342 - loremipsum.pdf
The search function is always done among doc_type = ANNUAL_REPORT. because there are several doc_types (ANNUAL_REPORT, OTHERS, ....).
##################
# My environment : #
##################
This is some data according to my ElasticSearch part. I'm learning ES commands too.
$ curl -XGET http://127.0.0.1:9200/_cat/indices?v
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
yellow open app 5T0HZTbmQU2-ZNJXlNb-zg 5 1 742 2 396.4kb 396.4kb
So my index is app
For the above example, if I search this document : 1970_ANNUAL_REPORT_APP-TEST_1342 - loremipsum.pdf, I have :
$ curl -XGET http://127.0.0.1:9200/app/annual-report/1343?pretty
{
"_index" : "app",
"_type" : "annual-report",
"_id" : "1343",
"_version" : 33,
"found" : true,
"_source" : {
"attachment" : {
"date" : "2010-03-04T12:08:00Z",
"content_type" : "application/pdf",
"author" : "manshanden",
"language" : "et",
"title" : "Microsoft Word - Test document Word.doc",
"content" : "some text ...",
"content_length" : 3926
},
"relative_path" : "app_docs/APP-TEST/1970_ANNUAL_REPORT_APP-TEST_1342.pdf",
"title" : "1970_ANNUAL_REPORT_APP-TEST_1342 - loremipsum.pdf"
}
}
Now, with my search part in my web application, I would like to find this document with this search : 1970.
def search_in_annual(self, q):
try:
response = self.es.search(
index='app', doc_type='annual-report',
q=q, _source_exclude=['data'], size=5000)
except ConnectionError:
return -1, None
total = 0
hits = []
if response:
for hit in response["hits"]["hits"]:
hits.append({
'id': hit['_id'],
'title': hit['_source']['title'],
'file': hit['_source']['relative_path'],
})
total = response["hits"]["total"]
return total, hits
But when q=1970, the result is 0
If I write :
response = self.es.search(
index='app', doc_type='annual-report',
q="q*", _source_exclude=['data'], size=5000)
It returns my document, but many documents too with no 1970 inside the title or the document content.
#################
# My global code : #
#################
This is the global class which manage indexing functions :
class EdqmES(object):
host = 'localhost'
port = 9200
es = None
def __init__(self, *args, **kwargs):
self.host = kwargs.pop('host', self.host)
self.port = kwargs.pop('port', self.port)
# Connect to ElasticSearch server
self.es = Elasticsearch([{
'host': self.host,
'port': self.port
}])
def __str__(self):
return self.host + ':' + self.port
#staticmethod
def file_encode(filename):
with open(filename, "rb") as f:
return b64encode(f.read()).decode('utf-8')
def create_pipeline(self):
body = {
"description": "Extract attachment information",
"processors": [
{"attachment": {
"field": "data",
"target_field": "attachment",
"indexed_chars": -1
}},
{"remove": {"field": "data"}}
]
}
self.es.index(
index='_ingest',
doc_type='pipeline',
id='attachment',
body=body
)
def index_document(self, doc, bulk=False):
filename = doc.get_filename()
try:
data = self.file_encode(filename)
except IOError:
data = ''
print('ERROR with ' + filename)
# TODO: log error
item_body = {
'_id': doc.id,
'data': data,
'relative_path': str(doc.file),
'title': doc.title,
}
if bulk:
return item_body
result1 = self.es.index(
index='app', doc_type='annual-report',
id=doc.id,
pipeline='attachment',
body=item_body,
request_timeout=60
)
print(result1)
return result1
def index_annual_reports(self):
list_docs = Document.objects.filter(category=Document.OPT_ANNUAL)
print(list_docs.count())
self.create_pipeline()
bulk = []
inserted = 0
for doc in list_docs:
inserted += 1
bulk.append(self.index_document(doc, True))
if inserted == 20:
inserted = 0
try:
print(helpers.bulk(self.es, bulk, index='app',
doc_type='annual-report',
pipeline='attachment',
request_timeout=60))
except BulkIndexError as err:
print(err)
bulk = []
if inserted:
print(helpers.bulk(
self.es, bulk, index='app',
doc_type='annual-report',
pipeline='attachment', request_timeout=60))
My document is indexed when he's submitted thanks a Django form with a signal :
#receiver(signals.post_save, sender=Document, dispatch_uid='add_new_doc')
def add_document_handler(sender, instance=None, created=False, **kwargs):
""" When a document is created index new annual report (only) with Elasticsearch and update conformity date if the
document is a new declaration of conformity
:param sender: Class which is concerned
:type sender: the model class
:param instance: Object which was just saved
:type instance: model instance
:param created: True for a creation, False for an update
:type created: boolean
:param kwargs: Additional parameter of the signal
:type kwargs: dict
"""
if not created:
return
# Index only annual reports
elif instance.category == Document.OPT_ANNUAL:
es = EdqmES()
es.index_document(instance)
This is what I've done and it seems to work :
def search_in_annual(self, q):
try:
response = self.es.search(
index='app', doc_type='annual-report', q=q, _source_exclude=['data'], size=5000)
if response['hits']['total'] == 0:
response = self.es.search(
index='app', doc_type='annual-report',
body={
"query":
{"prefix": {"title": q}},
}, _source_exclude=['data'], size=5000)
except ConnectionError:
return -1, None
total = 0
hits = []
if response:
for hit in response["hits"]["hits"]:
hits.append({
'id': hit['_id'],
'title': hit['_source']['title'],
'file': hit['_source']['relative_path'],
})
total = response["hits"]["total"]
return total, hits
It lets to search over title, prefix and content to find my document.
I am a novice at Python but am learning as I go. I found this script and it works well but I wanted to make some edits to it so that it also saves the name of the instance that it created a snapshot for.
import boto3
import collections
import datetime
#allows Python developers to write software that makes use of Amazon services like S3 and EC2
ec = boto3.client('ec2')
#finds tags-keys with the name "backup" or "Backup"
def lambda_handler(event, context):
reservations = ec.describe_instances(
Filters=[
{'Name': 'tag-key', 'Values': ['backup', 'Backup']},
]
).get(
'Reservations', []
)
instances = [
i for r in reservations
for i in r['Instances']
]
print "Found %d instances that need backing up" % len(instances)
to_tag = collections.defaultdict(list)
#find tag-keys with the name Retention default value if NULL is 7 days
for instance in instances:
try:
retention_days = [
int(t.get('Value')) for t in instance['Tags']
if t['Key'] == 'Retention'][0]
except IndexError:
retention_days = 7
for dev in instance['BlockDeviceMappings']:
if dev.get('Ebs', None) is None:
continue
vol_id = dev['Ebs']['VolumeId']
print "Found EBS volume %s on instance %s" % (
vol_id, instance['InstanceId'])
snap = ec.create_snapshot(
VolumeId=vol_id,
)
to_tag[retention_days].append(snap['SnapshotId'])
print "Retaining snapshot %s of volume %s from instance %s for %d days" % (
snap['SnapshotId'],
vol_id,
instance['InstanceId'],
retention_days,
)
#set retention days according to the value int input
for retention_days in to_tag.keys():
delete_date = datetime.date.today() + datetime.timedelta(days=retention_days)
delete_fmt = delete_date.strftime('%Y-%m-%d')
print "Will delete %d snapshots on %s" % (len(to_tag[retention_days]), delete_fmt)
ec.create_tags(
Resources=to_tag[retention_days],
Tags=[
{'Key': 'DeleteOn', 'Value': delete_fmt},
]
)
So far I have this but am a little lost as to how to make it work in with the current script above:
snapshot = ec(to_tag['SnapshotId'])
volumename = ''
# Add volume name to snapshot for easier identification
snapshot.create_tags(Tags=[{'Key': 'Name', 'Value': volumename}])
Any ideas welcomed! Thanks.
import boto3
import collections
import datetime
#allows Python developers to write software that makes use of Amazon services like S3 and EC2
ec = boto3.client('ec2')
sns_client = boto3.client('sns')
#finds tags-keys with the name "backup" or "Backup"
def lambda_handler(event, context):
reservations = ec.describe_instances(
Filters=[
{'Name': 'tag-key', 'Values': ['backup', 'Backup']},
]
).get(
'Reservations', []
)
instances = [
i for r in reservations
for i in r['Instances']
]
print "Found %d instances that need backing up" % len(instances)
to_tag = collections.defaultdict(list)
#find tag-keys with the name Retention default value if NULL is 7 days
for instance in instances:
try:
retention_days = [
int(t.get('Value')) for t in instance['Tags']
if t['Key'] == 'Retention'][0]
except IndexError:
retention_days = 7
for dev in instance['BlockDeviceMappings']:
if dev.get('Ebs', None) is None:
continue
vol_id = dev['Ebs']['VolumeId']
print "Found EBS volume %s on instance %s" % (
vol_id, instance['InstanceId'])
volumes = ec2_client.describe_volumes()
volumes = volumes["Volumes"]
volumes_list = []
for volume in volumes:
volumes_list.append([volume["Tags"][0]["Value"], volume["VolumeId"]])
for volume in volumes_list:
try:
create_snapshot_response = ec2_client.create_snapshot(
VolumeId=volume[1],
Description=volume[0] + " " + str(datetime.now()).split(" ")[0],
)
snapshot_id = create_snapshot_response["SnapshotId"]
tags = ec2_client.create_tags(
Resources=[snapshot_id],
Tags=[{
"Key": "Name",
"Value": "{}: {}".format(volume[0], str(datetime.now()).split(" ")[0])
}]
)
to_tag[retention_days].append(snap['SnapshotId'])
print "Retaining snapshot %s of volume %s from instance %s for %d days" % (
snap['SnapshotId'],
vol_id,
instance['InstanceId'],
retention_days,
)
#set retention days according to the value int input
for retention_days in to_tag.keys():
delete_date = datetime.date.today() + datetime.timedelta(days=retention_days)
delete_fmt = delete_date.strftime('%Y-%m-%d')
print "Will delete %d snapshots on %s" % (len(to_tag[retention_days]), delete_fmt)
ec.create_tags(
Resources=to_tag[retention_days],
Tags=[
{'Key': 'DeleteOn', 'Value': delete_fmt},
]
)
import boto3
ec2_client = boto3.client('ec2')
def lambda_handler(event, context):
instances = ec2_client.describe_instances()['Reservations']
for i in instances:
try:
create_snapshot_response = ec2_client.create_snapshot(
VolumeId=i['Instances'][0]['BlockDeviceMappings'][0]["Ebs"]["VolumeId"]
)
snapshot_id = create_snapshot_response["SnapshotId"]
tags = ec2_client.create_tags(
Resources=[snapshot_id],
Tags=[{
"Key": "Name",
"Value": "{}".format(i['Instances'][0]["Tags"][0]['Value'])
}]
)
except Exception as e:
print(e)
return "Success"
Krishna did what I have in my lambda for providing instance names on my snapshots....one change i had was
instance_name = ""
if 'Tags' in instance:
for tag in instance['Tags']:
if tag['Key'] == 'Name':
instance_name = tag['Value']
if not instance_name:
instance_name = instance['InstanceId']
snap = ec.create_snapshot(
VolumeId=vol_id,
TagSpecifications=[{
'ResourceType': 'snapshot',
'Tags': [{
'Key': 'Name',
'Value': instance_name
}]
}]
)
Update lines 43-45 to following:
instance_name = ""
if 'Tags' in instance:
for tag in instance['Tags']:
if tag['Key'] == 'Name':
instance_name = tag['Value']
if not instance_name:
instance_name = instance['InstanceId']
snap = ec.create_snapshot(
VolumeId=vol_id,
TagSpecifications=[{
'ResourceType': 'snapshot',
'Tags': [{
'Key': 'Name',
'Value': instance['InstanceId']
}]
}]
)