Starting EC2 instance with Linux services health check - python

I am a beginner in DevOps and a noob at programming. I have been assigned a task to autostart a group of instances in a specific sequence, checking the health of each instance's Linux services before starting the next one.
I found an auto stop/start Python script that can be run as a Lambda function, but I am clueless about how to start the instances sequentially and check the health of the server services.
I would really appreciate it if someone could help me out or guide me on how to do that.
Thank you
import boto3
import requests
import time

region = 'region'
instances = ['']
ec2 = boto3.client('ec2', region_name=region)

def Ec2Instance1():
    ec2.start_instances(InstanceIds=instances)
    print('started your instances: ' + str(instances))

def lambda_handler(event, context):
    websiteURL = ['https://example1.com', 'https://example2.com', 'https://example3.com']
    topicArnCode = 'arn:aws:sns:ap-southeast-1:123:sample'
    for x in websiteURL:
        print(x)
        r = requests.get(x, verify=False)
        print(r)
        if r.status_code == 200:
            Ec2Instance1()
            time.sleep(10)
        else:
            # notify via SNS when the site does not answer with 200
            sns_client = boto3.client('sns')
            sns_client.publish(
                TopicArn=topicArnCode,
                Subject='Website is not reachable ' + x,
                Message='Website: ' + x + ' is down\n')
            print('Website is dead')

import boto3
import requests
import time
from botocore.exceptions import ClientError

AWS_Access_Key_ID = ''      # fill in your access key ID
AWS_Secret_Access_Key = ''  # fill in your secret access key
DELAY_TIME = 10  # 10 seconds

region = 'us-east-2'
# instances = ['']
# map each instance ID to the URL it serves
instances = {
    'instance id': 'http://link',
    'instance id': 'http://link'
}

ec2 = None
try:
    ec2 = boto3.client('ec2', aws_access_key_id=AWS_Access_Key_ID,
                       aws_secret_access_key=AWS_Secret_Access_Key, region_name=region)
    # ec2 = boto3.resource('ec2', aws_access_key_id=AWS_Access_Key_ID,
    #                      aws_secret_access_key=AWS_Secret_Access_Key, region_name=region)
except Exception as e:
    print(e)
    print("AWS CREDS ERROR, Exiting...")
    exit()

def startInstances(instancesIds):
    if type(instancesIds) != list:
        instancesIds = [instancesIds]
    try:
        response = ec2.start_instances(InstanceIds=instancesIds, DryRun=False)
        print(response)
        print("Instances Started")
    except ClientError as e:
        print(e)
        print("Instances Failed to Start")

def stopInstances(instancesIds):
    if type(instancesIds) != list:
        instancesIds = [instancesIds]
    try:
        response = ec2.stop_instances(InstanceIds=instancesIds, DryRun=False)
        print(response)
        print("Instances Stopped")
    except ClientError as e:
        print(e)
        print("Instances Failed to Stop")

def check():
    for x in instances:
        retry = 0
        live = False
        print("Checking Website " + instances[x])
        while retry < 5:
            try:
                r = requests.get(instances[x], verify=True)
                if r.status_code == 200:
                    live = True
                    break
            except:
                print("Not Live, retry time " + str(retry + 1))
            print("Delaying request for " + str(DELAY_TIME) + " seconds...")
            retry += 1
            time.sleep(DELAY_TIME)
        if live:
            print("Website is live")
            # call function to start the ec2 instance
            startInstances(x)
        else:
            # call function to stop the ec2 instance
            print('Website is dead')
            stopInstances(x)
        print("")

def main():
    check()

if __name__ == '__main__':
    main()
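To meet the original requirement of starting the instances in a specific sequence, one option is to combine boto3's instance_running waiter with an HTTP health check, so each instance must be both running and serving traffic before the next one is started. Below is a minimal sketch along those lines; it assumes each service exposes an HTTP endpoint that returns 200 when healthy, and the region, instance IDs, and URLs are placeholders.
import boto3
import requests
import time

region = 'us-east-2'  # example region
sequence = [  # hypothetical instance IDs and health URLs, in boot order
    ('i-0123456789abcdef0', 'https://example1.com'),
    ('i-0123456789abcdef1', 'https://example2.com'),
]
ec2 = boto3.client('ec2', region_name=region)

def wait_until_healthy(url, retries=30, delay=10):
    # Poll the service URL until it answers 200, or give up.
    for _ in range(retries):
        try:
            if requests.get(url, timeout=5).status_code == 200:
                return True
        except requests.RequestException:
            pass
        time.sleep(delay)
    return False

for instance_id, health_url in sequence:
    ec2.start_instances(InstanceIds=[instance_id])
    # Block until EC2 reports the instance as running...
    ec2.get_waiter('instance_running').wait(InstanceIds=[instance_id])
    # ...then block until the Linux service answers before starting the next one.
    if not wait_until_healthy(health_url):
        raise RuntimeError(instance_id + " never became healthy; stopping the sequence")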


String to call specific data from a host Inventory

I'm looking for some guidance on how to get this code to point to the correct inventory within the Zabbix API.
Currently it pulls all data from Inventory > Hosts > Latest Data.
Basically I'm trying to change the request so the data grab goes to Inventory > Hosts > Details and then grabs 'Location latitude' and 'Location longitude'.
My first assumption was that the application parameter inside getinventory() was the culprit, but even when I change it my output stays the same.
If you need any further information, please let me know.
import sys
import datetime
import csv
import re
import requests
import tkinter as tk
from tkinter import filedialog
from pyzabbix import ZabbixAPI, ZabbixAPIException

def initializeapi():
    tries = 4
    while tries >= 0:
        user = "XXX"
        password = "XXX"
        if isinstance(user, str) == True and isinstance(password, str) == True:
            try:
                z.login(user=user, password=password)
                print("Logged into ZabbixAPI version " + z.api_version() + ".")
                return True
            except ZabbixAPIException as e:
                print(e)
                tries -= 1
            except requests.Timeout as f:
                print(f, "\nProgram will now exit.")
                sys.exit(2)
        else:
            print("Username and password must be strings.")
    else:
        print("Too many failed login attempts.")
        return False

def getinventory(listname, hostid=''):
    if isinstance(listname, list):
        if len(hostid) != 0:
            for i in z.item.get(output='extend', hostids=hostid, application='Monitoring'):
                j = [i['hostid'], i['itemid'], i['name'], i['lastvalue'], i['units'], i['description'], i["location_lon"]]
                listname.append(j)
        else:
            for i in z.item.get(output='extend', application='Monitoring'):
                j = [i['hostid'], i['itemid'], i['name'], i['lastvalue'], i['units'], i['description']]
                listname.append(j)
    else:
        print("Must pass list variable.")
        return False
    return True

def validateserver(serverip):
    if re.search('http://', serverip):
        return True
    elif re.search('https://', serverip):
        return True
    else:
        return False

def gethostdict(dictname):
    if isinstance(dictname, dict):
        for h in z.host.get(output="extend"):
            dictname[h['hostid']] = h['name']
    else:
        print("Must pass dict variable.")
        return False
    return True

def hostchange(listname, dictname):
    for index, item in enumerate(listname):
        if isinstance(item, list):
            hostchange(item, dictname)
        elif item in dictname.keys():
            listname[index] = dictname[item]
    return

def writecsv(writelist):
    with open(getfilepath(), 'w', newline='', encoding="utf-8") as result:
        writer = csv.writer(result, dialect='excel')
        header = ['Host', 'Item ID', 'Name', 'Value', 'Units', 'Description']
        writer.writerow(header)
        writer.writerows(writelist)

def getfilepath():
    root = tk.Tk()
    return filedialog.asksaveasfilename(initialdir=r'XXX', defaultextension='.csv',
                                        initialfile='Inventory ' + str(datetime.date.today()),
                                        filetypes=(("Comma Separated Values", '*.csv'), ("All Files", '*.*')))

if __name__ == '__main__':
    retries = 4
    while retries >= 0:
        serverip = "XXX"
        if validateserver(serverip):
            timeout = 3.5
            try:
                z = ZabbixAPI(str(serverip), timeout=timeout)
            except ZabbixAPIException as e:
                print(e)
            if initializeapi():
                break
        elif retries > 0:
            retries -= 1
        else:
            print("Too many failed attempts.")
            sys.exit(2)
    list1 = []
    dict1 = {}
    getinventory(list1)
    gethostdict(dict1)
    hostchange(list1, dict1)
    writecsv(list1)
    print("Complete.")
Refer to this documentation: https://www.zabbix.com/documentation/current/en/manual/api/reference/host/object#host-inventory
The simple Python script below works for me.
# using a token
import requests
import json

# replace <your zabbix server ip> in the url
url = 'http://<your zabbix server ip>/api_jsonrpc.php'
# replace <your zabbix auth token> in the payload
payload = '{"jsonrpc": "2.0", "method": "host.get", "params": {"output": ["hostid","host","name","status"],"selectInventory": ["os_full","tag","location","location_lat","location_lon"]}, "auth": "<your zabbix auth token>", "id": 1 }'
headers = {'content-type': 'application/json-rpc'}
r = requests.post(url, data=payload, headers=headers)
hostslist = r.json()['result']
print(hostslist)
Here is the Python script using a username and password:
from pyzabbix import ZabbixAPI

ZABBIX_SERVER = 'http://<your zabbix server>'
with ZabbixAPI(ZABBIX_SERVER) as zapi:
    zapi.login('<username>', '<password>')
    hosts = zapi.host.get(output=['hostid', 'itemid', 'name', 'lastvalue', 'units', 'description'],
                          selectInventory=['location', 'location_lat', 'location_lon'])
    for host in hosts:
        print(host)
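If you only need the coordinates, they can be read from the inventory of each host returned above. A minimal sketch, assuming each host dict carries an 'inventory' mapping (Zabbix returns an empty value when inventory is disabled, hence the guard):
for host in hosts:
    inventory = host.get('inventory') or {}
    print(host['name'], inventory.get('location_lat', ''), inventory.get('location_lon', ''))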

Appending twice in csv report when trying to list firewall rules in gcp using python

import requests
import json
import re
import sys
import subprocess
import os
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
from google.oauth2 import service_account

credentials = service_account.Credentials.from_service_account_file("")
service = discovery.build('cloudresourcemanager', 'v1', credentials=credentials)
request = service.projects().list()

token1 = subprocess.Popen("gcloud auth print-access-token", stdout=subprocess.PIPE, shell=True)
token, error = token1.communicate()
token = str(token.decode("utf-8"))
token = token.rstrip("\n")
token = token.rstrip("\r")

Compliance = [""]
ComplianceFlag = 0
PROTOCOL = "-"
PORT = "-"

f = open("xxxxxxxxx.csv", 'w')
f.write("ProjectId, VPC, Rule Name, Direction, Compliance, SourceRange, IPProtocol, Port\n")

while request is not None:
    response = request.execute()
    for project in response.get('projects', []):
        projectid = project['projectId']
        projectname = project['name']
        headers = {
            'Authorization': 'Bearer ' + token,
            'x-goog-user-project': projectid
        }
        count = 0
        try:
            get_url = "https://compute.googleapis.com/compute/v1/projects/" + projectid + "/global/firewalls"
            get_url_data = requests.get(get_url, headers=headers)
            get_api2_json = json.loads(get_url_data.text)
            for vpc in get_api2_json["items"]:
                vpcname = vpc["network"]
                vpcname = vpcname.split("/")[-1]
                rulename = vpc["name"]
                direction = vpc["direction"]
                try:
                    try:
                        for sr in vpc["sourceRanges"]:
                            if "y.y.y.y/y" in sr:
                                Compliance.append("NonCompliant")
                                ComplianceFlag = 1
                            for allowed in vpc["allowed"]:
                                PROTOCOL = allowed["IPProtocol"]
                                if PROTOCOL == "all":
                                    Compliance.append("NonCompliant")
                                    ComplianceFlag = 1
                                try:
                                    for port in allowed["ports"]:
                                        if "22" in port or "139" in port:
                                            Compliance.append("NonCompliantport")
                                            ComplianceFlag = 1
                                            PORT = port
                                    f.write("{},{},{},{},{},{},{},{}\n".format(projectid, vpcname, rulename, direction, ' '.join([str(elem) for elem in Compliance]), sr, PROTOCOL, PORT))
                                except KeyError as e:
                                    f.write("{},{},{},{},{},{},{},{}\n".format(projectid, vpcname, rulename, direction, ' '.join([str(elem) for elem in Compliance]), sr, PROTOCOL, "-"))
                            if ComplianceFlag == 0:
                                Compliance = [""]
                            ComplianceFlag = 0
                            Compliance = [""]
                    except KeyError as e:
                        f.write("{},{},{},{},{},{},{}\n".format(projectid, vpcname, rulename, direction, ' '.join([str(elem) for elem in Compliance]), PROTOCOL, PORT))
                        ComplianceFlag = 0
                        Compliance = [""]
                    print("")
                except Exception as e:
                    print(e)
                    pass
        except Exception as e:
            print(e)
            pass
    request = service.projects().list_next(previous_request=request, previous_response=response)
f.close()
print(count)
All I am trying to do here is generate a CSV report listing the firewall rules in GCP along with a compliance check (whether each rule is compliant or non-compliant). When I append the compliance check value, it is appended twice in the report.
This is the part of the code where NonCompliantport is appended twice:
try:
    for port in allowed["ports"]:
        if "22" in port or "139" in port:
            Compliance.append("NonCompliantport")
            ComplianceFlag = 1
            PORT = port
    f.write("{},{},{},{},{},{},{},{}\n".format(projectid, vpcname, rulename, direction, ' '.join([str(elem) for elem in Compliance]), sr, PROTOCOL, PORT))
except KeyError as e:
    f.write("{},{},{},{},{},{},{},{}\n".format(projectid, vpcname, rulename, direction, ' '.join([str(elem) for elem in Compliance]), sr, PROTOCOL, "-"))
if ComplianceFlag == 0:
    Compliance = [""]
ComplianceFlag = 0
Compliance = [""]
Any idea how to resolve this?
Can you try adding a check on the ComplianceFlag like this:
for port in allowed["ports"]:
    if ("22" in port or "139" in port) and ComplianceFlag == 0:
        Compliance.append("NonCompliantport")
        ComplianceFlag = 1
        PORT = port
f.write("{},{},{},{},{},{},{},{}\n".format(projectid, vpcname, rulename, direction, ' '.join([str(elem) for elem in Compliance]), sr, PROTOCOL, PORT))

Gmail API socket.timeout : The read operation timed out

My program, which utilizes the Python Gmail API, has been encountering a lot of socket.timeout errors. I very frequently receive the following:
socket.timeout: The read operation timed out
The error appears to be random and generally occurs with any Gmail API function. I have tried modifying the socket timeout parameter, varying it from 1 second to 10 seconds to 600 seconds, but changing it does not seem to remove the issue.
socket.setdefaulttimeout(10)
Or for an httplib2.Http object:
def build_http(self):
    """Builds an httplib2.Http object.

    Returns:
        An httplib2.Http object, which is used to make http requests and which has a timeout set by default.
        To override the default timeout, call
            socket.setdefaulttimeout(timeout_in_sec)
        before interacting with this method.
    """
    try:
        return httplib2.Http(timeout=10)
    except:
        self.GLogger.error("An error was encountered in build_http")
        tb = traceback.format_exc()
        self.GLogger.exception(tb)
        return False
The following code is used to initialize the Gmail API service.
def gmailAPIInitialize(self):
    try:
        self.waitForInternet()
        self.GLogger.info("Initializing the Gmail API Service")
        creds = None
        # The file token.pickle stores the user's access and refresh tokens, and is
        # created automatically when the authorization flow completes for the first
        # time.
        if self.reprocess is True:
            token_pickle_file = 'Gmail_token_2.pickle'
            credentials_file = 'Gmail_credentials_2.json'
        else:
            token_pickle_file = 'Gmail_token_1.pickle'
            credentials_file = 'Gmail_credentials_1.json'
        if os.path.exists(token_pickle_file):
            with open(token_pickle_file, 'rb') as token:
                creds = pickle.load(token)
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(credentials_file, SCOPES)
                creds = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open(token_pickle_file, 'wb') as token:
                pickle.dump(creds, token)
        service = build('gmail', 'v1', credentials=creds, cache_discovery=False)
        self.gmailAPIService = service
        self.GLogger.info("Successfully initialized the Gmail API Service")
        return True
    except:
        self.GLogger.error("An error was encountered while attempting to initialize the Gmail API")
        tb = traceback.format_exc()
        self.GLogger.exception(tb)
        return False
Again, pretty much any function will trigger this issue. But here are some examples:
Thread List:
def gmailAPIMessageLabelSearchThreads(self, labelList, userID="me", allPages=False, reverseOrder=False):
    try:
        self.GLogger.info("Attempting to search email threads with labelList (" + str(labelList) + ") and userID (" + str(userID) + ")")
        service = self.gmailAPIService
        if service is None:
            logging.error('Gmail Service not initialized')
            return False
        # threads().list responses carry a 'threads' key rather than 'messages'
        response = service.users().threads().list(userId=userID, labelIds=labelList, maxResults=500, fields="threads(id),nextPageToken").execute()
        threads = []
        if 'threads' in response:
            threads.extend(response['threads'])
        if allPages is True:
            while 'nextPageToken' in response:
                page_token = response['nextPageToken']
                response = service.users().threads().list(userId=userID, labelIds=labelList, pageToken=page_token, maxResults=500, fields="threads(id),nextPageToken").execute()
                if 'threads' in response:
                    threads.extend(response['threads'])
        if reverseOrder is True:
            threads.reverse()
        self.GLogger.info("Successfully searched email threads with labelList (" + str(labelList) + ") and userID (" + str(userID) + "). Number of matching threads (" + str(len(threads)) + ")")
        return threads
    except:
        self.GLogger.error("An error was encountered while searching for threads with google API and label list")
        tb = traceback.format_exc()
        self.GLogger.exception(tb)
        return False
Send Email:
def gmailAPISendEmail(self, message, userID="me"):
    try:
        service = self.gmailAPIService
        self.GLogger.info("Attempting to send email message")
        response = (service.users().messages().send(userId=userID, body=message).execute())
        responseID = str(response['id'])
        self.GLogger.info("Successfully sent email message with ID (" + responseID + ")")
        return responseID
    except:
        self.GLogger.error("Failed to send email message")
        tb = traceback.format_exc()
        self.GLogger.exception(tb)
        return False
Message List:
def gmailAPIMessageLabelSearch(self, labelList, userID="me", allPages=False, reverseOrder=False, numPages=None):
    try:
        self.GLogger.info("Attempting to search emails with labelList (" + str(labelList) + ") and userID (" + str(userID) + ")")
        service = self.gmailAPIService
        if service is None:
            logging.error('Gmail Service not initialized')
            return False
        #response = service.users().messages().list(userId=userID, labelIds=labelList, fields='messages(id)').execute()
        response = service.users().messages().list(userId=userID, labelIds=labelList, maxResults=500, fields="messages(id),nextPageToken").execute()
        messages = []
        if 'messages' in response:
            messages.extend(response['messages'])
        numPages_Processed = 0
        if allPages is True:
            while 'nextPageToken' in response:
                page_token = response['nextPageToken']
                #response = service.users().messages().list(userId=userID, labelIds=labelList, pageToken=page_token, fields='messages(id)', maxResults=500).execute()
                response = service.users().messages().list(userId=userID, labelIds=labelList, pageToken=page_token, maxResults=500, fields="messages(id),nextPageToken").execute()
                if 'messages' in response:
                    messages.extend(response['messages'])
                numPages_Processed = numPages_Processed + 1
                if (numPages is not None) and numPages_Processed >= numPages:
                    break
        if reverseOrder is True:
            messages.reverse()
        self.GLogger.info("Successfully searched emails with labelList (" + str(labelList) + ") and userID (" + str(userID) + "). Number of matching emails (" + str(len(messages)) + ")")
        listToReturn = list()
        for message in messages:
            listToReturn.append(message['id'])
        return listToReturn
    except:
        self.GLogger.error("An error was encountered while searching for messages with google API and label list")
        tb = traceback.format_exc()
        self.GLogger.exception(tb)
        return False
Attachment Download:
def gmailAPIDownloadAttachments(self, messageID, message=None, userID="me"):
    try:
        service = self.gmailAPIService
        self.GLogger.info("Attempting to download attachments from messageID (" + str(messageID) + ")")
        if message is None:
            message = self.gmailAPIGetFullMessage(messageID, userID=userID)
            if message is False:
                self.GLogger.error("Failed to extract message (" + str(messageID) + ") for downloading attachments")
                return False
        attachmentList = list()
        payload = message['payload']
        if 'parts' in payload:
            parts = payload['parts']
            for part in parts:
                if part['filename']:
                    if 'data' in part['body']:
                        data = part['body']['data']
                    else:
                        att_id = part['body']['attachmentId']
                        att = service.users().messages().attachments().get(userId=userID, messageId=messageID, id=att_id).execute()
                        data = att['data']
                    file_data = base64.urlsafe_b64decode(data.encode('UTF-8'))
                    filename = part['filename']
                    extSearch = filename.find('.')
                    if extSearch == -1:
                        ext = ""
                        partFileName = filename
                    else:
                        ext = filename[extSearch + 1:]
                        partFileName = filename[0:extSearch]
                    theAttachment = Attachment(filename, partFileName, ext, file_data)
                    attachmentList.append(theAttachment)
        self.GLogger.info("Successfully downloaded attachments from messageID (" + str(messageID) + ")")
        return(attachmentList)
    except:
        self.GLogger.error("Encountered an error while attempting to download email attachments from messageID (" + str(messageID) + ")")
        tb = traceback.format_exc()
        self.GLogger.exception(tb)
        return False
Batch Requests, where searchResultParts is a list of lists, where each embedded list contains 100 message IDs:
for searchResultPart in searchResultParts:
    batch = service.new_batch_http_request(callback=self.theEmailCallback)
    for msgID in searchResultPart:  # Loop through each messageID
        request1 = service.users().messages().get(userId=userID, id=msgID)
        batch.add(request=request1, request_id=msgID)
    batch.execute(http=self.http_toUse)
There are several Python Gmail API functions that I use, and these are just a subset; however, all of them tend to produce the socket.timeout error at some point. Currently, my firewall is off (ufw is inactive).
I believe I am using these functions as intended and that the issue lies with Google. Are there any Google team members here who could look into this? What can I do to resolve it? These socket.timeout errors occur so often that they are causing issues with my application.
My internet connection is gigabit fiber for both upload and download.
Edit: I am now routing all of my Python Gmail API requests through the function below. It waits 50 ms (plus 50 ms more per retry) and retries until the request succeeds or 10 retries have been made. The socket.timeout error is still prevalent, but from my observations most requests go through after 2 or 3 retries.
def executeGmailAPI_withretry(self, request):
    try:
        response_valid = False
        num_retries = 0
        while num_retries < 10:
            try:
                response = request.execute()
                response_valid = True
                break
            except socket.timeout:
                num_retries = num_retries + 1
                time.sleep(0.05 * num_retries)
            except:
                self.GLogger.error("An error was encountered in executeGmailAPI_withretry")
                tb = traceback.format_exc()
                self.GLogger.exception(tb)
                num_retries = num_retries + 1
                time.sleep(0.05 * num_retries)
        if response_valid is False:
            return False
        else:
            return response
    except:
        self.GLogger.error("An error was encountered in executeGmailAPI_withretry")
        tb = traceback.format_exc()
        self.GLogger.exception(tb)
        return False
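As an alternative to the hand-rolled retry loop, recent versions of google-api-python-client accept a num_retries argument on execute(), which retries transient failures (including socket timeouts) with exponential backoff. A one-line sketch, assuming the same service object as above:
response = service.users().messages().list(
    userId="me", labelIds=["INBOX"], maxResults=500
).execute(num_retries=5)  # retry transient failures up to 5 times with backoff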

Search haveibeenpwned for all emails on a domain

I am able to use haveibeenpwned to search for one compromised account. However, I could not find an option to use the API key to search for compromises of all the email accounts on a domain. (For example, if the domain is xyz.com, I want to search for the compromise of abc@xyz.com, peter.charlie@xyz.com, and so on.) I am aware of the notification email that I can sign up for, but that is a lengthy process and I prefer using the API.
So I wrote a script to search haveibeenpwned for all the email addresses of my domain, but it takes very long. I searched through a couple of GitHub projects, but I did not find any such implementation. Has anyone tried this before?
I have added the code below. I am using a multi-threading approach, but it still takes very long. Is there any other optimization strategy I can use? Please help. Thank you.
import requests, json
import threading
from time import sleep
import datetime
import splunklib.client as client
import splunklib.results as results
from itertools import islice
import linecache
import sys

date = datetime.datetime.now()
api_key = "xxx"  # HIBP API key

def PrintException():
    exc_type, exc_obj, tb = sys.exc_info()
    f = tb.tb_frame
    lineno = tb.tb_lineno
    filename = f.f_code.co_filename
    linecache.checkcache(filename)
    line = linecache.getline(filename, lineno, f.f_globals)
    print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)

class myThread(threading.Thread):
    def __init__(self, threadID, name, list_emails):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.name = name
        self.list_emails = list_emails

    def run(self):
        i = 0
        print "Starting " + self.name
        for email in self.list_emails:
            print i
            i = i + 1
            result = check_pasteaccount(email)
            print email
            print result
        print "Exiting " + self.name

def check_pasteaccount(account):
    account = str(account)
    result = ""
    URL = "https://haveibeenpwned.com/api/v3/pasteaccount/%s?truncateResponse=false" % (account)
    # print(URL)
    headers = {'hibp-api-key': api_key}
    try:
        r = requests.get(url=URL, headers=headers)
        # sleep(2)
        status_code = r.status_code
        if status_code == 200:
            data = r.text
            result = []
            for entry in json.loads(data.decode('utf8')):
                if int((date - datetime.datetime.strptime(entry['Date'], '%Y-%m-%dT%H:%M:%SZ')).days) > 120:
                    pass
                else:
                    result.append(['Title: {0}'.format(entry['Title']),
                                   'Source: {0}'.format(entry['Source']),
                                   'Paste ID: {0}'.format(entry['Id'])])
            if len(result) == 0:
                result = "No paste reported for given account and time frame."
            else:
                paste_result = ""
                for entry in result:
                    for item in entry:
                        paste_result += str(item) + "\r\n"
                    paste_result += "\r\n"
                result = paste_result
        elif status_code == 404:
            result = "No paste for the account"
        else:
            if status_code == 429:
                sleep(5)
                # print "Limit exceeded, sleeping"
                result = check_pasteaccount(account)
            else:
                result = "Exception"
                print status_code
    except Exception as e:
        result = "Exception"
        PrintException()
        pass
    return result

def split_every(n, iterable):
    iterable = iter(iterable)
    for chunk in iter(lambda: list(islice(iterable, n)), []):
        yield chunk

def main():
    print datetime.datetime.now()
    # Fetching the list of email addresses from Splunk
    list_emails = connect_splunk()
    print datetime.datetime.now()
    i = 0
    list_split = split_every(1000, list_emails)
    threads = []
    for list in list_split:
        i = i + 1
        thread_name = "Thread" + str(i)
        thread = myThread(1, thread_name, list)
        thread.start()
        threads.append(thread)
    # Wait for all the threads to complete
    for t in threads:
        t.join()
    print "Completed Search"
Here's a shorter and maybe more efficient version of your script using the standard multiprocessing library instead of a hand-rolled thread system.
You'll need Python 3.6+ since we're using f-strings.
You'll need to install the tqdm module for fancy progress bars.
You can adjust the number of concurrent requests with the pool size parameter.
Output is written in machine-readable JSON Lines format into a timestamped file.
A single requests session is shared (per-worker), which means less time spent connecting to HIBP.
import datetime
import json
import multiprocessing
import random
import time

import requests
import tqdm

HIBP_PARAMS = {
    "truncateResponse": "false",
}
HIBP_HEADERS = {
    "hibp-api-key": "xxx",
}
sess = requests.Session()

def check_pasteaccount(account):
    while True:
        resp = sess.get(
            url=f"https://haveibeenpwned.com/api/v3/pasteaccount/{account}",
            params=HIBP_PARAMS,
            headers=HIBP_HEADERS,
        )
        if resp.status_code == 429:
            print("Quota exceeded, waiting for a while")
            time.sleep(random.uniform(3, 7))
            continue
        if resp.status_code >= 400:
            return {
                "account": account,
                "status": resp.status_code,
                "result": resp.text,
            }
        return {
            "account": account,
            "status": resp.status_code,
            "result": resp.json(),
        }

def connect_splunk():
    # TODO: return emails
    return []

def main():
    list_emails = [str(account) for account in connect_splunk()]
    datestamp = datetime.datetime.now().isoformat().replace(":", "-")
    output_filename = f"accounts-log-{datestamp}.jsonl"
    print(f"Accounts to look up: {len(list_emails)}")
    print(f"Output filename: {output_filename}")
    with multiprocessing.Pool(processes=16) as p:
        with open(output_filename, "a") as f:
            results_iterable = p.imap_unordered(
                check_pasteaccount, list_emails, chunksize=20
            )
            for result in tqdm.tqdm(
                results_iterable,
                total=len(list_emails),
                unit="acc",
                unit_scale=True,
            ):
                print(json.dumps(result, sort_keys=True), file=f)

if __name__ == "__main__":
    main()
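Since each result lands as one JSON object per line, the output file can be post-processed without loading everything at once. A small usage sketch (the filename here is an example):
import json

with open("accounts-log-2021-01-01T00-00-00.jsonl") as f:
    for line in f:
        record = json.loads(line)
        if record["status"] == 200:
            print(record["account"], "appears in pastes")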

Lambda Gives an error { "errorMessage": "Process exited before completing request" }

Trying to execute this Lambda function, I get the error:
{ "errorMessage": "RequestId: 6db7d67e-78e9-43e5-a325-09206e4514ac Process exited before completing request" }
The script is meant to notify AWS IAM users when their passwords and access keys are about to expire.
from __future__ import print_function
import boto3
from botocore.exceptions import ClientError
import os
import json
import csv
from time import sleep
import datetime
import dateutil.parser
import sys

# These should be passed in via Lambda Environment Variables
try:
    BLACKHOLE_GROUPNAME = os.environ['BLACKHOLE_GROUPNAME']
    ACTION_TOPIC_ARN = os.environ['ACTION_TOPIC_ARN']
    GRACE_PERIOD = int(os.environ['GRACE_PERIOD'])
    DISABLE_USERS = os.environ['DISABLE_USERS']
    SEND_EMAIL = os.environ['SEND_EMAIL']
    FROM_ADDRESS = os.environ['FROM_ADDRESS']
    EXPLANATION_FOOTER = os.environ['EXPLANATION_FOOTER']
    EXPLANATION_HEADER = os.environ['EXPLANATION_HEADER']
except KeyError as e:
    print("Key Error: " + str(e))
    sys.exit(1)

# Define a global string to be the report output sent to ACTION_TOPIC_ARN
ACTION_SUMMARY = ""
REPORT_SUMMARY = ""

print('Loading function')

if DISABLE_USERS == "true":
    expired_message = "\n\tYour Password is {} days post expiration. Your permissions have been revoked. "
    key_expired_message = "\n\tYour AccessKey ID {} is {} days post expiration. It has been deactivated. "
else:
    expired_message = "\n\tYour Password is {} days post expiration. You must change your password or risk losing access. "
    key_expired_message = "\n\tYour AccessKey ID {} is {} days post expiration. You must rotate this key or it will be deactivated. "

key_warn_message = "\n\tYour AccessKey ID {} is {} days from expiration. You must rotate this key or it will be deactivated. "
password_warn_message = "\n\tYour Password will expire in {} days"
email_subject = "Credential Expiration Notice From AWS Account: {}"

def lambda_handler(event, context):
    print("Received event: " + json.dumps(event, sort_keys=True))
    iam_client = boto3.client('iam')
    try:
        if event['source'] == "aws.iam":
            process_IAMEvent(event, context, iam_client)
        else:
            process_UsersCron(iam_client)
    except KeyError as e:
        # Probably called as a test event without a source. This is what we want to do here.
        process_UsersCron(iam_client)
    return

def process_UsersCron(iam_client):
    global ACTION_SUMMARY  # This is what we send to the admins
    global REPORT_SUMMARY
    max_age = get_max_password_age(iam_client)
    account_name = iam_client.list_account_aliases()['AccountAliases'][0]
    credential_report = get_credential_report(iam_client)
    # Iterate over the credential report, use the report to determine password expiration.
    # Then query for access keys, and use the key creation date to determine key expiration.
    for row in credential_report:
        if row['password_enabled'] != "true": continue  # Skip IAM Users without passwords, they are service accounts
        message = ""  # This is what we send to the user
        if is_user_expired(row['user']) == 0:
            # Process their password
            password_expires = days_till_expire(row['password_last_changed'], max_age)
            if password_expires <= 0:
                REPORT_SUMMARY = REPORT_SUMMARY + "\n{}'s Password expired {} days ago".format(row['user'], password_expires * -1)
                message = message + expired_message.format(password_expires * -1)
                add_user_to_blackhole(row['user'], iam_client)
            elif password_expires < GRACE_PERIOD:
                message = message + password_warn_message.format(password_expires)
                REPORT_SUMMARY = REPORT_SUMMARY + "\n{}'s Password Will expire in {} days".format(row['user'], password_expires)
        try:
            # Process their Access Keys
            response = iam_client.list_access_keys(UserName=row['user'])
            for key in response['AccessKeyMetadata']:
                if key['Status'] == "Inactive": continue
                key_expires = days_till_expire(key['CreateDate'], max_age)
                if key_expires <= 0:
                    message = message + key_expired_message.format(key['AccessKeyId'], key_expires * -1)
                    disable_users_key(key['AccessKeyId'], row['user'], iam_client)
                    REPORT_SUMMARY = REPORT_SUMMARY + "\n {}'s Key {} expired {} days ago ".format(row['user'], key['AccessKeyId'], key_expires * -1)
                elif key_expires < GRACE_PERIOD:
                    message = message + key_warn_message.format(key['AccessKeyId'], key_expires)
                    REPORT_SUMMARY = REPORT_SUMMARY + "\n {}'s Key {} will expire {} days from now ".format(row['user'], key['AccessKeyId'], key_expires)
        except ClientError as e:
            continue
        # Email user if necessary
        if message != "":
            email_user(row['user'], message, account_name)
    # All done. Send a summary to the ACTION_TOPIC_ARN, and print one out for the Lambda logs.
    print("Action Summary:" + ACTION_SUMMARY)
    if ACTION_SUMMARY != "": send_summary()
    if REPORT_SUMMARY != "": email_user(FROM_ADDRESS, REPORT_SUMMARY, account_name)
    return

def is_user_expired(username):
    client = boto3.client('iam')
    try:
        response = client.list_groups_for_user(UserName=username)
    except ClientError as e:
        return 1
    for group in response['Groups']:
        if group['GroupName'] == BLACKHOLE_GROUPNAME:
            return 1
    return 0

def email_user(email, message, account_name):
    global ACTION_SUMMARY  # This is what we send to the admins
    if SEND_EMAIL != "true": return  # Abort if we're not supposed to send email
    if message == "": return  # Don't send an empty message
    client = boto3.client('ses')
    body = EXPLANATION_HEADER + "\n" + message + "\n\n" + EXPLANATION_FOOTER
    try:
        response = client.send_email(
            Source=FROM_ADDRESS,
            Destination={'ToAddresses': [email]},
            Message={
                'Subject': {'Data': email_subject.format(account_name)},
                'Body': {'Text': {'Data': body}}
            }
        )
        ACTION_SUMMARY = ACTION_SUMMARY + "\nEmail Sent to {}".format(email)
        return
    except ClientError as e:
        print("Failed to send message to {}: {}".format(email, str(e)))
        ACTION_SUMMARY = ACTION_SUMMARY + "\nERROR: Message to {} was rejected: {}".format(email, str(e))

def days_till_expire(last_changed, max_age):
    # last_changed can either be a string to parse or already a datetime object.
    # Handle these accordingly.
    if type(last_changed) is str:
        last_changed_date = dateutil.parser.parse(last_changed).date()
    elif type(last_changed) is datetime.datetime:
        last_changed_date = last_changed.date()
    else:
        # print("last_changed", last_changed)
        # print(type(last_changed))
        return -99999
    expires = (last_changed_date + datetime.timedelta(max_age)) - datetime.date.today()
    return(expires.days)

# Request the credential report, download and parse the CSV.
def get_credential_report(iam_client):
    resp1 = iam_client.generate_credential_report()
    if resp1['State'] == 'COMPLETE':
        try:
            response = iam_client.get_credential_report()
            credential_report_csv = response['Content'].decode('utf-8')  # Content is bytes under Python 3
            # print(credential_report_csv)
            reader = csv.DictReader(credential_report_csv.splitlines())
            # print(reader.fieldnames)
            credential_report = []
            for row in reader:
                credential_report.append(row)
            return(credential_report)
        except ClientError as e:
            print("Unknown error getting Report: " + str(e))
    else:
        sleep(2)
        return get_credential_report(iam_client)

# Query the account's password policy for the password age. Return that number of days.
def get_max_password_age(iam_client):
    try:
        response = iam_client.get_account_password_policy()
        return response['PasswordPolicy']['MaxPasswordAge']
    except ClientError as e:
        print("Unexpected error in get_max_password_age: " + str(e))

# If called by an IAM Event, do stuff. Not yet implemented.
def process_IAMEvent(event, context, iam_client):
    api_call = event['detail']['eventName']
    if api_call == "CreateLoginProfile":
        process_CreateLoginProfile(event, context)
        return 0
    elif api_call == "EnableMFADevice":
        process_EnableMFADevice(event, context)
        return 0
    elif api_call == "DeactivateMFADevice":
        process_DeactivateMFADevice(event, context)
        return 0
    else:
        raise Exception("Invalid API Call: " + api_call)

# Add the user to the group that only allows them to reset their password.
def add_user_to_blackhole(username, iam_client):
    if DISABLE_USERS != "true": return
    global ACTION_SUMMARY
    ACTION_SUMMARY = ACTION_SUMMARY + "\nAdding {} to Blackhole Group".format(username)
    response = iam_client.add_user_to_group(
        GroupName=os.environ['BLACKHOLE_GROUPNAME'],
        UserName=username
    )
    if response['ResponseMetadata']['HTTPStatusCode'] != 200:
        handle_error("Adding User to Blackhole Group", username, response['ResponseMetadata'])
    else:
        return 0

# Turn off the specified user's key by setting it to inactive.
def disable_users_key(AccessKeyId, UserName, iam_client):
    if DISABLE_USERS != "true": return
    global ACTION_SUMMARY
    ACTION_SUMMARY = ACTION_SUMMARY + "\nDisabling AccessKeyId {} for user {}".format(AccessKeyId, UserName)
    response = iam_client.update_access_key(
        UserName=UserName,
        AccessKeyId=AccessKeyId,
        Status='Inactive'
    )
    if response['ResponseMetadata']['HTTPStatusCode'] != 200:
        handle_error("Disabling Access Key", UserName, response['ResponseMetadata'])
    else:
        return 0

# Not used, but would remove the user from the blackhole group once they did change their password.
def remove_user_from_blackhole(username, iam_client):
    response = iam_client.remove_user_from_group(
        GroupName=os.environ['BLACKHOLE_GROUPNAME'],
        UserName=username
    )
    if response['ResponseMetadata']['HTTPStatusCode'] != 200:
        handle_error("Removing User from Blackhole Group", username, response['ResponseMetadata'])
    else:
        return 0

def handle_error(action, username, ResponseMetadata):
    raise Exception("ERROR: " + action + " User: " + username + " Details: " + str(ResponseMetadata))

# Send the summary of actions taken to the SNS topic.
def send_summary():
    global ACTION_SUMMARY
    client = boto3.client('sns')
    message = "The following Actions were taken by the Expire Users Script at {}: ".format(datetime.datetime.now()) + ACTION_SUMMARY
    response = client.publish(
        TopicArn=ACTION_TOPIC_ARN,
        Message=message,
        Subject="Expire Users Report for {}".format(datetime.date.today())
    )
Sorry if this is repeated; I couldn't find a solution. If someone has a script to notify IAM users about their password expiry, that would work as well.
Thank you.
Have you checked the memory? If you run out of memory, you also get that message.
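If memory is the culprit, raising the function's memory (and timeout) is a quick test; this can be done from the console or, as a sketch, with boto3 (the function name here is a placeholder):
import boto3

lambda_client = boto3.client('lambda')
lambda_client.update_function_configuration(
    FunctionName='expire-users-notifier',  # hypothetical function name
    MemorySize=512,   # MB; the default is 128
    Timeout=120,      # seconds
)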
