Adding an SNS topic to a Python script to send email - python

I have a Python script that lists the IAM user names in AWS along with their last login, and I'm getting the expected result. Now I have added an SNS topic to send the result via email.
But the response I received in the email looks like this:
Response:
User not logged into AWS:
u
s
e
r
n
a
m
e
:
I need this on a single line, as Username: "*****"
physicalString = ''
for user in resource.users.all():
    if user.password_last_used is not None:
        delta = (today - user.password_last_used.replace(tzinfo=None)).days
        final_result = ("username: ", [user.user_name][0], " - ", delta, "days")
        physicalString = 'User not logged into AWS: \n\n' + '\n'.join(str(final_result))
        response = sns.publish(
            TopicArn='*************',
            Message=physicalString,
            Subject='IAM USER',
        )
return final_result

This:
'\n'.join(str(final_result))
says "take each character in final_result and put a \n between them."
The problem is that final_result is being converted to a string, so the join operates on each character rather than on each element.
Try:
'\n'.join(final_result)
but you will need to convert delta to str(delta) before storing it in final_result.
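Putting it together, a minimal sketch of the corrected loop body (variable names follow the question; resource, sns, and today are assumed to be set up as in the original script). Since the goal is a single line per user, ''.join is the closer fit here; '\n'.join(final_result) would put each element on its own line:

final_result = ("username: ", user.user_name, " - ", str(delta), " days")
physicalString = 'User not logged into AWS: \n\n' + ''.join(final_result)
response = sns.publish(
    TopicArn='*************',
    Message=physicalString,
    Subject='IAM USER',
)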

Related

I want help regarding tweepy: hiding your username

Here is what I am trying to do: I am trying to get my Twitter bot to give maths answers to users, using the WolframAlpha API.
Here is the problem I am facing:
people will mention my Twitter username to activate the bot, for example: @twitterusername 2+2
WolframAlpha will then take the whole input @twitterusername 2+2, which gives me an error. I want it to ignore the username.
Here is my code:
def respondToTweet(file='tweet_ID.txt'):
    last_id = get_last_tweet(file)
    mentions = api.mentions_timeline(last_id, tweet_mode='extended')
    if len(mentions) == 0:
        return
    new_id = 0
    logger.info("someone mentioned me...")
    for mention in reversed(mentions):
        logger.info(str(mention.id) + '-' + mention.full_text)
        new_id = mention.id
        status = api.get_status(mention.id)
        if '@Saketltd01' in mention.full_text.lower():
            logger.info("Responding back with QOD to -{}".format(mention.id))
            client = wolframalpha.Client(app_id)
            query = mention.full_text.lower()
            rest = client.query(query)
            answer = next(rest.results).text
            Wallpaper.get_wallpaper(answer)
            media = api.media_upload("created_image.png")
            logger.info("liking and replying to tweet")
            api.create_favorite(mention.id)
            api.update_status('@' + mention.user.screen_name, mention.id,
                              media_ids=[media.media_id])
    put_last_tweet(file, new_id)

def main():
    respondToTweet()
When you take the whole input, remember to strip it down by removing your username from the actual input string, and then perform the mathematical operation on what is left:
myUsername = "@my_username"
equation = userInput.replace(myUsername, '', 1).strip()
perform_desired_operation_on(equation)  # user-defined function
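A self-contained sketch of the same idea (the handle and helper name are placeholders; str.removeprefix requires Python 3.9+, and unlike str.lstrip it removes an exact prefix rather than stripping any of the given characters):

def extract_equation(tweet_text, username="@my_username"):
    # Drop the leading mention, then pass the remainder to WolframAlpha.
    return tweet_text.removeprefix(username).strip()

print(extract_equation("@my_username 2+2"))  # prints "2+2"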

Code times out when trying to run as a lambda function in AWS

Below is my code, and I am hoping someone can help me clean it up and make it more efficient. Basically, the code should iterate through all the volumes in my AWS account, list all untagged volumes, and then send out an email. However, it times out when run as a Lambda function in AWS, and if I run it locally it takes over 30 minutes to complete (although it does complete). I'm sure it's iterating through things it doesn't need to.
Also, if I print the ec2_instances list, I can see duplicate values, so I want to keep only unique values so that the script isn't repeated for each EC2 instance.
import logging
import boto3
from smtplib import SMTP, SMTPException
from email.mime.text import MIMEText

logger = logging.getLogger()
logger.setLevel(logging.INFO)

session = boto3.Session(profile_name="prod")
client = session.client('ec2')

untagged_volumes = []
detached_volumes = []
ec2_instances = []

response = client.describe_volumes()
for volume in response['Volumes']:
    if 'Tags' in str(volume):
        continue
    else:
        if 'available' in str(volume):
            detached_volumes.append(volume['VolumeId'])
        else:
            untagged_volumes.append(volume['VolumeId'])
            untagged_volumes.append(volume['Attachments'][0]['InstanceId'])
            ec2_instances.append(volume['Attachments'][0]['InstanceId'])

unique_instances = list(set(ec2_instances))

# Create the msg body.
msg_body_list = []
for instance in unique_instances:
    desc_instance = client.describe_instances()
    # append to msg_body_list the lines that we would like to show in the email
    msg_body_list.append("VolumeID: {}".format(desc_instance['Reservations'][0]['Instances'][0]['BlockDeviceMappings'][0]['Ebs']['VolumeId']))
    msg_body_list.append("Attached Instance: {}".format(desc_instance['Reservations'][0]['Instances'][0]['InstanceId']))
    # if there are tags, append them as single lines, one per tag
    if 'Tags' in desc_instance['Reservations'][0]['Instances'][0]:
        msg_body_list.append("Tags:")
        for tag in desc_instance['Reservations'][0]['Instances'][0]['Tags']:
            msg_body_list.append("  Key: {} | Value: {}".format(tag['Key'], tag['Value']))
    # in case we don't have tags, just append "no tags"
    else:
        msg_body_list.append("Tags: no tags")
    msg_body_list.append("--------------------")

# send email
mail_from = "xxx@xxx.com"
mail_to = 'xxx@xxx.com'
msg = MIMEText("\n".join(msg_body_list))
msg["Subject"] = "EBS Tagged Instance Report for"
msg["From"] = mail_from
msg["To"] = mail_to
try:
    server = SMTP('xxx.xxx.xxx.xxx', 'xx')
    server.sendmail(mail_from, mail_to.split(','), msg.as_string())
    server.quit()
    print('Email sent')
except SMTPException:
    print('ERROR! Unable to send mail')
Lambda functions have a maximum timeout of 15 minutes. That is the reason for the timeout; if you need to run scripts for longer, look at AWS Fargate.
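As for the slow loop itself: the code calls describe_instances() with no arguments once per instance, which downloads every instance in the account each time. A minimal sketch of the usual fix, assuming the unique_instances list from the question (the instance ID below is a placeholder): request only the instances you need, in one call, and index the result by ID.

import boto3

client = boto3.client('ec2')
unique_instances = ['i-0123456789abcdef0']  # placeholder; built from the volume scan

# One API call for all instances of interest, instead of a full scan per instance.
resp = client.describe_instances(InstanceIds=unique_instances)
instances_by_id = {
    inst['InstanceId']: inst
    for reservation in resp['Reservations']
    for inst in reservation['Instances']
}

for instance_id in unique_instances:
    inst = instances_by_id[instance_id]
    print(instance_id, inst.get('Tags', 'no tags'))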

Python error: not reading config.cfg?

Traceback (most recent call last):
File "/Users/jondevereux/Desktop/Data reporting/kpex_code/1PD/api_python_publisher_1PD.py", line 40, in <module>
username = parser.get('api_samples', 'username')
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/ConfigParser.py", line 607, in get
raise NoSectionError(section)
ConfigParser.NoSectionError: No section: 'api_samples'
The config file is in the correct directory (the same one as the .py) and has the appropriate section api_samples:
[api_samples]
authentication_url = https://crowdcontrol.lotame.com/auth/v1/tickets
api_url = https://api.lotame.com/2/
username = xxx
password = xxx
The script works on a co-worker's PC but not on mine. I had to use pip to install requests, so I'm wondering if I'm missing something else.
The code is as follows:
# Set up the libs we need
import requests
import sys
import csv
import json
from ConfigParser import SafeConfigParser  # used to get information from a config file

reload(sys)
sys.setdefaultencoding('utf-8')

'''
Now let's get what we need from our config file, including the username and password
We are assuming we have a config file called config.config in the same directory
where this python script is run, where the config file looks like:

[api_samples]
authentication_url = https://crowdcontrol.lotame.com/auth/v1/tickets
api_url = https://api.lotame.com/2/
username = USERNAME_FOR_API_CALLS
password = PASSWORD
'''
# Set up our Parser and get the values - usernames and passwords should never be in code!
parser = SafeConfigParser()
parser.read('config.cfg')

username = parser.get('api_samples', 'username')
password = parser.get('api_samples', 'password')
authentication_url = parser.get('api_samples', 'authentication_url')
base_api_url = parser.get('api_samples', 'api_url')

# OK, all set with our parameters, let's get ready to make our call to get a Ticket Granting Ticket
# Add the username and password to the payload (requests encodes for us, no need to urlencode)
payload = {'username': username,
           'password': password}

# We want to set some headers since we are going to post some url encoded params.
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", "User-Agent": "python"}

# Now, let's make our Ticket Granting Ticket request. We get the location from the response header
tg_ticket_location = requests.post(authentication_url, data=payload).headers['location']

# Let's take a look at what a Ticket Granting Ticket looks like:
# print ('Ticket Granting Ticket - %s \n') % (tg_ticket_location[tg_ticket_location.rfind('/') + 1:])

# Now we have our Ticket Granting Ticket, we can get a service ticket for the service we want to call
# The first service call will be to get information on behavior id 5990.
# service_call = base_api_url + 'behaviors/5990'

# Add the service call to the payload and get the ticket
# payload = {'service': service_call}
# service_ticket = requests.post( tg_ticket_location, data=payload ).text

# Let's take a look at the service ticket
# print ('Here is our Service Ticket - %s \n') % ( service_ticket )

'''
Now let's make our call to the service ... remember we need to be quick about it because
we only have 10 seconds to do it before the Service Ticket expires.

A couple of things to note:
    JSON is the default response, and it is what we want, so we don't need to specify
    like {'Accept':'application/json'}, but we will anyway because it is a good practice.
    We don't need to pass any parameters to this call, so we just add the parameter
    notation and then 'ticket=[The Service Ticket]'
'''
headers = {'Accept': 'application/json'}
# behavior_info = requests.get( ('%s?ticket=%s') % (service_call, service_ticket), headers=headers)

# Let's print out our JSON to see what it looks like
# requests supports JSON on its own, so no other package is needed for this
# print ('Behavior Information: \n %s \n') % (behavior_info.json() )

'''
Now let's get the names and IDs of some audiences
We can reuse our Ticket Granting Ticket for a 3 hour period ( we haven't passed that yet),
so let's use it to get a service ticket for the audiences service call.

Note that here we do have a parameter that is part of the call. That needs to be included
in the Service Ticket request.

We plan to make a call to the audience service to get the first 10 audiences in the system
ascending by audience id. We don't need to pass the sort order, because it defaults to ascending
'''
# Set up our call and get our new Service Ticket, we plan to sort by id
# Please insert audience IDs below:
audienceids = ['243733','243736','241134','242480','240678','242473','242483','241119','243732','242492','243784','242497','242485','243785','242486','242487','245166','245167','245168','245169','245170','245171','240860']

f = open("publisher_report_1PD.csv", 'w+')
title_str = ['1PD % Contribution','audienceId','publisherName','audienceName']
print >> f, (title_str)

for audience_id in audienceids:
    service_call = base_api_url + 'reports/audiences/' + audience_id + '/publisher?stat_interval=LAST_MONTH&page_count=100&page_num=1&sort_attr=audienceName&inc_network=false&sort_order=ASC'
    payload = {'service': service_call}
    # Let's get the new Service Ticket, we can print it again to see it is a new ticket
    service_ticket = requests.post( tg_ticket_location, data=payload ).text
    # print ('Here is our new Service Ticket - %s \n') % ( service_ticket )

    # Use the new ticket to query the service, remember we did have a parameter this time,
    # so we need to add '&ticket=[The Service Ticket]' to the parameter list
    audience_list = requests.get( ('%s&ticket=%s') % (service_call, service_ticket)).json()
    # print audience_list

    # create an array to hold the audiences, pull out the details we want, and print it out
    audiences = []
    for ln in audience_list['stats']:
        audiences.append({ 'audienceId': ln['audienceId'], 'audienceName': ln['audienceName'], 'publisherName': ln['publisherName'], '1PD % Contribution': ln['percentOfAudience']})
    for ii in range( 0, len(audiences) ):
        data = audiences[ii]
        data_str = json.dumps(data)
        result = data_str.replace("\"","")
        result1 = result.replace("{1PD % Contribution:","")
        result2 = result1.replace("publisherName: ","")
        result3 = result2.replace("audienceName: ","")
        result4 = result3.replace("audienceId: ","")
        result5 = result4.replace("}","")
        print >> f, (result5)

# Once we are done with the Ticket Granting Ticket we should clean it up
remove_tgt = requests.delete( tg_ticket_location )
print ( 'Status for closing TGT - %s') % (remove_tgt.status_code)

i = input('YAY! Gotcha!!')
I see only one reason for your problem: you run the script from a different folder, so the script looks for config.cfg in that other folder.
You can get the full path to the folder containing the script:
import os
script_folder = os.path.dirname(os.path.realpath(__file__))
and build the full path to config.cfg:
parser.read(os.path.join(script_folder, 'config.cfg'))
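As a quick sanity check (a small sketch; SafeConfigParser is the Python 2 class the question already imports), read() returns the list of files it actually parsed, so an empty result means the config file was not found:

import os
from ConfigParser import SafeConfigParser

script_folder = os.path.dirname(os.path.realpath(__file__))
parser = SafeConfigParser()
parsed = parser.read(os.path.join(script_folder, 'config.cfg'))
if not parsed:
    raise IOError('config.cfg not found next to the script')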

Send an email only if # isn't in the CSV

I have two CSV files, one with IOC hits and a second that is a watchfile. The watchfile adds a # to the file along with the IOC domain and last-seen date. I'm trying to send one email when an IOC hits for that day, but I can't seem to get my loop right. Currently it emails every time, even though the # is present in watchfile.csv. I've printed the values for val and emailed and they show up in the correct format, but it still emails every time the script is run.
finalIOChit.csv
last: 2017-01-17 query: rabbitons.pw,
watchfile.csv
last: 2017-01-17 query: # rabbitons.pw,
import smtplib
import csv
import os
import re
from datetime import *

today = date.today()
today = datetime.combine(today, datetime.min.time())

# Setup email alerting
sender = 'server@company.com'
receivers = ['user@company.com']

patn = re.compile(r'20\d{2}-\d{2}-\d{2}')
watchfile = open('watchfile.csv', 'r+w')
alreadyemailed = re.compile('#')

with open('finalIOChit.csv') as finalhit:
    for hit in finalhit:
        for line in watchfile:
            emailed = alreadyemailed.findall(line)
        for match in patn.findall(hit):
            val = datetime.strptime(match, '%Y-%m-%d')
            if val == today and emailed != '#':
                hit = re.sub('query: ', 'query: # ', hit)
                watchfile.write(hit)
                message = """From: server <server@company.com>
To: user <user@company.com>
Subject: Passive DNS hit
"""
                subject = ' ' + str(hit)
                messagefull = message + subject
                try:
                    smtpObj = smtplib.SMTP('emailserver')
                    smtpObj.sendmail(sender, receivers, messagefull)
                except SMTPException:
                    print "Error: unable to send email"

How to deal with flooded unseen messages

I have written an email parsing mechanism in Python.
It finds a new email and passes the data correctly. I am 99.999% certain that my code is functioning correctly, so there should be no issue there. The problem is that occasionally the Gmail inbox will get flooded with messages that are considered "unseen". At this point, there is nothing my code can do.
It fails with:
imaplib.error: FETCH command error: BAD ['Could not parse command']
This is distressing, and I would love to have either
a way to check whether the unseen messages have overflowed to this state, or
a way to manually (via imaplib) mark all messages as read, including a way to detect this particular error.
Any thoughts on how to accomplish this?
Here is my code:
#!/usr/bin/env python
import imaplib, re, sys, time, OSC, threading, os

iparg = 'localhost'
oportarg = 9000
iportarg = 9002
usern = 'myusrname@gmail.com'
gpass = 'mypass'
kill_program = False

server = imaplib.IMAP4_SSL('imap.googlemail.com', 993)
oclient = OSC.OSCClient()
email_interval = 2.0

def login():
    server.login(usern, gpass)
    oclient.connect((iparg, oportarg))

def logout_handle(addr, tags, stuff, source):
    print 'received kill call'
    global kill_program
    kill_program = True

def filter_signature(s):  # so annoying; wish i didn't have to do this
    try:
        a_sig = re.sub(r'Sent|--Sent', '', s)
        b_sig = re.sub(r'using SMS-to-email. Reply to this email to text the sender back and', '', a_sig)
        c_sig = re.sub(r'save on SMS fees.', '', b_sig)
        d_sig = re.sub(r'https://www.google.com/voice', '', c_sig)
        no_lines = re.sub(r'\n|=|\r?', '', d_sig)  # add weird characters to this as needed
    except:
        no_lines = s
    return no_lines

def parse_email(interval):
    while True:
        server.select('INBOX')
        status, ids = server.search(None, 'UnSeen')
        print 'status is: ', status
        if not ids or ids[0] is '':
            print 'no new messages'
        else:
            try:
                print 'found a message; attempting to parse...'
                latest_id = ids[0]
                status, msg_data = server.fetch(latest_id, '(UID BODY[TEXT])')
                raw_data = msg_data[0][1]
                raw_filter = raw_data
                print 'message result: ', raw_filter
            except imaplib.IMAP4.error:
                # the matching except clause was lost in the paste;
                # presumably something like this handled the FETCH error
                pass
        time.sleep(interval)

# execute main block
while not kill_program:
    login()
    parse_email(email_interval)
st.kill()
sys.exit()
Based upon the error, I would very carefully check the parameters that you're passing to fetch. Gmail is telling you that it could not parse the command you sent to it. Note that search returns all of the matching IDs in one space-separated string, so when many messages are unseen, ids[0] is not a single message ID.
Also, you can do a STORE +FLAGS \Seen to mark the messages as read.
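A minimal imaplib sketch of that suggestion (placeholder credentials; error handling omitted):

import imaplib

server = imaplib.IMAP4_SSL('imap.googlemail.com', 993)
server.login('user@example.com', 'password')
server.select('INBOX')

# SEARCH returns the matching IDs as one space-separated string.
status, ids = server.search(None, 'UNSEEN')
for msg_id in ids[0].split():
    # Mark each unseen message as read without fetching it.
    server.store(msg_id, '+FLAGS', '\\Seen')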
