I am reading and creating calendar events for a set of email IDs through the Google Calendar API. If one of the email IDs is wrong, it throws an error.
googleapiclient.errors.HttpError: <HttpError 404 when requesting https://www.googleapis.com/calendar/v3/calendars/xxx%40gmail.com/events?timeMin=2019-12-18T00%3A00%3A00%2B05%3A30&maxResults=240&timeMax=2019-12-18T23%3A59%3A00%2B05%3A30&singleEvents=true&orderBy=startTime&alt=json returned "Not Found">
I understand the email is wrong and that's why I am getting this error. But I want to handle this exception: if an email is wrong, it should skip that email, carry on, and display the proper result.
What I tried is:
from googleapiclient.errors import HttpError
def my_function():
    try:
        ----
        -----
    except HttpError as err:
        print("The exception is", err)
    finally:
        return "I am returning whatever I get from try"
Is this try/except block correct?
For the above code I am getting the same googleapiclient error; it's not going inside the except block.
What I expect here is:
It should go into the try block; if one of the email IDs is wrong, it should skip that email ID and return the result of whatever is fetched in the try block.
In other words, it should omit the apiclient error and return the result.
#for calendar_id in calendar_ids:
eventsResult = service.events().list(calendarId=["a#gmail.com","b#gmail.com","c#gmail.com"],timeMin=start_date,timeMax=end_date,singleEvents=True,orderBy='startTime').execute()
events = eventsResult.get('items', [])
if not events:
    print('No upcoming events found.')
print(events)
while True:
    for event in events.get('items', []):
        print(event['summary'])
    page_token = events.get('nextPageToken')  # check if any event present in next page of the calendar
    if page_token:
        events = service.events().list(calendarId='primary', pageToken=page_token).execute()
    else:
        break
for calendar_id in calendar_ids:
    count = 0
    print('\n----%s:\n' % calendar_id)
    try:
        eventsResult = service.events().list(
            calendarId=calendar_id,
            timeMin=start_date,
            timeMax=end_date,
            singleEvents=True,
            orderBy='startTime').execute()
        events = eventsResult.get('items', [])
        if not events:
            print('No upcoming events found.')
        for event in events:
            if 'summary' in event:
                if 'PTO' in event['summary']:
                    count += 1
                    start = event['start'].get(
                        'dateTime', event['start'].get('date'))
                    print(start, event['summary'])
    except Exception as err:
        print("I am executing", err)
    finally:
        print('Total days off for %s is %d' % (calendar_id, count))
I have got the answer for this post: I used 'pass' in the exception block and it worked well. Thanks.
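For completeness, a minimal sketch of that skip-and-continue approach (same service, calendar_ids, start_date and end_date as above; continue is shown instead of pass to make the skip explicit):
from googleapiclient.errors import HttpError

all_events = []
for calendar_id in calendar_ids:
    try:
        eventsResult = service.events().list(
            calendarId=calendar_id,
            timeMin=start_date,
            timeMax=end_date,
            singleEvents=True,
            orderBy='startTime').execute()
    except HttpError as err:
        print("Skipping %s: %s" % (calendar_id, err))
        continue  # bad calendar ID: move on to the next one
    all_events.extend(eventsResult.get('items', []))
print(all_events)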
I am trying to follow the guide here to automate the rotation of keys for IAM users: https://awsfeed.com/whats-new/apn/automating-rotation-of-iam-user-access-and-secret-keys-with-aws-secrets-manager
Essentially I want to get new keys every 60 days, deactivate the old keys every 80 days, and then delete/remove old keys every 90 days.
I have slightly modified it to get new keys every 60 days instead of 90, and here is the lambda function:
import json
import boto3
import base64
import datetime
import os
from datetime import date
from botocore.exceptions import ClientError

iam = boto3.client('iam')
secretmanager = boto3.client('secretsmanager')

#IAM_UserName=os.environ['IAM_UserName']
#SecretName=os.environ['SecretName']

def create_key(uname):
    try:
        IAM_UserName = uname
        response = iam.create_access_key(UserName=IAM_UserName)
        AccessKey = response['AccessKey']['AccessKeyId']
        SecretKey = response['AccessKey']['SecretAccessKey']
        json_data = json.dumps({'AccessKey': AccessKey, 'SecretKey': SecretKey})
        secmanagerv = secretmanager.put_secret_value(SecretId=IAM_UserName, SecretString=json_data)
        emailmsg = "New " + AccessKey + " has been created. Please get the secret key value from secret manager"
        ops_sns_topic = 'arn:aws:sns:us-east-1:redacted'
        sns_send_report = boto3.client('sns', region_name='us-east-1')
        sns_send_report.publish(TopicArn=ops_sns_topic, Message=emailmsg, Subject="New Key created for user" + IAM_UserName)
    except ClientError as e:
        print(e)

def deactive_key(uname):
    try:
        # GET PREVIOUS AND CURRENT VERSION OF KEY FROM SECRET MANAGER
        IAM_UserName = uname
        getpresecvalue = secretmanager.get_secret_value(SecretId=IAM_UserName, VersionStage='AWSPREVIOUS')
        #getcursecvalue=secretmanager.get_secret_value(SecretId='secmanager3',VersionStage='AWSCURRENT')
        #print (getpresecvalue)
        #print (getcursecvalue)
        preSecString = json.loads(getpresecvalue['SecretString'])
        preAccKey = preSecString['AccessKey']
        # GET CREATION DATE OF CURRENT VERSION OF ACCESS KEY
        #curdate=getcursecvalue['CreatedDate']
        # GET TIMEZONE FROM CREATION DATE
        #tz=curdate.tzinfo
        # CALCULATE TIME DIFFERENCE BETWEEN CREATION DATE AND TODAY
        #diff=datetime.datetime.now(tz)-curdate
        #diffdays=diff.days
        #print (curdate)
        #print (tz)
        #print (diffdays)
        #print (preAccKey)
        # IF TIME DIFFERENCE IS MORE THAN x NUMBER OF DAYS THEN DEACTIVATE PREVIOUS KEY AND SEND A MESSAGE
        #if diffdays >= 1:
        iam.update_access_key(AccessKeyId=preAccKey, Status='Inactive', UserName=IAM_UserName)
        emailmsg = "PreviousKey " + preAccKey + " has been disabled for IAM User" + IAM_UserName
        ops_sns_topic = 'arn:aws:sns:us-east-1:redacted'
        sns_send_report = boto3.client('sns', region_name='us-east-1')
        sns_send_report.publish(TopicArn=ops_sns_topic, Message=emailmsg, Subject='Previous Key Deactivated')
        return
    except ClientError as e:
        print(e)
    #else:
    #    print ("Current Key is not older than 10 days")
    #print (datediff)

def delete_key(uname):
    try:
        IAM_UserName = uname
        print(IAM_UserName)
        getpresecvalue = secretmanager.get_secret_value(SecretId=IAM_UserName, VersionStage='AWSPREVIOUS')
        #getcursecvalue=secretmanager.get_secret_value(SecretId='secmanager3',VersionStage='AWSCURRENT')
        preSecString = json.loads(getpresecvalue['SecretString'])
        preAccKey = preSecString['AccessKey']
        #print (preAccKey)
        # GET CREATION DATE OF CURRENT VERSION OF ACCESS KEY
        #curdate=getcursecvalue['CreatedDate']
        # GET TIMEZONE FROM CREATION DATE
        #tz=curdate.tzinfo
        # CALCULATE TIME DIFFERENCE BETWEEN CREATION DATE AND TODAY
        #diff=datetime.datetime.now(tz)-curdate
        #diffdays=diff.days
        # IF TIME DIFFERENCE IS MORE THAN x NUMBER OF DAYS THEN DEACTIVATE PREVIOUS KEY AND SEND A MESSAGE
        #if diffdays >= 1:
        keylist = iam.list_access_keys(UserName=IAM_UserName)
        #print (keylist)
        for x in range(2):
            prevkeystatus = keylist['AccessKeyMetadata'][x]['Status']
            preacckeyvalue = keylist['AccessKeyMetadata'][x]['AccessKeyId']
            print(prevkeystatus)
            if prevkeystatus == "Inactive":
                if preAccKey == preacckeyvalue:
                    print(preacckeyvalue)
                    iam.delete_access_key(UserName=IAM_UserName, AccessKeyId=preacckeyvalue)
                    emailmsg = "PreviousKey " + preacckeyvalue + " has been deleted for user" + IAM_UserName
                    ops_sns_topic = 'arn:aws:sns:us-east-1:redacted'
                    sns_send_report = boto3.client('sns', region_name='us-east-1')
                    sns_send_report.publish(TopicArn=ops_sns_topic, Message=emailmsg, Subject='Previous Key has been deleted')
                    return
                else:
                    print("secret manager previous value doesn't match with inactive IAM key value")
            else:
                print("previous key is still active")
                return
    except ClientError as e:
        print(e)
    #else:
    #    print ("Current Key is not older than 10 days")

def lambda_handler(event, context):
    # TODO implement
    faction = event["action"]
    fuser_name = event["username"]
    if faction == "create":
        status = create_key(fuser_name)
        print(status)
    elif faction == "deactivate":
        status = deactive_key(fuser_name)
        print(status)
    elif faction == "delete":
        status = delete_key(fuser_name)
        print(status)
When testing the function, I get the below error message:
Response
{
  "errorMessage": "'action'",
  "errorType": "KeyError",
  "stackTrace": [
    [
      "/var/task/lambda_function.py",
      108,
      "lambda_handler",
      "faction=event [\"action\"]"
    ]
  ]
}
Function Logs
START RequestId: 45b13b13-e992-40fe-b2e8-1f2cc53a86e5 Version: $LATEST
'action': KeyError
Traceback (most recent call last):
  File "/var/task/lambda_function.py", line 108, in lambda_handler
    faction=event ["action"]
KeyError: 'action'
I have the following policies on the role and group for my user:
IAMReadOnlyAccess
AmazonSNSFullAccess
and a custom policy with the following actions:
"iam:ListUsers",
"iam:CreateAccessKey",
"iam:DeleteAccessKey",
"iam:GetAccessKeyLastUsed",
"iam:GetUser",
"iam:ListAccessKeys",
"iam:UpdateAccessKey"
My EventBridge has the constant (JSON text) as {"action":"create","username":"secmanagert3"}
I'm looking to see why I keep getting errors in the lambda handler.
Edit:
After printing out the environment variables and the event, I have these function logs:
Function Logs
START RequestId: c5cabedf-d806-4ca5-a8c6-1ded84c39a39 Version: $LATEST
## ENVIRONMENT VARIABLES
environ({'PATH': '/var/lang/bin:/usr/local/bin:/usr/bin/:/bin:/opt/bin', 'LD_LIBRARY_PATH': '/var/lang/lib:/lib64:/usr/lib64:/var/runtime:/var/runtime/lib:/var/task:/var/task/lib:/opt/lib', 'LANG': 'en_US.UTF-8', 'TZ': ':UTC', '_HANDLER': 'lambda_function.lambda_handler', 'LAMBDA_TASK_ROOT': '/var/task', 'LAMBDA_RUNTIME_DIR': '/var/runtime', 'AWS_REGION': 'us-east-2', 'AWS_DEFAULT_REGION': 'us-east-2', 'AWS_LAMBDA_LOG_GROUP_NAME': '/aws/lambda/AutomatedKeyRotation', 'AWS_LAMBDA_LOG_STREAM_NAME': '2021/10/14/[$LATEST]7f05c89773e240788lda232ec5dh8hg04', 'AWS_LAMBDA_FUNCTION_NAME': 'AutomatedKeyRotation', 'AWS_LAMBDA_FUNCTION_MEMORY_SIZE': '128', 'AWS_LAMBDA_FUNCTION_VERSION': '$LATEST', '_AWS_XRAY_DAEMON_ADDRESS': 'xxx.xxx.xx.xxx', '_AWS_XRAY_DAEMON_PORT': '2000', 'AWS_XRAY_DAEMON_ADDRESS': 'xxx.xxx.xx.xxx:2000', 'AWS_XRAY_CONTEXT_MISSING': 'LOG_ERROR', '_X_AMZN_TRACE_ID': 'Root=1-61686a72-0v9fgta25cb9ca19568ae978;Parent=523645975780233;Sampled=0', 'AWS_EXECUTION_ENV': 'AWS_Lambda_python3.6', 'AWS_LAMBDA_INITIALIZATION_TYPE': 'on-demand', 'AWS_ACCESS_KEY_ID': 'key-id-number', 'AWS_SECRET_ACCESS_KEY': 'top-secret-key', 'AWS_SESSION_TOKEN': 'very-long-token', 'PYTHONPATH': '/var/runtime'})
## EVENT
{'key1': 'value1', 'key2': 'value2', 'key3': 'value3'}
'action': KeyError
Traceback (most recent call last):
  File "/var/task/lambda_function.py", line 112, in lambda_handler
    faction=event["action"]
KeyError: 'action'
As you can see from the log file, your event doesn't have the action and username variables. That's why you're getting the KeyError.
The problem is that you are testing this by running a test from the Lambda function, not through CloudWatch. To solve this:
In your Lambda function, open the "Test" tab. There you can see what your test event looks like. You can either change it manually, adding the values you need to the JSON, or choose one of the given templates (among others, there's a CloudWatch template). Once you add action and username to the JSON, it won't throw this error.
Alternatively, you can create a CloudWatch event, as instructed in the post that you shared, and invoke that event. That way you will see exactly what the event will look like when you actually invoke it in production.
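Separately, a defensive sketch of the handler itself (create_key, deactive_key and delete_key are the functions from the question) makes the failure mode obvious when a key is missing:
import json

def lambda_handler(event, context):
    # .get() instead of [] so a malformed test event produces a clear
    # error message rather than a bare KeyError
    faction = event.get("action")
    fuser_name = event.get("username")
    if faction is None or fuser_name is None:
        raise ValueError("event must contain 'action' and 'username', got: %s" % json.dumps(event))
    actions = {"create": create_key, "deactivate": deactive_key, "delete": delete_key}
    if faction not in actions:
        raise ValueError("unknown action: %s" % faction)
    print(actions[faction](fuser_name))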
I have created a module for a BACnet scan and it responds with a list of devices and their addresses as a result. But I'm having trouble implementing a direct method handler in Python. When I first tried implementing it myself I got this error, which could mean I didn't successfully register the direct method callback. I have some references, but they were for C#, and the Azure docs aren't helping me figure out the right method to register the callback. For IoTHubModuleClient there's an on_method_request_received and a receive_method_request. I appreciate any help!
def iothub_client_scan_run():
    try:
        iot_client = iothub_client_init()
        bacnet_scan_listener_thread = threading.Thread(target=device_method_listener, args=(iot_client,))
        bacnet_scan_listener_thread.daemon = True
        bacnet_scan_listener_thread.start()
        while True:
            time.sleep(1000)
    except KeyboardInterrupt:
        # (added: the original snippet's try block had no except clause)
        print("IoTHubModuleClient stopped")

def device_method_listener(iot_client):
    while True:
        # Receive the direct method request
        method_request = iot_client.receive_method_request()
        print(
            "\nMethod callback called with:\nmethodName = {method_name}\npayload = {payload}".format(
                method_name=method_request.name,
                payload=method_request.payload
            )
        )
        if method_request.name == "runBacnetScan":
            response = bacnet_scan_device(method_request)
        else:
            response_payload = {"Response": "Direct method {} not defined".format(method_request.name)}
            response_status = 404
        # Send a method response indicating the method request was resolved
        print('Sending method response')
        iot_client.send_method_response(response)
        print('Message sent!')
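For reference, a minimal sketch of the other registration style mentioned above, the on_method_request_received handler property (assuming azure-iot-device v2.x; the scan payload is a placeholder, not the real scan result):
from azure.iot.device import IoTHubModuleClient, MethodResponse

def handle_method_request(method_request):
    if method_request.name == "runBacnetScan":
        payload = {"Response": "scan results here"}  # placeholder payload
        status = 200
    else:
        payload = {"Response": "Direct method {} not defined".format(method_request.name)}
        status = 404
    response = MethodResponse.create_from_method_request(method_request, status, payload)
    client.send_method_response(response)

client = IoTHubModuleClient.create_from_edge_environment()
client.on_method_request_received = handle_method_request
client.connect()
With the handler property, the SDK runs the callback on an internal thread, so no manual listener thread is needed.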
Edit:
Here is my route config
I was able to resolve my issue, or at least find the root cause, and it was my network configuration under the createOptions. It seems there's an issue when I'm trying to do NetworkMode: host and connect to IotModuleClient.connect_from_edge_environment via connect with a connection string. I'm still trying to tweak the connection configuration, but at least I know it's not the code.
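For anyone comparing configs: the host networking in question sits in the module's createOptions in the deployment manifest, roughly like this (an illustrative snippet only, not my exact config):
{
  "HostConfig": {
    "NetworkMode": "host"
  }
}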
async def method_request_handler(module_client):
    while True:
        method_request = await module_client.receive_method_request()
        print(
            "\nMethod callback called with:\nmethodName = {method_name}\npayload = {payload}".format(
                method_name=method_request.name,
                payload=method_request.payload
            )
        )
        if method_request.name == "method1":
            payload = {"result": True, "data": "some data"}  # set response payload
            status = 200  # set return status code
            print("executed method1")
        elif method_request.name == "method2":
            payload = {"result": True, "data": 1234}  # set response payload
            status = 200  # set return status code
            print("executed method2")
        else:
            payload = {"result": False, "data": "unknown method"}  # set response payload
            status = 400  # set return status code
            print("executed unknown method: " + method_request.name)
        # Send the response
        method_response = MethodResponse.create_from_method_request(method_request, status, payload)
        await module_client.send_method_response(method_response)
        print('Message sent!')

def stdin_listener():
    while True:
        try:
            selection = input("Press Q to quit\n")
            if selection == "Q" or selection == "q":
                print("Quitting...")
                break
        except:
            time.sleep(10)

# Schedule task for C2D Listener
listeners = asyncio.gather(input1_listener(module_client), twin_patch_listener(module_client), method_request_handler(module_client))
I have the following code. For a twitter account with more than 75k followers, I always get this error:
tweepy.error.TweepError: Failed to send request: ('Connection aborted.', ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None))
That's why I added the except block to my code, as listed in
Getting back to back error using "wait_on_rate_limit" parameter
For any account with more than 75k followers, I get that error, it meets the except clause, and then ideally it should 'continue' back into the loop and fetch the remaining followers. But it won't; at that point it just goes out of the loop and stops.
I don't understand what I am missing.
My code:
for entity in entities:
    try:
        c = tweepy.Cursor(api.followers_ids, screen_name=entity, count=5000)
        for pages in c.pages():
            id.append(pages)
            et.append(entity)
            d = {'follower_id': id[i], 'name': et[i]}
            df_temp = pd.DataFrame(d)
            df = df.append(df_temp)
            print('Loop Number:', i)
            i = i + 1
            print(df.shape)
    except tweepy.TweepError as e:
        print(e.reason)
        time.sleep(60 * backoff_counter)
        backoff_counter += 1
        continue
Please tell me what I am missing here.
This is not the answer, but it may help narrow down the problem.
Try this code. It stores the iterators in lists so they (hopefully) end. It also keeps count for the entities and pages.
i = 0
id = []
et = []
backoff_counter = 1
entCtr = 0
while True:
    try:
        lstEntities = list(entities)  # lock entity list
        print('Entity Length', len(lstEntities))
        for entity in lstEntities:
            entCtr += 1
            print('entCtr', entCtr)
            c = tweepy.Cursor(api.followers_ids, screen_name=entity, count=5000)
            lstPages = list(c.pages())  # freeze here?
            print('entCtr', entCtr, 'Page Length', len(lstPages))
            i = 0
            for pages in lstPages:
                id.append(pages)
                et.append(entity)
                d = {'follower_id': id[i], 'name': et[i]}
                df_temp = pd.DataFrame(d)
                df = df.append(df_temp)
                print('Page Loop Number:', i)
                i = i + 1
                print(df.shape)
        break
    except tweepy.TweepError as e:
        print(e.reason)
        time.sleep(60 * backoff_counter)
        backoff_counter += 1
        continue
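Another angle, a sketch based on the cursor-handling pattern in tweepy's documentation (adapted here from RateLimitError to TweepError, untested against a 75k-follower account): wrap the page iterator so a failed next() retries the same page, preserving the cursor's pagination state instead of restarting the entity:
import time
import tweepy

def retrying_pages(page_iterator, backoff=60):
    # Yield pages; on TweepError, sleep and retry the same page.
    # Retries indefinitely; add a cap if that is a concern.
    while True:
        try:
            yield page_iterator.next()
        except StopIteration:
            return
        except tweepy.TweepError as e:
            print(e.reason)
            time.sleep(backoff)

for entity in entities:
    c = tweepy.Cursor(api.followers_ids, screen_name=entity, count=5000)
    for page in retrying_pages(c.pages()):
        print(entity, len(page))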
I am using Python 3 and the YouTube Data API V3 to fetch comments from a YouTube video. This particular video has around 280,000 comments. I am trying to write a while loop that will get as many comments as possible before hitting the quota limit and then breaking if the quota limit is reached.
My loop appears to be successfully calling next page tokens and appending the requested metadata to my list, but when the quota is reached, it doesn't end the loop; instead it registers an HttpError and doesn't save any of the correctly fetched comment data.
Here is my current code:
# Get resources:
def get(resource, **kwargs):
    print(f'Getting {resource} with params {kwargs}')
    kwargs['key'] = API_KEY
    response = requests.get(url=f'{YOUTUBE_BASE_URL}/{resource}',
                            params=remove_empty_kwargs(kwargs))
    print(f'Response: {response.status_code}')
    return response.json()
# Getting ALL comments for a video:
def getComments(video_id):
    comments = []
    res = get('commentThreads', part='id,snippet,replies', maxResults=100, videoId=video_id)
    try:
        nextPageToken = res['nextPageToken']
    except TypeError:
        nextPageToken = None
    while (nextPageToken):
        try:
            res = get('commentThreads', part='id,snippet,replies', maxResults=100, videoId=video_id)
            for i in res['items']:
                comments.append(i)
            nextPageToken = res['nextPageToken']
        except HttpError as error:
            print('An error occurred: %s' % error)
            break
    return comments

test = 'video-id-here'
testComments = getComments(test)
So, what happens is that this correctly seems to loop through all the comments. But after some time, i.e., after it has looped several hundred times, I get the following error:
Getting commentThreads with params {'part': 'id,snippet,replies', 'maxResults': 100, 'videoId': 'real video ID shows here'}
Response: 403
KeyError                                  Traceback (most recent call last)
<ipython-input-39-6582a0d8f122> in <module>
----> 1 testComments = getComments(test)

<ipython-input-29-68952caa30dd> in getComments(video_id)
     12         res = get('commentThreads', part='id,snippet,replies', maxResults=100, videoId=video_id)
     13
---> 14         for i in res['items']:
     15             comments.append(i)
     16
KeyError: 'items'
So, first I get the expected 403 response from the API after some time, which indicates reaching the quota limit. Then it throws the error for 'items', but the reason this error is thrown is that it didn't fetch any more comment threads, so there are no more 'items' to append.
My expected result is that the loop will just break when the quota limit is reached and save the comment data it managed to fetch before reaching the quota.
I think this is probably related to my 'try' and 'except' handling, but I can't seem to figure it out.
Thanks!
Ultimately fixed it with this code:
def getComments(video_id):
comments = []
res = get('commentThreads', part='id,snippet,replies', maxResults=100, videoId=video_id)
try:
nextPageToken = res['nextPageToken']
except KeyError:
nextPageToken = None
except TypeError:
nextPageToken = None
while (nextPageToken):
try:
res = get('commentThreads', part='id,snippet,replies', maxResults=100, videoId=video_id)
for i in res['items']:
comments.append(i)
nextPageToken = res['nextPageToken']
except KeyError:
break
return comments
Proper exception handling for the KeyError was the ultimate solution, since my get() function returns a JSON object.
You are catching an HttpError but it never happens, because when your limit runs out the API just returns 403.
There is no HttpError to catch and so you try to read a value which isn't there and get a KeyError.
The most robust way is probably to check the status code.
res = get('commentThreads', part='id,snippet,replies', maxResults=100, videoId=video_id)
if res.status_code != 200:
    break
for i in res['items']:
    comments.append(i)
nextPageToken = res['nextPageToken']
The res.status_code check assumes you're using requests (note that as written, get() returns the parsed JSON, so the response object would need to be exposed for this to work).
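To make that concrete, here is a sketch where get() returns the raw requests.Response (API_KEY, YOUTUBE_BASE_URL and remove_empty_kwargs are the helpers from the question). It also passes nextPageToken back to the API via the pageToken parameter, which the original loop never did, so each iteration was requesting the first page again:
import requests

def get(resource, **kwargs):
    kwargs['key'] = API_KEY
    # Return the Response object itself so the caller can check status_code
    return requests.get(url=f'{YOUTUBE_BASE_URL}/{resource}',
                        params=remove_empty_kwargs(kwargs))

def getComments(video_id):
    comments = []
    nextPageToken = None
    while True:
        res = get('commentThreads', part='id,snippet,replies',
                  maxResults=100, videoId=video_id, pageToken=nextPageToken)
        if res.status_code != 200:  # quota exhausted or other API error
            break
        data = res.json()
        comments.extend(data.get('items', []))
        nextPageToken = data.get('nextPageToken')
        if not nextPageToken:  # last page reached
            break
    return comments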
I'm considering a fan-out proxy in Tornado to query multiple backend servers, with the possible use-case of having it not wait for all responses before returning.
Is there a problem with the remaining futures if you use a WaitIterator but don't continue to wait after receiving a useful response?
Perhaps the results of the other futures will not be cleaned up? Perhaps callbacks could be added to any remaining futures to discard their results?
#!./venv/bin/python
from tornado import gen
from tornado import httpclient
from tornado import ioloop
from tornado import web
import json

class MainHandler(web.RequestHandler):
    @gen.coroutine
    def get(self):
        r1 = httpclient.HTTPRequest(
            url="http://apihost1.localdomain/api/object/thing",
            connect_timeout=4.0,
            request_timeout=4.0,
        )
        r2 = httpclient.HTTPRequest(
            url="http://apihost2.localdomain/api/object/thing",
            connect_timeout=4.0,
            request_timeout=4.0,
        )
        http = httpclient.AsyncHTTPClient()
        wait = gen.WaitIterator(
            r1=http.fetch(r1),
            r2=http.fetch(r2)
        )
        while not wait.done():
            try:
                reply = yield wait.next()
            except Exception as e:
                print("Error {} from {}".format(e, wait.current_future))
            else:
                print("Result {} received from {} at {}".format(
                    reply, wait.current_future,
                    wait.current_index))
                if reply.code == 200:
                    result = json.loads(reply.body)
                    self.write(json.dumps(dict(result, backend=wait.current_index)))
                    return

def make_app():
    return web.Application([
        (r'/', MainHandler)
    ])

if __name__ == '__main__':
    app = make_app()
    app.listen(8888)
    ioloop.IOLoop.current().start()
So I've checked through the source for WaitIterator.
It tracks the futures by adding a callback to each; when fired, the iterator queues the result or (if you've called next()) fulfils a future it has given to you.
As the future you wait on only gets created by calling .next(), it appears you can exit the while not wait.done() loop without leaving any futures without observers.
Reference counting ought to allow the WaitIterator instance to remain until after all the futures have fired their callbacks and then be reclaimed.
Update 2017/08/02
Having tested further by subclassing WaitIterator with extra logging: yes, the iterator will be cleaned up when all the futures return, but if any of those futures returns an exception, it is logged that the exception was never observed.
ERROR:tornado.application:Future exception was never retrieved: HTTPError: HTTP 599: Timeout while connecting
In summary and answering my question: completing the WaitIterator isn't necessary from a clean-up point of view, but it is probably desirable to do so from a logging point of view.
If you wanted to be sure, passing the wait iterator to a new future that will finish consuming it and adding an observer may suffice. For example:
@gen.coroutine
def complete_wait_iterator(wait):
    rounds = 0
    while not wait.done():
        rounds += 1
        try:
            reply = yield wait.next()
        except Exception as e:
            print("Not needed Error {} from {}".format(e, wait.current_future))
        else:
            print("Not needed result {} received from {} at {}".format(
                reply, wait.current_future,
                wait.current_index))
    log.info('completer finished after {n} rounds'.format(n=rounds))

class MainHandler(web.RequestHandler):
    @gen.coroutine
    def get(self):
        r1 = httpclient.HTTPRequest(
            url="http://apihost1.localdomain/api/object/thing",
            connect_timeout=4.0,
            request_timeout=4.0,
        )
        r2 = httpclient.HTTPRequest(
            url="http://apihost2.localdomain/api/object/thing",
            connect_timeout=4.0,
            request_timeout=4.0,
        )
        http = httpclient.AsyncHTTPClient()
        wait = gen.WaitIterator(
            r1=http.fetch(r1),
            r2=http.fetch(r2)
        )
        while not wait.done():
            try:
                reply = yield wait.next()
            except Exception as e:
                print("Error {} from {}".format(e, wait.current_future))
            else:
                print("Result {} received from {} at {}".format(
                    reply, wait.current_future,
                    wait.current_index))
                if reply.code == 200:
                    result = json.loads(reply.body)
                    self.write(json.dumps(dict(result, backend=wait.current_index)))
                    consumer = complete_wait_iterator(wait)
                    consumer.add_done_callback(lambda f: f.exception())
                    return