I am trying to mock an AWS Lambda function; below is my sample code:
import json

import boto3

def get_lambda_resp(arn: str, input: str) -> str:
    lambda_client = boto3.client("lambda")
    response = lambda_client.invoke(
        FunctionName=arn, LogType="None",
        Payload=json.dumps({"param": input}).encode("utf-8")
    )
    output = json.loads(response["Payload"].read().decode("utf-8"))
    return output["value"]
and below is my test case
import io
import zipfile
import boto3
from moto import mock_lambda
@mock_lambda
def test():
    conn = boto3.client('lambda', 'us-east-1')

    def get_test_zip_file():
        pfunc = '''
import json
def lambda_handler(event, context):
    resp = {"value": "input_str"}
    return json.dumps(resp)
'''
        zip_output = io.BytesIO()
        zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED)
        zip_file.writestr('lambda_function.py', pfunc)
        zip_file.close()
        zip_output.seek(0)
        return zip_output.read()

    conn.create_function(
        FunctionName='lambda-function-name',
        Runtime='python3.8',
        Role='test-iam-role',
        Handler='lambda_function.lambda_handler',
        Code={
            'ZipFile': get_test_zip_file(),
        },
        Description='test lambda function',
        Timeout=3,
        MemorySize=128,
        Publish=True
    )

    resp = get_lambda_resp("arn", "input_str")
    assert resp is not None
While running the test case, I am getting the below error:
E ModuleNotFoundError: No module named 'docker'
I already have Docker running. What else should I do to make this work?
That message refers to the pip-module called docker.
Assuming you use Moto >=2.x, make sure you install it correctly to get all required dependencies:
pip install moto[awslambda,s3,service1,etc]
Or if you use many services, install all dependencies without having to list all services:
pip install moto[all]
This will install all required pip modules, including the docker Python package.
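After installing, a quick sanity check (run from the same environment your tests use) is to confirm the module is importable:
python -c "import docker; print(docker.__version__)"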
Source: https://github.com/spulec/moto/issues/3722
I am using pytest to test out an AWS Lambda function I am writing. The function has some initialization code outside of any function. It looks similar to this:
catalog_service/lambda_v1.py
import boto3
import botocore
import json
import os

from aws_lambda_powertools import Logger
from elastic_enterprise_search import AppSearch

LOGGER = Logger()
ENV = os.environ['ENVIRONMENT']

ssm = boto3.client('ssm')
key_response = ssm.get_parameter(
    Name=key,
    WithDecryption=True
)

APP_SEARCH = AppSearch(
    os.environ['APP_SEARCH_API_HOST'],
    bearer_auth=key_response['Parameter']['Value'],
)

# Additional functions
Whenever I run my test, it always fails with a KeyError saying that ENVIRONMENT is not in os.environ. That makes sense, since the Lambda script is imported before any of the mocking code runs. Whenever I try to mock it with monkeypatch, it seems I am not doing it in the right spot. My test looks like this:
import pytest
import os
from . import fixtures
from catalog_service import lambda_v1 as v1
@pytest.fixture
def mock_set_environment(monkeypatch):
    monkeypatch.setenv('ENVIRONMENT', 'qa')

class TestLambdaV1:

    @pytest.mark.parametrize('data, expected', [
        (
            {'a': 1},
            {
                'statusCode': '200',
                'headers': {
                    'content-type': 'application/json'
                },
                'body': '{"a": 1}'
            },
        ),
    ])
    def test_format_good_response(self, mock_set_environment, data, expected):
        assert os.getenv('ENVIRONMENT') == 'qa'
        result = v1.format_good_response(data)
        assert result == expected
I think it would mock the environment variable if it were used within a function, but it is used during the import. How do I mock the os.environ variable, as well as the AppSearch and boto3 packages, before they're imported / used?
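One common approach (a sketch, not the only way) is to move the import of lambda_v1 out of the top of the test file and into a fixture, so the environment and the module-level boto3/AppSearch calls can be stubbed before the module code runs. The top-level from catalog_service import lambda_v1 as v1 would have to go, since it runs at collection time before any patching. Everything below besides the question's module path is an assumption:

import sys
from unittest import mock

import pytest

@pytest.fixture
def v1(monkeypatch):
    # Set the variables *before* the module is (re-)imported
    monkeypatch.setenv('ENVIRONMENT', 'qa')
    monkeypatch.setenv('APP_SEARCH_API_HOST', 'http://localhost')

    fake_ssm = mock.MagicMock()
    fake_ssm.get_parameter.return_value = {'Parameter': {'Value': 'dummy-key'}}

    # Patch boto3.client and AppSearch at their source modules so the
    # module-level code in lambda_v1 picks up the mocks on import
    with mock.patch('boto3.client', return_value=fake_ssm), \
         mock.patch('elastic_enterprise_search.AppSearch'):
        sys.modules.pop('catalog_service.lambda_v1', None)  # force a fresh import
        from catalog_service import lambda_v1
        yield lambda_v1

Tests would then take v1 as a fixture argument instead of relying on a module-level import.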
I am using two actions in IBM Cloud Functions, write1 and write2 (both in Python).
I created a sequence that should pass a value from write1 to write2.
I wrote Python code in the write1 action, but it throws a JSON error.
Write1 Python file:
import os
import sys
import json
import requests
import ibm_boto3
from ibm_botocore.client import Config, ClientError

cos = ibm_boto3.resource("s3",
    ibm_api_key_id='my-api-key',
    ibm_service_instance_id='my-instance-id',
    config=Config(signature_version="oauth"),
    endpoint_url='https://s3.eu-gb.cloud-object-storage.appdomain.cloud'
)

def get_item(bucket_name, item_name):
    a = {"Retrieving item from bucket": bucket_name, "key": item_name}
    print(json.dumps(a))
    try:
        file = cos.Object(bucket_name, item_name).get()
        return file["Body"].read()
    except ClientError as be:
        w = {"CLIENT ERROR": str(be)}
        print(json.dumps(w))
    except Exception as e:
        y = {"Unable to retrieve file contents": str(e)}
        print(json.dumps(y))

def test():
    x = get_item('cloud-college-bucket0', 'abc.txt')
    print(x.decode('utf-8'))
    if x is not None:
        string_in_uppercase = x.upper()
        n = {"String in Uppercase =": string_in_uppercase.decode('utf-8')}
        b = json.dumps(n)
        print(b)

def main(dict):
    return test()

if __name__ == '__main__':
    main({})
Error it throws:
Results:
{
"error": "**The action did not produce a valid JSON response**: null\n"
}
Logs:
[
"2019-10-08T13:01:56.339677Z stderr: /usr/local/lib/python3.7/site-packages/ibm_botocore/vendored/requests/api.py:67: DeprecationWarning: You are using the post() function from 'ibm_botocore.vendored.requests'. This is not a public API in ibm_botocore and will be removed in the future. Additionally, this version of requests is out of date. We recommend you install the requests package, 'import requests' directly, and use the requests.post() function instead.",
"2019-10-08T13:01:56.339748Z stderr: DeprecationWarning",
"2019-10-08T13:01:56.339755Z stderr: /usr/local/lib/python3.7/site-packages/ibm_botocore/vendored/requests/models.py:169: DeprecationWarning:
Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working",
"2019-10-08T13:01:56.339759Z stderr: if isinstance(hook, collections.Callable):",
"2019-10-08T13:01:56.339678Z stdout: {\"Retrieving item from bucket\": \"cloud-college-bucket0\", \"key\": \"abc.txt\"}",
"2019-10-08T13:01:56.339772Z stdout: hello friends",
"2019-10-08T13:01:56.339776Z stdout: {\"String in Uppercase =\": \"HELLO FRIENDS\"}",
"2019-10-08T13:01:56.340Z stderr: The action did not initialize or run as expected. Log data might be missing."
]
The warning says to import requests, which I did, but the problem persists.
It also says to use the requests.post() function, but I am unable to understand how and where to use it. And how do I solve this JSON issue?
The desired output is already shown in the logs.
At the outset, I can see that the test function only prints the JSON but never returns it. If you look at the sample Python action, it should always return a JSON object:
import sys

def main(dict):
    return { 'message': 'Hello world' }
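Applying that to the question's code, here is a minimal sketch (reusing the question's get_item) of write1 returning its result instead of only printing it:

def test():
    x = get_item('cloud-college-bucket0', 'abc.txt')
    if x is not None:
        # return a dict so the action produces valid JSON
        return {"String in Uppercase =": x.upper().decode('utf-8')}
    return {"error": "could not retrieve item"}

def main(dict):
    return test()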
Also, you can check the list of packages supported by the Python runtime before using them in your action.
If you need a package that is not in the list, you can always package your Python code with a virtual environment in a .zip file, or package the code in a Docker image.
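As a rough sketch of the virtual-environment route (the directory and file names follow the OpenWhisk convention that the zip contains a virtualenv directory and a __main__.py entry point; treat the exact commands as an outline rather than a recipe):

virtualenv virtualenv          # OpenWhisk expects this exact directory name
source virtualenv/bin/activate
pip install requests
deactivate
cp write1.py __main__.py       # the entry point inside the zip must be __main__.py
zip -r write1.zip virtualenv __main__.py
ibmcloud fn action update write1 write1.zip --kind python:3.7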
I created a zip file with main.py and an entrypoint handler function for AWS Lambda, using the python 3.7 runtime.
The zip file was packaged inside an Amazon Linux image on EC2 using Python 3.7.3.
I had some errors running it on AWS Lambda, so I decided to see if there's a way to run the function locally.
My main.py is below:
import datetime
import logging
import os
import re
import subprocess

import boto3
import certbot.main
import raven

logger = logging.getLogger()
logger.setLevel(logging.INFO)

def list_files(folder_path):
    onlyfiles = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
    logger.info('## path')
    logger.info(onlyfiles)

def read_and_delete_file(path):
    with open(path, 'r') as file:
        contents = file.read()
    os.remove(path)
    return contents

def provision_cert(email, domains):
    certbot.main.main([
        'certonly',            # Obtain a cert but don't install it
        '-n',                  # Run in non-interactive mode
        '--agree-tos',         # Agree to the terms of service
        '--email', email,      # Email
        '--dns-route53',       # Use dns challenge with route53
        '-d', domains,         # Domains to provision certs for
        # Override directory paths so script doesn't have to be run as root
        '--config-dir', '/tmp/config-dir/',
        '--work-dir', '/tmp/work-dir/',
        '--logs-dir', '/tmp/logs-dir/',
    ])
    first_domain = domains.split(',')[0]
    first_domain_cert_folder = re.sub(r'\*\.', '', first_domain)
    path = '/tmp/config-dir/live/' + first_domain_cert_folder + '/'
    logger.info('## path')
    logger.info(path)
    list_files(path)
    return {
        'certificate': read_and_delete_file(path + 'cert.pem'),
        'private_key': read_and_delete_file(path + 'privkey.pem'),
        'certificate_chain': read_and_delete_file(path + 'fullchain.pem')
    }

def should_provision(domains):
    existing_cert = find_existing_cert(domains)
    if existing_cert:
        now = datetime.datetime.now(datetime.timezone.utc)
        not_after = existing_cert['Certificate']['NotAfter']
        return (not_after - now).days <= 30
    else:
        return True

def find_existing_cert(domains):
    domains = frozenset(domains.split(','))
    client = boto3.client('acm')
    paginator = client.get_paginator('list_certificates')
    iterator = paginator.paginate(PaginationConfig={'MaxItems': 1000})
    for page in iterator:
        for cert in page['CertificateSummaryList']:
            cert = client.describe_certificate(CertificateArn=cert['CertificateArn'])
            sans = frozenset(cert['Certificate']['SubjectAlternativeNames'])
            if sans.issubset(domains):
                return cert
    return None

def notify_via_sns(topic_arn, domains, certificate):
    process = subprocess.Popen(['openssl', 'x509', '-noout', '-text'],
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding='utf8')
    stdout, stderr = process.communicate(certificate)
    client = boto3.client('sns')
    client.publish(TopicArn=topic_arn,
                   Subject='Issued new LetsEncrypt certificate',
                   Message='Issued new certificates for domains: ' + domains + '\n\n' + stdout,
                   )

def upload_cert_to_acm(cert, domains):
    existing_cert = find_existing_cert(domains)
    certificate_arn = existing_cert['Certificate']['CertificateArn'] if existing_cert else None
    client = boto3.client('acm')
    acm_response = client.import_certificate(
        CertificateArn=certificate_arn,
        Certificate=cert['certificate'],
        PrivateKey=cert['private_key'],
        CertificateChain=cert['certificate_chain']
    )
    return None if certificate_arn else acm_response['CertificateArn']

def handler(event, context):
    try:
        domains = os.environ['LETSENCRYPT_DOMAINS']
        if should_provision(domains):
            cert = provision_cert(os.environ['LETSENCRYPT_EMAIL'], domains)
            upload_cert_to_acm(cert, domains)
            notify_via_sns(os.environ['NOTIFICATION_SNS_ARN'], domains, cert['certificate'])
    except:
        client = raven.Client(os.environ['SENTRY_DSN'], transport=raven.transport.http.HTTPTransport)
        client.captureException()
        raise
The zip file is about 20 MB. I found resources on AWS about debugging locally but, frankly, I am a bit lost as to how to get started.
I am not very familiar with AWS and Lambda in general, though I am fairly comfortable with Python.
I am on macOS Mojave and use Visual Studio Code as my editor. I am okay with creating a virtualenv on my Mac if that helps.
How do I debug my Lambda on my local MacBook Pro?
As you've found, you can use AWS SAM (with Docker) to debug locally.
Below is a step-by-step guide on getting started:
Prerequisites
Install Docker: https://docs.docker.com/install/
Install AWS CLI: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html
Install AWS SAM CLI: https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html
Install pipenv: https://pipenv.kennethreitz.org/en/latest/install/
Create example project
This is useful for understanding the project structure and local debugging - you can then swap in your own code for the example code.
sam init --runtime python3.7
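The init command scaffolds an example app; the exact layout varies by SAM CLI version, but it is typically something like:
sam-app/
├── template.yaml        # defines the HelloWorldFunction and its resources
├── hello_world/
│   ├── app.py           # the Lambda handler
│   └── requirements.txt
└── event.json           # sample payload for sam local invoke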
Add dependencies
pipenv shell
pipenv install package-names
Run and debug locally
pipenv lock -r > requirements.txt
sam build --manifest requirements.txt
sam local invoke HelloWorldFunction --event event.json
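If you don't have an event payload yet, the SAM CLI can generate a sample one. And since the question's handler reads environment variables, those can be supplied with --env-vars (the env.json file name and its contents below are assumptions):
sam local generate-event apigateway aws-proxy > event.json
# env.json maps the function's logical ID to its variables, e.g.
# {"HelloWorldFunction": {"LETSENCRYPT_DOMAINS": "example.com"}}
sam local invoke HelloWorldFunction --event event.json --env-vars env.json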
Deploy to AWS Lambda
Make a new bucket, if needed, in which to store the function code:
aws s3 mb s3://bucket-name
Create and run a .sh script:
#!/bin/bash
pipenv lock -r > requirements.txt && sam build --manifest requirements.txt
sam package \
--output-template-file packaged.yaml \
--s3-bucket bucket-name
sam deploy \
--template-file packaged.yaml \
--stack-name name-of-lambda-stack \
--capabilities CAPABILITY_IAM \
--region us-east-1
Replacing:
bucket-name with the name of the S3 bucket to store the function code
name-of-lambda-stack with the name of the AWS Lambda Stack to deploy to
us-east-1 with another region if desired
Your function is now deployed to AWS Lambda.
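As an optional sanity check, you can list and invoke the deployed function from the CLI (the function name below is a placeholder; CloudFormation generates the real one):
aws lambda list-functions --region us-east-1
aws lambda invoke --function-name generated-function-name --payload '{}' out.json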
I'm having a real problem using the flask module. I've tried a lot of the solutions here on the forum, but none has worked.
I can see flask is installed:
pip list shows flask
in Settings, the flask module is installed in the project interpreter
when I type the code, the module comes up in autocompletion
However, when I launch the code I get an error:
No module named 'flask'
I've tried to re-install PyCharm.
I've tried to uninstall and install flask again.
Still the same problem. Any advice?
The file is named vsearch.py. Here is the code:
from flask import Flask, render_template, request, escape

app = Flask(__name__)

def search4words(phrase: str, letters: str) -> set:
    return set(letters).intersection(set(phrase))

def log_request(req: 'flask_request', res: str) -> None:
    with open('vsearch.log', 'a') as log:
        print(req.form, req.remote_addr, req.user_agent, res, file=log,
              sep='|')

@app.route('/search4', methods=['POST'])
def do_search() -> 'html':
    phrase = request.form['phrase']
    letters = request.form['letters']
    title = 'Here are your results:'
    results = str(search4words(phrase, letters))
    log_request(request, results)
    return render_template('results.html', the_phrase=phrase,
                           the_letters=letters, the_title=title,
                           the_results=results,)

@app.route('/')
@app.route('/entry')
def entry_page() -> 'html':
    return render_template('entry.html', the_title='Welcome back AGAIN!!!!!')

@app.route('/viewlog')
def view_the_log() -> 'html':
    contents = []
    with open('vsearch.log') as log:
        for line in log:
            contents.append([])
            for item in line.split('|'):
                contents[-1].append(escape(item))
    titles = ('Form Data', 'Remote_addr', 'User_agent', 'Results')
    return render_template('viewlog.html',
                           the_title='View log',
                           the_row_titles=titles,
                           the_data=contents,)

if __name__ == '__main__':
    app.run(debug=True)
Your issue is that you attempted to run vsearch.py through the terminal rather than through PyCharm's interpreter (which was correctly installed). To utilize the virtual environment, you need to activate it before running your code in the terminal.
There are multiple ways of activating your virtual environment, so please find the one applicable to your project. A good resource for this is https://uoa-eresearch.github.io/eresearch-cookbook/recipe/2014/11/26/python-virtual-env/.
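For example, on macOS/Linux the activation usually looks like this (the venv path is an assumption; PyCharm shows the actual interpreter path under Settings > Project Interpreter):
source venv/bin/activate   # activate the project's virtual environment
python vsearch.py          # now runs with the interpreter that has flask installed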
I am running the following script inside AWS Lambda:
#!/usr/bin/python

from __future__ import print_function
import json
import os

import ansible.inventory
import ansible.playbook
import ansible.runner
import ansible.constants
from ansible import utils
from ansible import callbacks

print('Loading function')

def run_playbook(**kwargs):
    stats = callbacks.AggregateStats()
    playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
    runner_cb = callbacks.PlaybookRunnerCallbacks(
        stats, verbose=utils.VERBOSITY)

    # use /tmp instead of $HOME
    ansible.constants.DEFAULT_REMOTE_TMP = '/tmp/ansible'

    out = ansible.playbook.PlayBook(
        callbacks=playbook_cb,
        runner_callbacks=runner_cb,
        stats=stats,
        **kwargs
    ).run()
    return out

def lambda_handler(event, context):
    return main()

def main():
    out = run_playbook(
        playbook='little.yml',
        inventory=ansible.inventory.Inventory(['localhost'])
    )
    return(out)

if __name__ == '__main__':
    main()
However, I get the following error: failed=True msg='boto required for this module'
According to this comment (https://github.com/ansible/ansible/issues/5734#issuecomment-33135727), it should work.
But I don't understand how to specify that in my script. Or can I have a separate hosts file and include it in the script, the way I include my playbook?
If so, how?
[EDIT - 1]
I have added the line inventory=ansible.inventory.Inventory('hosts')
with the hosts file as:
[localhost]
127.0.0.1 ansible_python_interpreter=/usr/local/bin/python
But, I get this error: /bin/sh: /usr/local/bin/python: No such file or directory
So, where is python located inside AWS Lambda?
I installed boto just like I installed other packages in the Lambda's deployment package: pip install boto -t <folder-name>
The bash command which python will usually give the location of the Python binary. There's an example of how to call a bash script from AWS Lambda here.
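As a minimal sketch (not the question's playbook), you could locate the runtime's interpreter from inside a handler like this:

import subprocess

def lambda_handler(event, context):
    # Print where the Lambda runtime's Python binary lives
    path = subprocess.check_output(['which', 'python']).decode().strip()
    print(path)
    return path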