I created a zip file with main.py and an entrypoint handler function for AWS Lambda using the Python 3.7 runtime.
The zip file was packaged inside an Amazon Linux image on EC2 using Python 3.7.3.
I had some errors running it on AWS Lambda, so I decided to find out whether there's a way to run the function locally.
My main.py is below:
import datetime
import logging
import os
import re
import subprocess
import boto3
import certbot.main
import raven
logger = logging.getLogger()
logger.setLevel(logging.INFO)

def list_files(folder_path):
    onlyfiles = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
    logger.info('## path')
    logger.info(onlyfiles)

def read_and_delete_file(path):
    with open(path, 'r') as file:
        contents = file.read()
    os.remove(path)
    return contents

def provision_cert(email, domains):
    certbot.main.main([
        'certonly',                         # Obtain a cert but don't install it
        '-n',                               # Run in non-interactive mode
        '--agree-tos',                      # Agree to the terms of service
        '--email', email,                   # Email
        '--dns-route53',                    # Use dns challenge with route53
        '-d', domains,                      # Domains to provision certs for
        # Override directory paths so script doesn't have to be run as root
        '--config-dir', '/tmp/config-dir/',
        '--work-dir', '/tmp/work-dir/',
        '--logs-dir', '/tmp/logs-dir/',
    ])

    first_domain = domains.split(',')[0]
    first_domain_cert_folder = re.sub(r'\*\.', '', first_domain)
    path = '/tmp/config-dir/live/' + first_domain_cert_folder + '/'

    logger.info('## path')
    logger.info(path)
    list_files(path)

    return {
        'certificate': read_and_delete_file(path + 'cert.pem'),
        'private_key': read_and_delete_file(path + 'privkey.pem'),
        'certificate_chain': read_and_delete_file(path + 'fullchain.pem')
    }

def should_provision(domains):
    existing_cert = find_existing_cert(domains)
    if existing_cert:
        now = datetime.datetime.now(datetime.timezone.utc)
        not_after = existing_cert['Certificate']['NotAfter']
        return (not_after - now).days <= 30
    else:
        return True

def find_existing_cert(domains):
    domains = frozenset(domains.split(','))

    client = boto3.client('acm')
    paginator = client.get_paginator('list_certificates')
    iterator = paginator.paginate(PaginationConfig={'MaxItems': 1000})

    for page in iterator:
        for cert in page['CertificateSummaryList']:
            cert = client.describe_certificate(CertificateArn=cert['CertificateArn'])
            sans = frozenset(cert['Certificate']['SubjectAlternativeNames'])
            if sans.issubset(domains):
                return cert

    return None

def notify_via_sns(topic_arn, domains, certificate):
    process = subprocess.Popen(['openssl', 'x509', '-noout', '-text'],
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE, encoding='utf8')
    stdout, stderr = process.communicate(certificate)

    client = boto3.client('sns')
    client.publish(TopicArn=topic_arn,
                   Subject='Issued new LetsEncrypt certificate',
                   Message='Issued new certificates for domains: ' + domains + '\n\n' + stdout,
                   )

def upload_cert_to_acm(cert, domains):
    existing_cert = find_existing_cert(domains)
    certificate_arn = existing_cert['Certificate']['CertificateArn'] if existing_cert else None

    client = boto3.client('acm')
    acm_response = client.import_certificate(
        CertificateArn=certificate_arn,
        Certificate=cert['certificate'],
        PrivateKey=cert['private_key'],
        CertificateChain=cert['certificate_chain']
    )

    return None if certificate_arn else acm_response['CertificateArn']

def handler(event, context):
    try:
        domains = os.environ['LETSENCRYPT_DOMAINS']
        if should_provision(domains):
            cert = provision_cert(os.environ['LETSENCRYPT_EMAIL'], domains)
            upload_cert_to_acm(cert, domains)
            notify_via_sns(os.environ['NOTIFICATION_SNS_ARN'], domains, cert['certificate'])
    except:
        client = raven.Client(os.environ['SENTRY_DSN'], transport=raven.transport.http.HTTPTransport)
        client.captureException()
        raise
The zip file is about 20 MB. I found resources on AWS about debugging locally, but frankly, I am a bit lost as to how to get started.
I am not very familiar with AWS and Lambda in general, though I am fairly comfortable with Python.
I am on macOS Mojave and use Visual Studio Code as my editor. I am okay with creating a virtualenv on my Mac if that helps.
How do I debug my Lambda function locally on my MacBook Pro?
As you've found, you can use AWS SAM (with Docker) to debug locally.
Below is a step-by-step guide on getting started:
Prerequisites
Install Docker: https://docs.docker.com/install/
Install AWS CLI: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html
Install AWS SAM CLI: https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html
Install pipenv: https://pipenv.kennethreitz.org/en/latest/install/
Create example project
This is useful for understanding the project structure and the local debugging workflow - you can then substitute your own code for the example code.
sam init --runtime python3.7
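For reference, a rough sketch of the layout that sam init generates (exact file and folder names vary between SAM CLI versions):
sam-app/
├── template.yaml          # SAM template describing the function and its resources
├── events/
│   └── event.json         # sample event for local invocation
├── hello_world/
│   ├── app.py             # the example handler (lambda_handler)
│   └── requirements.txt   # per-function dependency manifest
└── tests/
    └── unit/
        └── test_handler.py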
Add dependencies
pipenv shell
pipenv install package-names
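For example, for the certbot function in the question, the dependency list would look roughly like this (assuming the Route 53 plugin package is what provides the --dns-route53 challenge):
pipenv install certbot certbot-dns-route53 boto3 raven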
Run and debug locally
pipenv lock -r > requirements.txt
sam build --manifest requirements.txt
sam local invoke HelloWorldFunction --event event.json
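The handler in the question ignores the event but reads several environment variables, so locally you would pass those in with --env-vars; a sketch, where env.json and all its values are hypothetical:
sam local invoke HelloWorldFunction --event event.json --env-vars env.json
env.json maps the function's logical ID to its variables, roughly:
{
  "HelloWorldFunction": {
    "LETSENCRYPT_DOMAINS": "example.com",
    "LETSENCRYPT_EMAIL": "you@example.com",
    "NOTIFICATION_SNS_ARN": "arn:aws:sns:us-east-1:123456789012:example-topic",
    "SENTRY_DSN": "https://example@sentry.io/1"
  }
}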
Deploy to AWS Lambda
Make a new bucket, if needed, in which to store the function code:
aws s3 mb s3://bucket-name
Create and run a .sh script:
#!/bin/bash
pipenv lock -r > requirements.txt && sam build --manifest requirements.txt
sam package \
--output-template-file packaged.yaml \
--s3-bucket bucket-name
sam deploy \
--template-file packaged.yaml \
--stack-name name-of-lambda-stack \
--capabilities CAPABILITY_IAM \
--region us-east-1
Replacing:
bucket-name with the name of the S3 bucket to store the function code
name-of-lambda-stack with the name of the CloudFormation stack that will contain the Lambda function
us-east-1 with another region if desired
Your function is now deployed to AWS Lambda.
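As a quick smoke test of the deployed function (the physical function name is generated by CloudFormation, so look it up in the Lambda console or the stack's resources first):
aws lambda invoke --function-name generated-function-name --payload '{}' response.json
cat response.json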
Related
I am trying to mock an AWS Lambda function; below is my sample code:
import json

import boto3

def get_lambda_resp(arn: str, input: str) -> str:
    lambda_client = boto3.client("lambda")
    response = lambda_client.invoke(
        FunctionName=arn, LogType="None",
        Payload=json.dumps({"param": input}).encode("utf-8")
    )
    output = json.loads(response["Payload"].read().decode("utf-8"))
    return output["value"]
and below is my test case
import io
import zipfile

import boto3
from moto import mock_lambda

@mock_lambda
def test():
    conn = boto3.client('lambda', 'us-east-1')

    def get_test_zip_file():
        pfunc = '''
import json
def lambda_handler(event, context):
    resp = {"value": "input_str"}
    return json.dumps(resp)
'''
        zip_output = io.BytesIO()
        zip_file = zipfile.ZipFile(zip_output, 'w', zipfile.ZIP_DEFLATED)
        zip_file.writestr('lambda_function.py', pfunc)
        zip_file.close()
        zip_output.seek(0)
        return zip_output.read()

    conn.create_function(
        FunctionName='lambda-function-name',
        Runtime='python3.8',
        Role='test-iam-role',
        Handler='lambda_function.lambda_handler',
        Code={
            'ZipFile': get_test_zip_file(),
        },
        Description='test lambda function',
        Timeout=3,
        MemorySize=128,
        Publish=True
    )

    resp = get_lambda_resp("arn", "input_str")  # the function under test from the snippet above
    assert resp is not None
While running the test case, I am getting the error below:
E ModuleNotFoundError: No module named 'docker'
I already have Docker running. What else should I do to make this work?
That message refers to the pip module called docker (the Docker SDK for Python), not the Docker daemon itself.
Assuming you use Moto >=2.x, make sure you install it correctly to get all required dependencies:
pip install moto[awslambda,s3,service1,etc]
Or if you use many services, install all dependencies without having to list all services:
pip install moto[all]
This will install all required pip modules, including the docker package.
Source: https://github.com/spulec/moto/issues/3722
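A quick way to confirm the dependency is present after installing (no output from the import means the module is importable):
python -c "import docker"
pip show docker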
I am running the following script inside AWS Lambda:
#!/usr/bin/python
from __future__ import print_function

import json
import os

import ansible.inventory
import ansible.playbook
import ansible.runner
import ansible.constants
from ansible import utils
from ansible import callbacks

print('Loading function')

def run_playbook(**kwargs):
    stats = callbacks.AggregateStats()
    playbook_cb = callbacks.PlaybookCallbacks(verbose=utils.VERBOSITY)
    runner_cb = callbacks.PlaybookRunnerCallbacks(
        stats, verbose=utils.VERBOSITY)

    # use /tmp instead of $HOME
    ansible.constants.DEFAULT_REMOTE_TMP = '/tmp/ansible'

    out = ansible.playbook.PlayBook(
        callbacks=playbook_cb,
        runner_callbacks=runner_cb,
        stats=stats,
        **kwargs
    ).run()

    return out

def lambda_handler(event, context):
    return main()

def main():
    out = run_playbook(
        playbook='little.yml',
        inventory=ansible.inventory.Inventory(['localhost'])
    )
    return out

if __name__ == '__main__':
    main()
However, I get the following error: failed=True msg='boto required for this module'
However, according to this comment (https://github.com/ansible/ansible/issues/5734#issuecomment-33135727), it should work.
But I don't understand how to specify that in my script. Or can I have a separate hosts file and include it in the script, the same way I call my playbook?
If so, then how?
[EDIT - 1]
I have added the line inventory=ansible.inventory.Inventory('hosts')
with hosts file as:
[localhost]
127.0.0.1 ansible_python_interpreter=/usr/local/bin/python
But I get this error: /bin/sh: /usr/local/bin/python: No such file or directory
So where is Python located inside AWS Lambda?
I installed boto just like I installed other packages in the Lambda's deployment package: pip install boto -t <folder-name>
The bash command which python will usually give the location of the Python binary. There's an example of how to call a bash script from AWS Lambda here.
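If you want to see where Python actually lives from inside the function itself, here is a minimal sketch using only the standard library (it assumes the which binary is present in the execution environment):
import subprocess
import sys

def lambda_handler(event, context):
    # The interpreter actually running this handler
    print('sys.executable: %s' % sys.executable)
    # What a shelled-out 'python' would resolve to on PATH
    print('which python: %s' % subprocess.check_output(['which', 'python']).decode().strip())
    return 'ok'
Whatever path that prints is what ansible_python_interpreter should point to, rather than the /usr/local/bin/python used in the hosts file above.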
Using the new environment variable support in AWS Lambda, I've added an env var via the web UI for my function.
How do I access this from Python? I tried:
import os
MY_ENV_VAR = os.environ['MY_ENV_VAR']
but my function stopped working (if I hard code the relevant value for MY_ENV_VAR it works fine).
AWS Lambda environment variables can be defined using the AWS Console, CLI, or SDKs. This is how you would define an AWS Lambda function that uses an LD_LIBRARY_PATH environment variable using the AWS CLI:
aws lambda create-function \
  --region us-east-1 \
  --function-name myTestFunction \
  --zip-file fileb://path/package.zip \
  --role role-arn \
  --environment Variables={LD_LIBRARY_PATH=/usr/bin/test/lib64} \
  --handler index.handler \
  --runtime nodejs4.3 \
  --profile default
Once created, environment variables can be read using the support your language provides for accessing the environment, e.g. using process.env for Node.js. When using Python, you would need to import the os module, like in the following example:
...
import os
...
print("environment variable: " + os.environ['variable'])
Resource Link:
AWS Lambda Now Supports Environment Variables
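A minimal handler sketch showing the difference that usually bites here: os.environ['MY_ENV_VAR'] raises a KeyError if the variable is not actually configured on the function, which makes the invocation fail, while os.environ.get() lets you supply a fallback:
import os

def handler(event, context):
    # Raises KeyError (and fails the invocation) if MY_ENV_VAR is not set on the function
    strict_value = os.environ['MY_ENV_VAR']

    # Returns the fallback instead of failing if the variable is missing
    safe_value = os.environ.get('MY_ENV_VAR', 'fallback-value')

    return {'strict': strict_value, 'safe': safe_value}
If the hard-coded version works but the os.environ version fails, double-check that the variable name configured in the console exactly matches the one used in the code.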
Assuming you have created the .env file alongside your settings module:
.
├── .env
└── settings.py
Add the following code to your settings.py
# settings.py
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
Alternatively, you can use the find_dotenv() method, which tries to find a .env file by (a) guessing where to start using __file__ or the working directory -- allowing this to work in non-file contexts such as IPython notebooks and the REPL -- and then (b) walking up the directory tree looking for the specified file, called .env by default.
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
Now, you can access the variables either from system environment variable or loaded from .env file.
Resource Link:
https://github.com/theskumar/python-dotenv
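For example, with a .env file containing a hypothetical value:
MY_ENV_VAR=some-local-value
any code that runs after load_dotenv() can read it through the normal environment interface; note that load_dotenv does not override variables that are already set, so the Lambda-configured value wins when one exists:
from dotenv import load_dotenv, find_dotenv
import os

load_dotenv(find_dotenv())
print(os.environ.get('MY_ENV_VAR'))  # 'some-local-value' locally, or the Lambda-configured value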
gepoggio answered in this post: https://github.com/serverless/serverless/issues/577#issuecomment-192781002
A workaround is to use python-dotenv:
https://github.com/theskumar/python-dotenv
import os
import dotenv

# 'here' is the directory containing this file
here = os.path.dirname(os.path.realpath(__file__))

dotenv.load_dotenv(os.path.join(here, "../.env"))
dotenv.load_dotenv(os.path.join(here, "../../.env"))
It tries to load it twice because, when run locally, the .env is in project/.env, and when running in Lambda the .env is located in project/component/.env.
Both
import os
os.getenv('MY_ENV_VAR')
And
os.environ['MY_ENV_VAR']
are feasible solutions; os.getenv returns None when the variable is missing, while os.environ[...] raises a KeyError. Just make sure in the Lambda console that the environment variables are actually defined there.
I used this code; it covers both cases, reading an environment variable inside the handler and reading one outside the handler.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Trying new lambda stuff"""

import os
import configparser

class BqEnv(object):
    """Env and self variables settings"""

    def __init__(self, service_account, configfile=None):
        config = self.parseconfig(configfile)
        self.env = config
        self.service_account = service_account

    @staticmethod
    def parseconfig(configfile):
        """Connection and conf parser"""
        config = configparser.ConfigParser()
        config.read(configfile)
        env = config.get('BigQuery', 'env')
        return env

    def variable_tests(self):
        """Trying conf as a lambda variable"""
        my_env_var = os.environ['MY_ENV_VAR']
        print(my_env_var)
        print(self.env)
        return True

def lambda_handler(event, context):
    """Trying env variables."""
    print(event)
    configfile = os.environ['CONFIG_FILE']
    print(configfile)
    print(type(str(configfile)))

    bqm = BqEnv('some-json.json', configfile)
    bqm.variable_tests()
    return True
I tried this with a demo config file that has this:
[BigQuery]
env = prod
And the corresponding environment variables (CONFIG_FILE and MY_ENV_VAR) were set in the Lambda console configuration (screenshot not reproduced here).
Hope this can help!
os.environ["variable_name"]
In the configuration section of AWS lambda, make sure you declare the variable with the same name that you're trying to access here. For this example, it should be variable_name
I often find myself recreating file structures for Flask apps, so I decided to make a script to do all that for me. I would like the script to create all the folders I need as well as the files with some basic boilerplate; that part is working fine. However, I would also like to create a virtual environment and install Flask into that environment. That is where I am encountering the problem: the script runs, but it installs Flask into my system installation of Python.
I followed the advice in this question here but it's not working. I am running Ubuntu 12.04.4 LTS via crouton on a Chromebook.
#!/usr/bin/python
from os import mkdir, chdir, getcwd, system
import sys

APP_NAME = sys.argv[1]
ROOT = getcwd()
PROJECT_ROOT = ROOT + '/' + APP_NAME

# dictionary represents folder structure. Key is the folder name and the value is its contents
folders = {APP_NAME: {'app': {'static': {'css': '', 'img': '', 'js': ''}, 'templates': ''}}}

def create_folders(dic):
    for key in dic:
        if isinstance(dic[key], dict):
            mkdir(key)
            prev = getcwd() + '/' + key
            chdir(prev)
            create_folders(dic[key])
        else:
            mkdir(key)

create_folders(folders)

chdir(PROJECT_ROOT)

open('config.py', 'a').close()

with open('run.py', 'a') as run:
    run.write("""stuff""")

with open('app/__init__.py', 'a') as init:
    init.write("""stuff""")

with open('app/views.py', 'a') as views:
    views.write("""stuff""")

open('app/models.py', 'a').close()
open('app/forms.py', 'a').close()

with open('app/templates/layout.html', 'a') as layout:
    layout.write("""stuff""")

system('chmod a+x run.py')
system('virtualenv venv')
system('. venv/bin/activate;sudo pip install flask')  # this does not seem to be working the way I am expecting it to
I suppose your calls are not within the same console session and therefore the console environment is not as expected. I suggest concatenating the related commands into one shell invocation using subprocess, like this (including suggestions by limasxgoesto0); note that shell=True is needed when passing a single command string, and . is the POSIX equivalent of source:
subprocess.Popen('virtualenv venv; . venv/bin/activate; pip install flask', shell=True)
You should probably be using subprocess; the os.system documentation recommends the subprocess module as the preferable replacement.
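An alternative sketch that sidesteps shell activation entirely: call the virtualenv's own pip binary, which always installs into that environment (the relative paths assume the script's working directory is the new project root):
import subprocess

subprocess.check_call(['virtualenv', 'venv'])
subprocess.check_call(['venv/bin/pip', 'install', 'flask'])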
Original Question
I've got some python scripts which have been using Amazon S3 to upload screenshots taken following Selenium tests within the script.
Now we're moving from S3 to use GitHub so I've found GitPython but can't see how you use it to actually commit to the local repo and push to the server.
My script builds a directory structure similar to \images\228M\View_Use_Case\1.png in the workspace, and when uploading to S3 it was a simple process:
for root, dirs, files in os.walk(imagesPath):
    for name in files:
        filename = os.path.join(root, name)
        k = bucket.new_key('{0}/{1}/{2}'.format(revisionNumber, images_process, name))  # returns a new key object
        k.set_contents_from_filename(filename, policy='public-read')  # opens local file buffers to key on S3
        k.set_metadata('Content-Type', 'image/png')
Is there something similar for this or is there something as simple as a bash type git add images command in GitPython that I've completely missed?
Updated with Fabric
So I've installed Fabric on kracekumar's recommendation but I can't find docs on how to define the (GitHub) hosts.
My script is pretty simple, just trying to get the upload to work:
from __future__ import with_statement
from fabric.api import *
from fabric.contrib.console import confirm
import os

def git_server():
    env.hosts = ['github.com']
    env.user = 'git'
    env.password = 'password'

def test():
    process = 'View Employee'
    os.chdir('\Work\BPTRTI\main\employer_toolkit')
    with cd('\Work\BPTRTI\main\employer_toolkit'):
        result = local('ant viewEmployee_git')
        if result.failed and not confirm("Tests failed. Continue anyway?"):
            abort("Aborting at user request.")

def deploy():
    process = "View Employee"
    os.chdir('\Documents and Settings\markw\GitTest')
    with cd('\Documents and Settings\markw\GitTest'):
        local('git add images')
        local('git commit -m "Latest Selenium screenshots for %s"' % (process))
        local('git push -u origin master')

def viewEmployee():
    #test()
    deploy()
It Works \o/ Hurrah.
You should look into Fabric (http://docs.fabfile.org/en/1.4.1/index.html), an automated deployment tool. I have been using it for quite some time and it works pretty well.
Here is one of my applications that uses it: https://github.com/kracekumar/sachintweets/blob/master/fabfile.py
It looks like you can do this:
index = repo.index
index.add(['images'])
new_commit = index.commit("my commit message")
and then, assuming you have origin as the default remote:
origin = repo.remotes.origin
origin.push()
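Putting it together, a minimal sketch assuming the images directory sits inside an existing local clone and that origin already points at the GitHub repository (the path below is hypothetical):
import git

repo = git.Repo('/path/to/local/clone')            # open the existing working copy
repo.index.add(['images'])                         # stage the screenshots directory
repo.index.commit('Latest Selenium screenshots')   # commit the staged changes
repo.remotes.origin.push()                         # push to GitHub over the configured remote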