I have tried to use a Lambda function to write a file to S3. The test shows "succeeded", but nothing appeared in my S3 bucket. What happened? Can anyone give me some advice or a solution? Thanks a lot. Here's my code.
import json
import boto3

def lambda_handler(event, context):
    string = "dfghj"
    file_name = "hello.txt"
    lambda_path = "/tmp/" + file_name
    s3_path = "/100001/20180223/" + file_name
    with open(lambda_path, 'w+') as file:
        file.write(string)
        file.close()
    s3 = boto3.resource('s3')
    s3.meta.client.upload_file(lambda_path, 's3bucket', s3_path)
I've had success streaming data to S3; it has to be encoded to do this:
import boto3

def lambda_handler(event, context):
    string = "dfghj"
    encoded_string = string.encode("utf-8")

    bucket_name = "s3bucket"
    file_name = "hello.txt"
    s3_path = "100001/20180223/" + file_name

    s3 = boto3.resource("s3")
    s3.Bucket(bucket_name).put_object(Key=s3_path, Body=encoded_string)
If the data is in a file, you can read this file and send it up:
with open(filename) as f:
    string = f.read()

encoded_string = string.encode("utf-8")
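Putting the two snippets together, here is a minimal sketch of a complete handler. The bucket name, key and contents are taken from the question and are assumptions; note the key has no leading slash.

import boto3

s3 = boto3.resource("s3")

def lambda_handler(event, context):
    bucket_name = "s3bucket"                 # bucket name from the question (assumption)
    s3_path = "100001/20180223/hello.txt"    # note: no leading slash in the key
    lambda_path = "/tmp/hello.txt"

    # Write the file locally, then read the bytes back and upload them.
    with open(lambda_path, "w") as f:
        f.write("dfghj")
    with open(lambda_path, "rb") as f:
        s3.Bucket(bucket_name).put_object(Key=s3_path, Body=f.read())
    return {"statusCode": 200}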
My response is very similar to Tim B's, but the most important part is:
1. Go to the S3 console and create the bucket you want to write to.
2. Follow the steps below, otherwise your Lambda will fail due to permission/access issues. I've copied and pasted the linked content here for you too, just in case they change the URL or move it to another page.
a. Open the roles page in the IAM console.
b. Choose Create role.
c. Create a role with the following properties.
-Trusted entity – AWS Lambda.
-Permissions – AWSLambdaExecute.
-Role name – lambda-s3-role.
The AWSLambdaExecute policy has the permissions that the function needs to manage objects in Amazon S3 and write logs to CloudWatch Logs.
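If you prefer to script these console steps, here is a rough sketch using boto3's IAM client. The role name comes from the steps above; the console steps work just as well, and this is only an illustration.

import json
import boto3

iam = boto3.client("iam")

# Trust policy that lets the Lambda service assume the role.
trust_policy = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Principal": {"Service": "lambda.amazonaws.com"},
        "Action": "sts:AssumeRole",
    }],
}

iam.create_role(
    RoleName="lambda-s3-role",
    AssumeRolePolicyDocument=json.dumps(trust_policy),
)

# Attach the AWS managed AWSLambdaExecute policy (S3 access + CloudWatch Logs).
iam.attach_role_policy(
    RoleName="lambda-s3-role",
    PolicyArn="arn:aws:iam::aws:policy/AWSLambdaExecute",
)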
Copy and paste this into your Lambda Python function:
import json, boto3, os, sys, uuid
from urllib.parse import unquote_plus

s3_client = boto3.client('s3')

def lambda_handler(event, context):
    some_text = "test"
    # put the bucket name you created in step 1
    bucket_name = "my_buck_name"
    file_name = "my_test_file.csv"
    lambda_path = "/tmp/" + file_name
    s3_path = "output/" + file_name
    os.system('echo testing... >' + lambda_path)
    s3 = boto3.resource("s3")
    # upload under the s3_path key so the object lands in the output/ prefix
    s3.meta.client.upload_file(lambda_path, bucket_name, s3_path)
    return {
        'statusCode': 200,
        'body': json.dumps('file is created in:' + s3_path)
    }
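As a side note, the os.system('echo ...') line above can be replaced with plain Python file I/O. A small sketch, assuming the same lambda_path variable:

# Equivalent to os.system('echo testing... >' + lambda_path), without shelling out.
with open(lambda_path, "w") as f:
    f.write("testing...\n")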
from os import path
import json, boto3, sys, uuid
import requests

s3_client = boto3.client('s3')

def lambda_handler(event, context):
    bucket_name = "mybucket"
    url = "https://i.imgur.com/ExdKOOz.png"
    response = requests.get(url)
    filename = get_filename(url)
    img = response.content

    s3 = boto3.resource("s3")
    s3.Bucket(bucket_name).put_object(Key=filename, Body=img)
    return {'statusCode': 200, 'body': json.dumps('file is created in:')}

def get_filename(url):
    fragment_removed = url.split("#")[0]
    query_string_removed = fragment_removed.split("?")[0]
    scheme_removed = query_string_removed.split("://")[-1].split(":")[-1]
    if scheme_removed.find("/") == -1:
        return ""
    return path.basename(scheme_removed)
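As a side note, the same filename extraction can be done with urllib.parse. A sketch, assuming the URL has a path component:

from os import path
from urllib.parse import urlparse

def get_filename(url):
    # Parse the URL and take the last path component; fragments and
    # query strings are stripped by urlparse itself.
    return path.basename(urlparse(url).path)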
s3 = boto3.resource('s3')
bucket = s3.Bucket('***')
prefix_objs = bucket.objects.filter(Prefix='****')
body = []
for obj in prefix_objs:
    print(obj.key)
This chunk of code isn't returning any output. Ideally I would want to read the multiple files into different dataframes.
The prefix_objs variable is returning the following:
s3.Bucket.objectsCollection(s3.Bucket(name='****'), s3.ObjectSummary)
As I understand it, you want to print out objects whose keys start with a specific prefix (like logbucket-asdf, logbucket-qwerty, etc.). For such a case you can use this code:
bucket_name = 'paste your bucket name'
prefix = 'paste your prefix'

import boto3

s3 = boto3.resource('s3')
my_bucket = s3.Bucket(bucket_name)

for my_bucket_object in my_bucket.objects.all():
    if my_bucket_object.key.startswith(prefix):
        print(my_bucket_object.key)
Or, if you want to print out objects whose keys contain a specific element (like myapp-logbucket, ic-data-logbucket-asdf, logbucket-erere, etc.), you can use the following example:
bucket_name = 'paste your bucket name'
prefix = 'paste your prefix'

import boto3

s3 = boto3.resource('s3')
my_bucket = s3.Bucket(bucket_name)

for my_bucket_object in my_bucket.objects.all():
    if str(my_bucket_object.key).find(prefix) > -1:
        print(my_bucket_object.key)
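Either way, when the prefix is a real key prefix (and not an arbitrary substring), the filtering can also be pushed to S3 itself instead of listing every object. A sketch using the same bucket_name and prefix variables:

import boto3

s3 = boto3.resource('s3')
my_bucket = s3.Bucket(bucket_name)

# Let S3 do the prefix filtering server-side.
for my_bucket_object in my_bucket.objects.filter(Prefix=prefix):
    print(my_bucket_object.key)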
I am in the process of automating an AWS Textract flow where files get uploaded to S3 using an app (that part I have already done), a Lambda function gets triggered, extracts the forms as a CSV, and saves it in the same bucket.
I started with a Textract call that just extracts all the text in the image, with the result being a .txt file. Below is my code:
import os
import urllib.parse

import boto3

textract = boto3.client('textract')
s3 = boto3.client('s3')

def InvokeTextract(bucketName, documentKey):
    print('Loading InvokeTextract')
    # Call Amazon Textract
    response = textract.detect_document_text(
        Document={
            'S3Object': {
                'Bucket': bucketName,
                'Name': documentKey
            }
        })

    Textractoutput = ''

    # Collect detected text line by line
    for item in response['Blocks']:
        if item['BlockType'] == 'LINE':
            Textractoutput += item['Text'] + '\n'

    return Textractoutput

def writeOutputToS3Bucket(textractData, bucketName, createdS3Document):
    print('Loading writeOutputToS3Bucket')
    generateFilePath = os.path.splitext(createdS3Document)[0] + '.txt'
    s3.put_object(Body=textractData, Bucket=bucketName, Key=generateFilePath)
    print('Generated ' + generateFilePath)

def lambda_handler(event, context):
    # Get the object from the event
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
    try:
        Textractoutput = InvokeTextract(bucket, key)
        writeOutputToS3Bucket(Textractoutput, bucket, key)
        return 'Processed'
    except Exception as e:
        print(e)
        raise
And this works just fine, but if I want to get key-value pairs, it isn't helpful. So I tried other code that produces a CSV. From my local drive, I was able to do that. Below is part of my code:
import trp  # local module
import csv

doc = Document(response)  # from TRP

with open('aws_doc.csv', mode='w') as aws_field_file:
    field_write = csv.writer(aws_field_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    field_write.writerow(["Key", "Value"])
    for page in doc.pages:
        for field in page.form.fields:
            # This will write it as your <key>, <value>
            field_write.writerow([field.key, field.value])
But when I try to do this in Lambda, I am not getting the result (i.e. a CSV file in my bucket). I read about it and found I needed to create a tmp file, but that was a bit unclear. I went with the code below:
def lambda_handler(event, context):
    # Get the object from the event
    bucketName = event['Records'][0]['s3']['bucket']['name']
    documentKey = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')

    # S3 client
    s3 = boto3.resource('s3')
    # Amazon Textract client
    textract = boto3.client('textract')

    # Get AWS Textract response for forms
    response = textract.analyze_document(
        Document={
            'S3Object': {
                'Bucket': bucketName,
                'Name': documentKey
            }
        },
        FeatureTypes=["FORMS"])

    # Using custom trp module
    doc = Document(response)

    import csv
    temp_csv_file = csv.writer(open("/tmp/csv_file.csv", "w+"))
    temp_csv_file.writerow(["Key", "Value"])

    for page in doc.pages:
        for field in page.form.fields:
            # This will write it as your <key>, <value>
            temp_csv_file.writerow([field.key, field.value])

    bucketName.upload_file('/tmp/csv_file.csv', 'textractData.csv')
Is my code correct? Am I missing a step in there?
Instead of
bucketName.upload_file('/tmp/csv_file.csv', 'textractData.csv')
Try
s3.upload_file('/tmp/csv_file.csv', bucketName, 'textractData.csv')
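Note that in the question, s3 was created with boto3.resource('s3'), and the resource object itself has no upload_file method. With a resource, one of these should work instead (a sketch reusing the same variable names):

# Upload via the resource's underlying low-level client...
s3.meta.client.upload_file('/tmp/csv_file.csv', bucketName, 'textractData.csv')
# ...or via the Bucket sub-resource.
s3.Bucket(bucketName).upload_file('/tmp/csv_file.csv', 'textractData.csv')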
Try this unless you need to create a temp file.
s3.put_object(Body='contents', Bucket='bucket-name', Key='outputTextFileName')
I got this to work by implementing it as below:
import csv
from io import StringIO

def writeCSV(csvData):
    body = StringIO()  # because S3 requires bytes or a file-like object
    writer = csv.writer(body)
    for item in csvData:
        writer.writerow(item)
    csvS3 = body.getvalue()
    return csvS3

contents = writeCSV('provide csv data')
s3.put_object(Body=contents, Bucket='bucket-name', Key='outputTextFileName')
s3 has to be defined previously, using s3 = boto3.client('s3'). The bucket must already exist, in the same region as the Lambda function.
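Put together, a minimal sketch of the in-memory approach for the Textract key/value pairs. It assumes doc.pages from the trp module is available and that the bucket name and output key are supplied by the caller:

import csv
from io import StringIO

import boto3

s3 = boto3.client('s3')

def write_fields_to_s3(doc, bucket_name, key):
    # Build the CSV in memory, then upload the resulting string as one object.
    body = StringIO()
    writer = csv.writer(body)
    writer.writerow(["Key", "Value"])
    for page in doc.pages:
        for field in page.form.fields:
            writer.writerow([field.key, field.value])
    s3.put_object(Body=body.getvalue(), Bucket=bucket_name, Key=key)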
I am trying to download a file from Amazon S3 to a predefined folder on the local machine. This is the code, and it mostly works, but when the file is saved, the last part of the folder path gets joined onto its name. How should I correct this?
import boto3
import os

S3_Object = boto3.client('s3', aws_access_key_id='##', aws_secret_access_key='##')
BUCKET_NAME = '##'
filename2 = []
Key2 = []

bucket = S3_Object.list_objects(Bucket=BUCKET_NAME)['Contents']
download_path = target_file_path = os.path.join('..', 'data', 'lz', 'test_sample', 'sample_file')

for key in bucket:
    path, filename = os.path.split(key['Key'])
    filename2.append(filename)
    Key2.append(key['Key'])

for f in Key2:
    if f.endswith('.csv'):
        print(f)
        file_name = str(f.rsplit('/', 1)[-1])
        print(file_name)
        if not os.path.exists(download_path):
            os.makedirs(download_path)
        else:
            S3_Object.download_file(BUCKET_NAME, f, download_path + file_name)
        print("success")
Here is my test code.
import boto3
import os

s3 = boto3.resource('s3')
bucket = 'your bucket'

response = s3.Bucket(bucket).objects.all()
# If you want to search only a specific path of the bucket:
# response = s3.Bucket(bucket).objects.filter(Prefix='path')

path = 'your path'
if not os.path.exists(path):
    os.makedirs(path)

for item in response:
    filename = item.key.rsplit('/', 1)[-1]
    if filename.endswith('.csv'):
        s3.Object(bucket, item.key).download_file(path + filename)
        print("success")
I have tested the code and it gives a correct name.
What is wrong?
I think there is a missing / in your code for the path.
print(os.path.join('..', 'data', 'lz', 'test_sample', 'sample_file'))
The code gives the result:
../data/lz/test_sample/sample_file
So, in the below step,
S3_Object.download_file(BUCKET_NAME, f, download_path + file_name)
the download_path + file_name will be wrong and it should be:
S3_Object.download_file(BUCKET_NAME, f, download_path + '/' + file_name)
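Alternatively, os.path.join avoids having to manage the separator by hand. A small sketch with the same variables:

import os

# os.path.join inserts the separator between the directory and the file name.
local_path = os.path.join(download_path, file_name)
S3_Object.download_file(BUCKET_NAME, f, local_path)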
The following function downloads the files recursively.
The directories are created locally only if they contain files.
import boto3
import os

def download_dir(client, resource, dist, local='/tmp', bucket='your_bucket'):
    paginator = client.get_paginator('list_objects')
    for result in paginator.paginate(Bucket=bucket, Delimiter='/', Prefix=dist):
        if result.get('CommonPrefixes') is not None:
            for subdir in result.get('CommonPrefixes'):
                download_dir(client, resource, subdir.get('Prefix'), local, bucket)
        for file in result.get('Contents', []):
            dest_pathname = os.path.join(local, file.get('Key'))
            if not os.path.exists(os.path.dirname(dest_pathname)):
                os.makedirs(os.path.dirname(dest_pathname))
            resource.meta.client.download_file(bucket, file.get('Key'), dest_pathname)
The function is called this way:

def _start():
    client = boto3.client('s3')
    resource = boto3.resource('s3')
    download_dir(client, resource, 'clientconf/', '/tmp', bucket='my-bucket')
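If recursion is not needed, dropping the Delimiter makes the paginator return every key under the prefix in one pass. A sketch under the same assumptions as the function above:

import os
import boto3

def download_prefix(client, bucket, prefix, local='/tmp'):
    # Without Delimiter, list_objects returns all keys under the prefix,
    # including those in "subdirectories".
    paginator = client.get_paginator('list_objects')
    for result in paginator.paginate(Bucket=bucket, Prefix=prefix):
        for obj in result.get('Contents', []):
            if obj['Key'].endswith('/'):
                continue  # skip folder placeholder objects
            dest = os.path.join(local, obj['Key'])
            os.makedirs(os.path.dirname(dest), exist_ok=True)
            client.download_file(bucket, obj['Key'], dest)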
This question has been asked earlier in the following link:
How to write dynamodb scan data's in CSV and upload to s3 bucket using python?
I have amended the code as advised in the comments. The code looks as follows:
import csv
import boto3
import json

dynamodb = boto3.resource('dynamodb')
db = dynamodb.Table('employee_details')

def lambda_handler(event, context):
    AWS_BUCKET_NAME = 'session5cloudfront'
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(AWS_BUCKET_NAME)
    path = '/tmp/' + 'employees.csv'

    try:
        response = db.scan()
        myFile = open(path, 'w')
        for i in response['Items']:
            csv.register_dialect('myDialect', delimiter=' ', quoting=csv.QUOTE_NONE)
            with myFile:
                writer = csv.writer(myFile, dialect='myDialect')
                writer.writerows(i)
            print(i)
    except:
        print("error")

    bucket.put_object(
        ACL='public-read',
        ContentType='application/csv',
        Key=path,
        # Body=json.dumps(i),
    )
    # print("here")

    body = {
        "uploaded": "true",
        "bucket": AWS_BUCKET_NAME,
        "path": path,
    }
    # print("then here")
    return {
        "statusCode": 200,
        "body": json.dumps(body)
    }
I am a novice; please help me fix this code, as it is having problems inserting the data into the file created in the S3 bucket.
Thanks
I have revised the code to be simpler and to also handle paginated responses for tables with more than 1MB of data:
import csv
import boto3
import json

TABLE_NAME = 'employee_details'
OUTPUT_BUCKET = 'my-bucket'
TEMP_FILENAME = '/tmp/employees.csv'
OUTPUT_KEY = 'employees.csv'

s3_resource = boto3.resource('s3')
dynamodb_resource = boto3.resource('dynamodb')
table = dynamodb_resource.Table(TABLE_NAME)

def lambda_handler(event, context):
    with open(TEMP_FILENAME, 'w') as output_file:
        writer = csv.writer(output_file)
        header = True
        first_page = True

        # Paginate results
        while True:
            # Scan DynamoDB table
            if first_page:
                response = table.scan()
                first_page = False
            else:
                response = table.scan(ExclusiveStartKey=response['LastEvaluatedKey'])

            for item in response['Items']:
                # Write header row?
                if header:
                    writer.writerow(item.keys())
                    header = False
                writer.writerow(item.values())

            # Last page?
            if 'LastEvaluatedKey' not in response:
                break

    # Upload temp file to S3
    s3_resource.Bucket(OUTPUT_BUCKET).upload_file(TEMP_FILENAME, OUTPUT_KEY)
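As a side note, the low-level DynamoDB client has a built-in paginator for scan, which removes the manual LastEvaluatedKey loop. A rough sketch reusing the TABLE_NAME, OUTPUT_BUCKET, OUTPUT_KEY and TEMP_FILENAME constants from above; note the client returns items in DynamoDB's typed format, so the values need unwrapping:

import csv
import boto3

dynamodb_client = boto3.client('dynamodb')
s3_resource = boto3.resource('s3')

def export_table(table_name=TABLE_NAME, bucket=OUTPUT_BUCKET, key=OUTPUT_KEY):
    paginator = dynamodb_client.get_paginator('scan')
    with open(TEMP_FILENAME, 'w') as output_file:
        writer = csv.writer(output_file)
        header_written = False
        for page in paginator.paginate(TableName=table_name):
            for item in page['Items']:
                if not header_written:
                    writer.writerow(item.keys())
                    header_written = True
                # Each value looks like {'S': 'Bob'} or {'N': '42'}; keep the inner value.
                writer.writerow([list(v.values())[0] for v in item.values()])
    s3_resource.Bucket(bucket).upload_file(TEMP_FILENAME, key)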
I am trying to upload files from a local directory to an S3 folder. I am able to upload files to the S3 bucket, but I am unable to upload files to a folder within the S3 bucket.
Could anyone help? What am I doing wrong here?
Here is the code:
import os
import sys
import boto3
import fnmatch
import pprint
import re
import hashlib

SOURCE_DIR = '/home/user/Downloads/tracks/'
BUCKET_NAME = 'mybucket'
S3_FOLDER = 'mybucket/folder1/'

client = boto3.client('s3')
s3 = boto3.resource('s3')
bucket = s3.Bucket(BUCKET_NAME)  # needed by get_etag() below

def get_md5(filename):
    f = open(filename, 'rb')
    m = hashlib.md5()
    while True:
        data = f.read(10240)
        if len(data) == 0:
            break
        m.update(data)
    return m.hexdigest()

def get_etag(filebase, filepath):
    for item in bucket.objects.all():
        keyfile = S3_FOLDER + filebase
        if keyfile == item.key:
            md5 = get_md5(filepath)
            etag = item.e_tag.strip('"').strip("'")
            if etag != md5:
                print(filebase + ": " + md5 + " != " + etag)
                return files_to_upload.append(filepath)
        else:
            return files_to_upload.append(filepath)

files_to_upload = []

for root, dirnames, filenames in os.walk(SOURCE_DIR):
    for filename in filenames:
        filepath = os.path.join(root, filename)
        get_etag(filename, filepath)

for f in files_to_upload:
    client.put_object(Bucket=BUCKET_NAME, Key=f)
Folders don't really exist in S3. You can prefix the file name (object key) with something that looks like a folder path.
It's not entirely clear to me what your code is doing with the file paths, but your code needs to be changed to something like this:
for f in files_to_upload:
    key = "my/s3/folder/name/" + f
    # open the file and send its contents as the object body
    with open(f, 'rb') as data:
        client.put_object(Bucket=BUCKET_NAME, Key=key, Body=data)
Note: You weren't passing a Body parameter, so I think your code was just creating empty objects in S3.
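If the goal is to mirror the local directory under a folder prefix, here is a sketch using upload_file, which reads and streams the file for you. The 'folder1/' prefix is taken from the question's S3_FOLDER; the relative-path key scheme is an assumption:

import os

for filepath in files_to_upload:
    # Key like 'folder1/<path relative to SOURCE_DIR>', e.g. 'folder1/album/track01.mp3'.
    key = 'folder1/' + os.path.relpath(filepath, SOURCE_DIR).replace(os.sep, '/')
    client.upload_file(filepath, BUCKET_NAME, key)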