Boto3 not uploading zip file to S3 (Python)

I'm trying to upload a .zip file to S3 using boto3 for python but the .zip file in my directory is not uploaded correctly.
The code downloads all emails of a given user, zips them in the same directory and uploads them to an S3 bucket.
The problem is that the file that gets uploaded is not the one I intend to upload; instead, a file of only about 18 KB appears.
Here's the code:
import sys
import imaplib
import getpass
import email
import shutil
import boto3
import os

username = input("Enter user's first name: ")
surname = input("Enter user's surname: ")
email_address = username + "." + surname + "@gmail.com"
password = getpass.getpass()
directory = username + surname + '/'

def download_emails(server):
    result, data = server.uid('search', None, "ALL")  # search all emails and return their UIDs
    if result == 'OK':
        for num in data[0].split():
            result, data = server.uid('fetch', num, '(RFC822)')  # RFC 822 is a standard for the format of ARPA Internet text messages
            if result == 'OK':
                email_message = email.message_from_bytes(data[0][1])  # raw email text including headers
                file_name = email_message['Subject']  # use dates and file names (can be changed)
                if not os.path.exists(directory):
                    os.makedirs(directory)  # create a dir for the user's emails
                try:
                    email_file = open(directory + file_name + '.eml', 'wb')  # open a file for each email and write the data
                    email_file.write(data[0][1])
                    email_file.close()
                except:
                    pass

# function to zip all the emails
def archive(zipname, directory):
    return shutil.make_archive(zipname, 'zip', root_dir=directory, base_dir=None)

# function to upload zipped emails to AWS bucket
def upload_to_s3(file_name):
    s3 = boto3.resource('s3',
                        aws_access_key_id=accessKey,
                        aws_secret_access_key=secretKey,
                        aws_session_token=secretToken,
                        )
    s3.Bucket('user-backups').put_object(Key=username.title() + " " +
                                         surname.title() + "/" + file_name, Body=file_name)
    print("Uploaded")

def main():
    server = imaplib.IMAP4_SSL("imap.gmail.com", 993)  # connect to Gmail's IMAP server
    server.login(email_address, password)  # enter creds
    result, data = server.select('"[Gmail]/All Mail"')  # get all emails (inbox, outbox etc.)
    if result == 'OK':
        print("Downloading")
        download_emails(server)
        server.close()
    else:
        print("ERROR: Unable to open mailbox ", result)
    server.logout()
    archive(username + surname, directory)
    upload_to_s3(username + surname + ".zip")
    #os.remove(email_address + ".zip")
    #shutil.rmtree(email_address)
    print("Done")

if __name__ == "__main__":
    main()

You can check out this article for more information.
There are a number of ways to upload. Check out the boto3 documentation; I have listed the managed upload methods below:
The managed upload methods are exposed in both the client and resource interfaces of boto3:
S3.Client method to upload a file by name: S3.Client.upload_file()
S3.Client method to upload a readable file-like object: S3.Client.upload_fileobj()
S3.Bucket method to upload a file by name: S3.Bucket.upload_file()
S3.Bucket method to upload a readable file-like object: S3.Bucket.upload_fileobj()
S3.Object method to upload a file by name: S3.Object.upload_file()
S3.Object method to upload a readable file-like object: S3.Object.upload_fileobj()
I made it work using s3.client.upload_file.
upload_file(Filename, Bucket, Key, ExtraArgs=None, Callback=None, Config=None)
Upload a file to an S3 object.
import boto3

s3Resource = boto3.resource('s3')
try:
    s3Resource.meta.client.upload_file('/path/to/file', 'bucketName', 'keyName')
except Exception as err:
    print(err)

None of the above answers worked!
The following code worked for me:
import os
import boto3

def upload_file_zip(local_file_path):
    s3_client = boto3.client('s3')
    # BUCKET_NAME must be defined elsewhere (the name of the target bucket)
    s3_path = os.path.join(os.path.basename(local_file_path))
    with open(local_file_path, mode='rb') as data:
        s3_client.upload_fileobj(data, BUCKET_NAME, s3_path)
Updated the code, since the s3_folder parameter is not required here.

The put_object function accepts a Body parameter, which is either a bytes object or a file object. You have currently passed just the plain filename (a string).
From documentation:
Body (bytes or seekable file-like object) -- Object data.
So the fix should be to pass the file object. Consult the documentation to see how to do that.
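For example, here is a minimal sketch of the question's upload_to_s3 rewritten to pass an open file handle as Body (the bucket name and the username/surname globals are assumed from the question's code):

import boto3

def upload_to_s3(file_name):
    s3 = boto3.resource('s3')
    key = username.title() + " " + surname.title() + "/" + file_name
    # pass a seekable file-like object (or its bytes), not the filename string
    with open(file_name, 'rb') as data:
        s3.Bucket('user-backups').put_object(Key=key, Body=data)
    print("Uploaded")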

Just use s3.client.upload_file.
upload_file(Filename, Bucket, Key, ExtraArgs=None, Callback=None, Config=None)
def upload_to_s3(file_name):
    s3 = boto3.client('s3')
    key = username.title() + " " + surname.title() + "/" + file_name
    try:
        s3.upload_file(file_name, 'user-backups', key)
    except Exception as e:
        print(e)

I managed to upload a .zip file by means of the following code:
import os
import boto3

def write_to_s3(filename, bucket, key):
    s3 = boto3.resource(service_name='s3',
                        aws_access_key_id=os.environ["AWS_USER1_ACCESS_KEY"],
                        aws_secret_access_key=os.environ["AWS_USER1_SECRET_ACCESS_KEY"])
    s3.meta.client.upload_file(filename, bucket, key)
Note: I had to use boto3.resource() instead of boto3.client() for the meta.client call above, as the client interface threw an exception.
Exception thrown if boto3.client(...).meta.client is used:
AttributeError: 'ClientMeta' object has no attribute 'client'

Related

How to make a link to S3 file download

I want to make a link to download a file stored in S3.
<a href="https://s3.region.amazonaws.com/bucket/file.txt" download>DownLoad</a>
It only displays file.txt in the browser.
So I found a way to force the download: add the Content-Disposition: attachment metadata to the file.
But I need to add this metadata to new files automatically, so I made a Lambda function in Python.
import json
import urllib.parse
import boto3

print('Loading function')

s3 = boto3.client('s3')

def lambda_handler(event, context):
    #print("Received event: " + json.dumps(event, indent=2))

    # Get the object from the event and show its content type
    bucket = event['Records'][0]['s3']['bucket']['name']
    key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'], encoding='utf-8')
    try:
        response = s3.get_object(Bucket=bucket, Key=key)
        print("CONTENT TYPE: " + response['ContentType'])
    except Exception as e:
        print(e)
        print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(key, bucket))
        raise e
    try:
        s3_2 = boto3.resource('s3')
        s3_object = s3_2.Object(bucket, key)
        print(s3_object.metadata)
        s3_object.metadata.update({'ContentDisposition': 'attachment'})
        print(bucket, key)
        s3_object.copy_from(CopySource={'Bucket': bucket, 'Key': key},
                            Metadata=s3_object.metadata, MetadataDirective='REPLACE')
    except:
        print(s3_object.metadata)
    return response['ContentType']
But this function adds a user-defined metadata tag, not a system metadata tag.
What should I do?
Content-Disposition is treated by S3 as (somewhat) more like system metadata than custom/user-defined metadata, so it has its own argument.
s3_object.copy_from(CopySource={'Bucket': bucket, 'Key': key},
                    ContentDisposition='attachment',
                    Metadata=s3_object.metadata,
                    MetadataDirective='REPLACE')
Note that you still need Metadata and MetadataDirective as shown, for this to work, but s3_object.metadata.update() is not required since you are not changing the custom metadata.

How to make a Python AWS Lambda open an email stored in S3 as email object

I realize this is a total noob question and hopefully an easy solution exists. However, I'm stuck and turning to you for help! What I'm trying to do is this: I have an SES rule set that stores emails in my S3 bucket. The specific emails I'm storing contain a .txt attachment. I'm hoping to have a Lambda function that is triggered on the S3 bucket's "create" event, opens the email AND the attachment, and then performs some other processing based on specific text in the email attachment.
My specific question is this: How do I allow the Lambda function to take the S3 email "object" and convert it to the standard Python "message" object format so that I can use Python's Email library against it?
Here is what I have so far...not much, I know:
import boto3
import email

def lambda_handler(event, context):
    s3 = boto3.client("s3")
    if event:
        print("My Event is : ", event)
        file_obj = event["Records"][0]
        filename = str(file_obj["s3"]['object']['key'])
        print("filename: ", filename)
        fileObj = s3.get_object(Bucket="mytestbucket", Key=filename)
        print("file has been gotten!")
        # Now that the .eml file that was stored in S3 is stored in fileObj,
        # start parsing it--but how to convert it to the "email" class???
        # ??????
Can you try something like this? With this, you will get the msg object back from the stream you opened from the S3 file.
import boto3
import email

def lambda_handler(event, context):
    s3 = boto3.client("s3")
    if event:
        print("My Event is : ", event)
        file_obj = event["Records"][0]
        filename = str(file_obj["s3"]['object']['key'])
        print("filename: ", filename)
        fileObj = s3.get_object(Bucket="mytestbucket", Key=filename)
        print("file has been gotten!")
        msg = email.message_from_bytes(fileObj['Body'].read())
        print(msg['Subject'])
        #Hello
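If you also need the .txt attachment mentioned in the question, a rough sketch using the standard email API (assuming a single plain-text attachment) could continue from msg like this:

# walk the MIME parts and pull out any attachment with a filename
for part in msg.walk():
    if part.get_content_maintype() == 'multipart':
        continue
    if part.get_filename():
        attachment_text = part.get_payload(decode=True).decode('utf-8')
        print(part.get_filename())
        # ...process attachment_text here...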

Using AWS Lambda and boto3 to append new lines to text file objects in S3

I'm trying to use a Python Lambda function to append a new line to a text file object stored in S3. Since objects stored in S3 are immutable, you must first download the file into '/tmp/', then modify it, then upload the new version back to S3. My code appends the data; however, it will not append it with a new line.
import boto3
import botocore.exceptions

BUCKET_NAME = 'mybucket'
KEY = 'test.txt'

s3 = boto3.resource('s3')

def lambda_handler(event, context):
    try:
        s3.Object(BUCKET_NAME, KEY).download_file('/tmp/test.txt')
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise
    with open('/tmp/test.txt', 'a') as fd:
        fd.write("this is a new string\n")
    s3.meta.client.upload_file('/tmp/test.txt', BUCKET_NAME, KEY)
The file is always appended with the new string but never with a new line. Any ideas?
UPDATE: This problem does not occur on Linux machines or on a Mac. Lambda functions run on Linux containers, which means the file in /tmp/ is saved as a Unix-formatted text file. Some Windows applications will not show line breaks in Unix-formatted text files, which was the case here. I'm dumb.
You don't need to download and re-upload a file in order to overwrite a file in S3; to overwrite an existing object you can just upload a file with the same name and it will be done automatically (reference). Look into the put_object function (S3 doc).
So your code will look like this:
BUCKET_NAME = 'mybucket'
KEY = 'test.txt'

# Use .client() instead of .resource()
s3 = boto3.client('s3')

def lambda_handler(event, context):
    try:
        # (Optional) Read the object
        obj = s3.get_object(Bucket=BUCKET_NAME, Key=KEY)
        file_content = obj['Body'].read().decode('utf-8')

        # (Optional) Update the file content
        new_file_content = file_content + "this is a new string\n"

        # Write to the object
        s3.put_object(Bucket=BUCKET_NAME, Key=KEY, Body=str(new_file_content))
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise
You need to specify the local file path
import boto3
import botocore
from botocore.exceptions import ClientError

BUCKET_NAME = 'mybucket'
KEY = 'test.txt'
LOCAL_FILE = '/tmp/test.txt'

s3 = boto3.resource('s3')

def lambda_handler(event, context):
    try:
        obj = s3.Bucket(BUCKET_NAME).download_file(LOCAL_FILE, KEY)
    except ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise
    with open('/tmp/test.txt', 'a') as fd:
        fd.write("this is a new string\n")
    s3.meta.client.upload_file(LOCAL_FILE, BUCKET_NAME, KEY)
Boto3 doc reference: http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Bucket.download_file
Nice post!
Just an adjustment: you should change the order of LOCAL_FILE and KEY in the parameters of the download_file method.
The correct syntax is:
obj = s3.Bucket(BUCKET_NAME).download_file(KEY, LOCAL_FILE)
It would also be nice to delete the local file when the file is not found in the bucket, because if we don't remove the local file (if it exists, obviously) we may be appending a new line to an already existing local file.
With the help of this function:
def remove_local_file(filePath):
    import os
    # check whether the file exists before trying to delete it
    if os.path.exists(filePath):
        os.remove(filePath)
    else:
        print("Can not delete the file as it doesn't exist")
the final code, starting at the 'try', could look like this:
try:
    obj = s3.Bucket(BUCKET_NAME).download_file(KEY, LOCAL_FILE)
except ClientError as e:
    if e.response['Error']['Code'] == "404":
        print("The object does not exist.")
        remove_local_file(LOCAL_FILE)
    else:
        raise

with open(LOCAL_FILE, 'a') as fd:
    fd.write("this is a new string\n")

s3.meta.client.upload_file(LOCAL_FILE, BUCKET_NAME, KEY)

How to download files from s3 given the file path using boto3 in python

Pretty basic, but I am not able to download files given an S3 path.
For example, I have this: s3://name1/name2/file_name.txt
import boto3

locations = ['s3://name1/name2/file_name.txt']
s3_client = boto3.client('s3')
bucket = 'name1'
prefix = 'name2'

for file in locations:
    s3_client.download_file(bucket, 'file_name.txt', 'my_local_folder')
I am getting the error botocore.exceptions.ClientError: An error occurred (404) when calling the HeadObject operation: Not Found.
The file exists, as I can download it with the AWS CLI using the S3 path s3://name1/name2/file_name.txt.
You need to have a list of filename paths (object keys relative to the bucket), then modify your code as shown in the documentation:
import os
import boto3
import botocore

files = ['name2/file_name.txt']
bucket = 'name1'
s3 = boto3.resource('s3')

for file in files:
    try:
        s3.Bucket(bucket).download_file(file, os.path.basename(file))
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise
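If you would rather keep the full s3:// paths from the question's locations list, one way (a sketch using the standard library's urlparse; downloading into /tmp is just an example) is to split each URI into bucket and key first:

import os
from urllib.parse import urlparse
import boto3

s3_client = boto3.client('s3')
locations = ['s3://name1/name2/file_name.txt']

for uri in locations:
    parsed = urlparse(uri)             # s3://<bucket>/<key>
    bucket = parsed.netloc             # 'name1'
    key = parsed.path.lstrip('/')      # 'name2/file_name.txt'
    local_path = os.path.join('/tmp', os.path.basename(key))
    s3_client.download_file(bucket, key, local_path)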
You may need to do this with some type of authentication. There are several methods, but creating a session is simple and fast:
from boto3.session import Session

bucket_name = 'your_bucket_name'
folder_prefix = 'your/path/to/download/files'
credentials = 'credentials.txt'

with open(credentials, 'r', encoding='utf-8') as f:
    line = f.readline().strip()
    access_key = line.split(':')[0]
    secret_key = line.split(':')[1]

session = Session(
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key
)
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)

for s3_file in bucket.objects.filter(Prefix=folder_prefix):
    file_object = s3_file.key
    file_name = str(file_object.split('/')[-1])
    print('Downloading file {} ...'.format(file_object))
    bucket.download_file(file_object, '/tmp/{}'.format(file_name))
In the credentials.txt file you must add a single line where you concatenate the access key id and the secret, for example:
~$ cat credentials.txt
AKIAIO5FODNN7EXAMPLE:ABCDEF+c2L7yXeGvUyrPgYsDnWRRC1AYEXAMPLE
Don't forget to protect this file well on your host; give read-only permissions to the user who runs this program. I hope it works for you; it works perfectly for me.

How to upload a file to directory in S3 bucket using boto

I want to copy a file to an S3 bucket using Python.
For example: I have a bucket named "test", and in the bucket I have two folders named "dump" and "input". Now I want to copy a file from a local directory to the S3 "dump" folder using Python. Can anyone help me?
NOTE: This answer uses boto. See the other answer that uses boto3, which is newer.
Try this...
import boto
import boto.s3
import sys
from boto.s3.key import Key

AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''

bucket_name = AWS_ACCESS_KEY_ID.lower() + '-dump'
conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
                       AWS_SECRET_ACCESS_KEY)

bucket = conn.create_bucket(bucket_name,
                            location=boto.s3.connection.Location.DEFAULT)

testfile = "replace this with an actual filename"
print 'Uploading %s to Amazon S3 bucket %s' % \
    (testfile, bucket_name)

def percent_cb(complete, total):
    sys.stdout.write('.')
    sys.stdout.flush()

k = Key(bucket)
k.key = 'my test file'
k.set_contents_from_filename(testfile,
                             cb=percent_cb, num_cb=10)
[UPDATE]
I am not a pythonist, so thanks for the heads up about the import statements.
Also, I would not recommend placing credentials inside your own source code. If you are running this inside AWS, use IAM credentials with instance profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html), and to keep the same behaviour in your Dev/Test environment, use something like Hologram from AdRoll (https://github.com/AdRoll/hologram).
import boto3
s3 = boto3.resource('s3')
BUCKET = "test"
s3.Bucket(BUCKET).upload_file("your/local/file", "dump/file")
No need to make it that complicated:
import boto.s3.key

s3_connection = boto.connect_s3()
bucket = s3_connection.get_bucket('your bucket name')
key = boto.s3.key.Key(bucket, 'some_file.zip')
with open('some_file.zip', 'rb') as f:
    key.send_file(f)
Upload file to s3 within a session with credentials.
import boto3

session = boto3.Session(
    aws_access_key_id='AWS_ACCESS_KEY_ID',
    aws_secret_access_key='AWS_SECRET_ACCESS_KEY',
)
s3 = session.resource('s3')

# Filename - File to upload
# Bucket   - Bucket to upload to (the top level directory under AWS S3)
# Key      - S3 object name (can contain subdirectories). If not specified then file_name is used
s3.meta.client.upload_file(Filename='input_file_path', Bucket='bucket_name', Key='s3_output_key')
I used this and it is very simple to implement:
import tinys3

conn = tinys3.Connection('S3_ACCESS_KEY', 'S3_SECRET_KEY', tls=True)
f = open('some_file.zip', 'rb')
conn.upload('some_file.zip', f, 'my_bucket')
https://www.smore.com/labs/tinys3/
from boto3.s3.transfer import S3Transfer
import boto3

# make sure all the variables required below are populated
client = boto3.client('s3', aws_access_key_id=access_key, aws_secret_access_key=secret_key)
transfer = S3Transfer(client)
transfer.upload_file(filepath, bucket_name, folder_name + "/" + filename)
This is a three-liner. Just follow the instructions in the boto3 documentation.
import boto3
s3 = boto3.resource(service_name = 's3')
s3.meta.client.upload_file(Filename = 'C:/foo/bar/baz.filetype', Bucket = 'yourbucketname', Key = 'baz.filetype')
Some important arguments are:
Parameters:
Filename (str) -- The path to the file to upload.
Bucket (str) -- The name of the bucket to upload to.
Key (str) -- The name of the key that you want to assign to your file in your S3 bucket. This could be the same as the name of the file or a different name of your choice, but the file type should remain the same.
Note: I assume that you have saved your credentials in a ~/.aws folder, as suggested in the best configuration practices in the boto3 documentation.
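For reference, that credentials file typically lives at ~/.aws/credentials and looks roughly like this (placeholder values):

[default]
aws_access_key_id = YOUR_ACCESS_KEY_ID
aws_secret_access_key = YOUR_SECRET_ACCESS_KEY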
This will also work:
import os
import boto
import boto.s3.connection
from boto.s3.key import Key

try:
    conn = boto.s3.connect_to_region('us-east-1',
        aws_access_key_id='AWS-Access-Key',
        aws_secret_access_key='AWS-Secrete-Key',
        # host='s3-website-us-east-1.amazonaws.com',
        # is_secure=True,  # uncomment if you are not using ssl
        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
    )
    bucket = conn.get_bucket('YourBucketName')
    key_name = 'FileToUpload'
    path = 'images/holiday'  # directory under which the file should be uploaded
    full_key_name = os.path.join(path, key_name)
    k = bucket.new_key(full_key_name)
    k.set_contents_from_filename(key_name)
except Exception, e:
    print str(e)
    print "error"
Using boto3
import logging
import boto3
from botocore.exceptions import ClientError

def upload_file(file_name, bucket, object_name=None):
    """Upload a file to an S3 bucket

    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then file_name is used
    :return: True if file was uploaded, else False
    """
    # If S3 object_name was not specified, use file_name
    if object_name is None:
        object_name = file_name

    # Upload the file
    s3_client = boto3.client('s3')
    try:
        response = s3_client.upload_file(file_name, bucket, object_name)
    except ClientError as e:
        logging.error(e)
        return False
    return True
For more, see:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html
import boto
from boto.s3.key import Key
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
END_POINT = '' # eg. us-east-1
S3_HOST = '' # eg. s3.us-east-1.amazonaws.com
BUCKET_NAME = 'test'
FILENAME = 'upload.txt'
UPLOADED_FILENAME = 'dumps/upload.txt'
# include folders in file path. If it doesn't exist, it will be created
s3 = boto.s3.connect_to_region(END_POINT,
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
host=S3_HOST)
bucket = s3.get_bucket(BUCKET_NAME)
k = Key(bucket)
k.key = UPLOADED_FILENAME
k.set_contents_from_filename(FILENAME)
For an example of uploading a whole folder, see the following code:
import boto
import boto.s3
import boto.s3.connection
import os.path
import sys

# Fill in info on data to upload
# destination bucket name
bucket_name = 'willie20181121'
# source directory
sourceDir = '/home/willie/Desktop/x/'  # Linux path
# destination directory name (on S3)
destDir = '/test1/'  # S3 path
# max size in bytes before uploading in parts. between 1 and 5 GB recommended
MAX_SIZE = 20 * 1000 * 1000
# size of parts when uploading in parts
PART_SIZE = 6 * 1000 * 1000

access_key = 'MPBVAQ*******IT****'
secret_key = '11t63yDV***********HgUcgMOSN*****'

conn = boto.connect_s3(
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key,
    host='******.org.tw',
    is_secure=False,               # uncomment if you are not using ssl
    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
bucket = conn.create_bucket(bucket_name,
                            location=boto.s3.connection.Location.DEFAULT)

uploadFileNames = []
for (sourceDir, dirname, filename) in os.walk(sourceDir):
    uploadFileNames.extend(filename)
    break

def percent_cb(complete, total):
    sys.stdout.write('.')
    sys.stdout.flush()

for filename in uploadFileNames:
    sourcepath = os.path.join(sourceDir + filename)
    destpath = os.path.join(destDir, filename)
    print('Uploading %s to Amazon S3 bucket %s' % (sourcepath, bucket_name))

    filesize = os.path.getsize(sourcepath)
    if filesize > MAX_SIZE:
        print("multipart upload")
        mp = bucket.initiate_multipart_upload(destpath)
        fp = open(sourcepath, 'rb')
        fp_num = 0
        while (fp.tell() < filesize):
            fp_num += 1
            print("uploading part %i" % fp_num)
            mp.upload_part_from_file(fp, fp_num, cb=percent_cb, num_cb=10, size=PART_SIZE)
        mp.complete_upload()
    else:
        print("singlepart upload")
        k = boto.s3.key.Key(bucket)
        k.key = destpath
        k.set_contents_from_filename(sourcepath,
                                     cb=percent_cb, num_cb=10)
PS: For more, see the reference URL.
If you have the AWS command line interface installed on your system, you can make use of Python's subprocess library.
For example:
import subprocess

def copy_file_to_s3(source: str, target: str, bucket: str):
    subprocess.run(["aws", "s3", "cp", source, f"s3://{bucket}/{target}"])
Similarly, you can use the same logic for all sorts of AWS client operations, such as downloading or listing files. It is also possible to get return values, and this way there is no need to import boto3. I guess it is not intended to be used this way, but in practice I find it quite convenient. You also get the status of the upload displayed in your console, for example:
Completed 3.5 GiB/3.5 GiB (242.8 MiB/s) with 1 file(s) remaining
To adapt the method to your needs, I recommend having a look at the subprocess reference as well as the AWS CLI reference.
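For instance, a small sketch of the same aws s3 cp call that also captures the exit status and stderr (assuming the AWS CLI is on PATH):

import subprocess

def copy_file_to_s3(source: str, target: str, bucket: str) -> bool:
    # capture_output collects stdout/stderr; we inspect the return code ourselves
    result = subprocess.run(
        ["aws", "s3", "cp", source, f"s3://{bucket}/{target}"],
        capture_output=True, text=True
    )
    if result.returncode != 0:
        print(result.stderr)
    return result.returncode == 0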
Note: This is a copy of my answer to a similar question.
I have something that seems to me a bit more organized:
import boto3
from pprint import pprint
from botocore.exceptions import NoCredentialsError

class S3(object):
    BUCKET = "test"
    connection = None

    def __init__(self):
        try:
            vars = get_s3_credentials("aws")  # your own helper that returns the key pair
            self.connection = boto3.resource('s3',
                                             aws_access_key_id='aws_access_key_id',      # replace with your actual key
                                             aws_secret_access_key='aws_secret_access_key')  # replace with your actual secret
        except Exception as error:
            print(error)
            self.connection = None

    def upload_file(self, file_to_upload_path, file_name):
        if file_to_upload_path is None or file_name is None:
            return False
        try:
            pprint(file_to_upload_path)
            file_name = "your-folder-inside-s3/{0}".format(file_name)
            self.connection.Bucket(self.BUCKET).upload_file(file_to_upload_path,
                                                            file_name)
            print("Upload Successful")
            return True
        except FileNotFoundError:
            print("The file was not found")
            return False
        except NoCredentialsError:
            print("Credentials not available")
            return False
There are three important variables here: the BUCKET constant, file_to_upload_path, and file_name.
BUCKET: the name of your S3 bucket.
file_to_upload_path: the path of the file you want to upload.
file_name: the resulting file name and path in your bucket (this is where you add folders or whatever).
There are many ways to use it, but you can reuse this code in another script like this:
import S3

def some_function():
    S3.S3().upload_file(path_to_file, final_file_name)
You should set the content type as well, to avoid file-access issues.
import os
import boto3

image = 'fly.png'
s3_filestore_path = 'images/fly.png'

filename, file_extension = os.path.splitext(image)
content_type_dict = {".png": "image/png", ".html": "text/html",
                     ".css": "text/css", ".js": "application/javascript",
                     ".jpg": "image/jpeg", ".gif": "image/gif",
                     ".jpeg": "image/jpeg"}
content_type = content_type_dict[file_extension]

s3 = boto3.client('s3', config=boto3.session.Config(signature_version='s3v4'),
                  region_name='ap-south-1',
                  aws_access_key_id=S3_KEY,
                  aws_secret_access_key=S3_SECRET)
# upload the file contents (not the filename string) with an explicit ContentType
with open(image, 'rb') as data:
    s3.put_object(Body=data, Bucket=S3_BUCKET, Key=s3_filestore_path, ContentType=content_type)
import boto
import boto.s3.connection

# assumes etree, listings, access_key and secret_key are defined elsewhere
xmlstr = etree.tostring(listings, encoding='utf8', method='xml')
conn = boto.connect_s3(
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key,
    # host='<bucketName>.s3.amazonaws.com',
    host='bycket.s3.amazonaws.com',
    # is_secure=False,  # uncomment if you are not using ssl
    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
conn.auth_region_name = 'us-west-1'

bucket = conn.get_bucket('resources', validate=False)
key = bucket.get_key('filename.txt')
key.set_contents_from_string("SAMPLE TEXT")
key.set_canned_acl('public-read')
A lot of the existing answers here are pretty complex. A simple approach is to use cloudpathlib, which wraps boto3.
First, be sure to be authenticated properly with a ~/.aws/credentials file or with environment variables set. See more options in the cloudpathlib docs.
This is how you would upload a file:
from pathlib import Path
from cloudpathlib import CloudPath
# write a local file that we will upload:
Path("test_file.txt").write_text("hello")
#> 5
# upload that file to S3
CloudPath("s3://drivendata-public-assets/testsfile.txt").upload_from("test_file.txt")
#> S3Path('s3://mybucket/testsfile.txt')
# read it back from s3
CloudPath("s3://mybucket/testsfile.txt").read_text()
#> 'hello'
Note that you could write to the cloud path directly using the normal write_text, write_bytes, or open methods as well.
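For example, a short sketch of writing to the same (example) bucket directly:

from cloudpathlib import CloudPath

# write straight to the cloud path, no local file needed
CloudPath("s3://mybucket/testsfile.txt").write_text("hello again")

# or stream to it with open()
with CloudPath("s3://mybucket/notes.txt").open("w") as f:
    f.write("more text")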
I modified your example slightly, dropping some imports and the progress callback, to get what I needed for a boto example.
import boto.s3
from boto.s3.key import Key
AWS_ACCESS_KEY_ID = 'your-access-key-id'
AWS_SECRET_ACCESS_KEY = 'your-secret-access-key'
bucket_name = AWS_ACCESS_KEY_ID.lower() + '-form13'
conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
bucket = conn.create_bucket(bucket_name, location=boto.s3.connection.Location.DEFAULT)
filename = 'embedding.csv'
k = Key(bucket)
k.key = filename
k.set_contents_from_filename(filename)
Here's a boto3 example as well:
import boto3
ACCESS_KEY = 'your-access-key'
SECRET_KEY = 'your-secret-key'
file_name='embedding.csv'
object_name=file_name
bucket_name = ACCESS_KEY.lower() + '-form13'
s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
s3.create_bucket(Bucket=bucket_name)
s3.upload_file(file_name, bucket_name, object_name)
