Airflow: how to download PDF files from S3 bucket into Airflow - python

I tried the code below but am getting the error "Unable to locate credentials":
import os
import boto3

def download():
    bucket = 'bucketname'
    key = 'path and filename'
    s3_resource = boto3.resource('s3')
    my_bucket = s3_resource.Bucket(bucket)
    objects = my_bucket.objects.filter(Prefix=key)
    for obj in objects:
        path, filename = os.path.split(obj.key)
        my_bucket.download_file(obj.key, filename)

You'll need to define the AWS connection and use the download_fileobj function via the S3Hook.
I didn't test it, but it should be something like:
from tempfile import NamedTemporaryFile
from airflow.providers.amazon.aws.hooks.s3 import S3Hook

hook = S3Hook('my_aws_conn')
key_object = hook.get_key('your_path')
with NamedTemporaryFile("wb") as f:
    key_object.download_fileobj(Fileobj=f)
    f.flush()
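As a further untested sketch along the same lines, this downloads every PDF under a prefix to a local directory; the connection id, bucket, prefix, and local directory are placeholders:

import os
from airflow.providers.amazon.aws.hooks.s3 import S3Hook

def download_pdfs(bucket='bucketname', prefix='path/to/pdfs/', local_dir='/tmp'):
    # 'my_aws_conn' must exist as an Airflow connection of type "Amazon Web Services"
    hook = S3Hook(aws_conn_id='my_aws_conn')
    # list_keys returns all keys under the given prefix
    for key in hook.list_keys(bucket_name=bucket, prefix=prefix):
        if key.endswith('.pdf'):
            # get_key returns a boto3 Object, which can write itself to disk
            obj = hook.get_key(key, bucket_name=bucket)
            obj.download_file(os.path.join(local_dir, os.path.basename(key)))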

Related

Loading an SKlearn model from AWS S3

I have saved a sklearn model using the following code:
def __init__(self, bucket_name):
    self.bucket_name = bucket_name
    self.s3_resource = boto3.resource('s3')
    self.aws_access_key_id = os.environ['aws_access_key_id']
    self.aws_secret_access_key = os.environ['aws_secret_access_key']
    self.region = "eu-central-1"
    self.s3 = boto3.client('s3', aws_access_key_id=self.aws_access_key_id,
                           aws_secret_access_key=self.aws_secret_access_key,
                           endpoint_url='https://s3.eu-central-1.wasabisys.com')
    self.s3_resource = boto3.resource('s3', aws_access_key_id=self.aws_access_key_id,
                                      aws_secret_access_key=self.aws_secret_access_key,
                                      endpoint_url='https://s3.eu-central-1.wasabisys.com')

def write_as_joblib_to_bucket(self, file, path, file_name):
    full_file_name = path + "/" + file_name
    with tempfile.TemporaryFile() as fp:
        joblib.dump(file, fp)
        fp.seek(0)
        response = self.s3.put_object(Body=fp.read(), Bucket=self.bucket_name, Key=full_file_name)
        print(response)
However, I am not completely sure how to load the model:
I've been trying something like this:
def read_joblib_file(self, path, file_name):
    full_file_name = path + "/" + file_name
    obj = self.s3.get_object(Bucket=self.bucket_name, Key=full_file_name)
    body = joblib.load(obj['Body'])
However, how do I convert the body into the original sklearn format?
Following the same idea as in your write function, you can read your sklearn model like this:
def read_joblib_file(self, key):
    with tempfile.TemporaryFile() as fp:
        self.s3.download_fileobj(Fileobj=fp, Bucket=self.bucket_name, Key=key)
        fp.seek(0)
        model = joblib.load(fp)
    return model
You can use joblib and io in combination with boto3:
import boto3
from joblib import load
from io import BytesIO

s3 = boto3.client('s3')
bucket = 'my-bucket'                # bucket name only, without the s3:// prefix
key = 'path/to/my/object.joblib'    # key without a leading slash
s3_object = s3.get_object(Bucket=bucket, Key=key)
# Write to an in-memory buffer and use it in place of a file object.
# In general, BytesIO is useful when something (e.g. joblib)
# expects a file object, but we can only provide the data directly.
bytes_stream = BytesIO(s3_object['Body'].read())
model = load(bytes_stream)
This will work, but depending on how large your model is, you may want to explore other options. If you are running into long S3 load times, you could deploy your model with AWS SageMaker and (in your Python script) just invoke an endpoint with your data as a payload to retrieve a result (see here), or do the same SageMaker deployment and invoke it from AWS Athena (see here), etc.

How to copy from one bucket to another bucket in s3 of certain suffix

I have 3 buckets: 1. commonfolder, 2. jsonfolder, 3. csvfolder.
The common folder will contain both json and csv files.
I need to copy all csv files to csvfolder and all json files to jsonfolder.
The code below gets all the files from commonfolder; how do I copy them after that?
import boto3

s3 = boto3.client('s3')

def lambda_handler(event, context):
    # List all the bucket names
    response = s3.list_buckets()
    for bucket in response['Buckets']:
        print(bucket)
        print(f'{bucket["Name"]}')
        # Get the files of a particular bucket
        if bucket["Name"] == 'tests3json':
            resp = s3.list_objects_v2(Bucket='commonfolder')
            for obj in resp['Contents']:
                filename = obj['Key']
                print(filename)
                if filename.split('.')[1].lower() == 'json':
                    copyjson(bucket, filename)
                    # copyjson(jsonfolder, filename)
                elif filename.split('.')[1].lower() == 'csv':
                    copycsv(bucket, filename)
                    # copycsv(csvfolder, filename)
I need to create new functions copyjson and copycsv to do this job: copy from common-bucket to either csv-bucket or json-bucket depending on the file extension.
You can check the following code:
import boto3

s3 = boto3.resource('s3')

def lambda_handler(event, context):
    source_bucket = s3.Bucket('01-commonfolder-231')
    json_bucket = s3.Bucket('02-jsonfolder-3435')
    csv_bucket = s3.Bucket('03-csvfolder-4552')
    for object in source_bucket.objects.all():
        #print(object)
        if object.key.endswith('.json'):
            print(f"{object.key} to json bucket")
            copy_object = json_bucket.Object(object.key)
            copy_object.copy({'Bucket': object.bucket_name,
                              'Key': object.key})
        elif object.key.endswith('.csv'):
            print(f"{object.key} to csv bucket")
            copy_object = csv_bucket.Object(object.key)
            copy_object.copy({'Bucket': object.bucket_name,
                              'Key': object.key})
I tested this using my own sample buckets with test files:
aaa.json to json bucket
bbbbb.csv to csv bucket
bbbbb.json to json bucket
hhhh.csv to csv bucket
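For completeness, here is a minimal sketch of the copyjson and copycsv helpers the question asks for, using the boto3 client's copy_object call; the destination bucket names are placeholders:

import boto3

s3 = boto3.client('s3')

def copyjson(source_bucket, key):
    # copy the object into the json bucket under the same key
    s3.copy_object(Bucket='jsonfolder',
                   Key=key,
                   CopySource={'Bucket': source_bucket, 'Key': key})

def copycsv(source_bucket, key):
    # copy the object into the csv bucket under the same key
    s3.copy_object(Bucket='csvfolder',
                   Key=key,
                   CopySource={'Bucket': source_bucket, 'Key': key})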
If the folders are local directories rather than S3 buckets, you can use the move() function from shutil:
from shutil import move
from glob import glob

common_folder = 'C:\\Users\\User\\Desktop\\commonfolder\\'
csv_folder = 'C:\\Users\\User\\Desktop\\csvfolder\\'
json_folder = 'C:\\Users\\User\\Desktop\\jsonfolder\\'

for csv in glob(common_folder + "*.csv"):
    move(csv, csv_folder)
for json in glob(common_folder + "*.json"):
    move(json, json_folder)

How to download files from s3 given the file path using boto3 in python

Pretty basic, but I am not able to download files given an S3 path.
For example, I have s3://name1/name2/file_name.txt
import boto3

locations = ['s3://name1/name2/file_name.txt']
s3_client = boto3.client('s3')
bucket = 'name1'
prefix = 'name2'
for file in locations:
    s3_client.download_file(bucket, 'file_name.txt', 'my_local_folder')
I am getting the error botocore.exceptions.ClientError: An error occurred (404) when calling the HeadObject operation: Not Found.
The file exists, since I can download it with the AWS CLI using the same S3 path: s3://name1/name2/file_name.txt.
You need to have a list of filename paths, then modify your code as shown in the documentation:
import os
import boto3
import botocore

files = ['name2/file_name.txt']
bucket = 'name1'
s3 = boto3.resource('s3')
for file in files:
    try:
        s3.Bucket(bucket).download_file(file, os.path.basename(file))
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            print("The object does not exist.")
        else:
            raise
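Since the locations list in the question holds full s3:// URIs, it can also help to split each URI into bucket and key before calling download_file; a minimal sketch using urllib.parse, with the path from the question:

import os
from urllib.parse import urlparse

import boto3

s3_client = boto3.client('s3')
locations = ['s3://name1/name2/file_name.txt']

for uri in locations:
    parsed = urlparse(uri)            # s3://name1/name2/file_name.txt
    bucket = parsed.netloc            # 'name1'
    key = parsed.path.lstrip('/')     # 'name2/file_name.txt'
    # the third argument must be a file path, not a directory
    s3_client.download_file(bucket, key, os.path.basename(key))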
You may need to do this with some type of authentication. There are several methods, but creating a session is simple and fast:
from boto3.session import Session

bucket_name = 'your_bucket_name'
folder_prefix = 'your/path/to/download/files'
credentials = 'credentials.txt'

with open(credentials, 'r', encoding='utf-8') as f:
    line = f.readline().strip()
    access_key = line.split(':')[0]
    secret_key = line.split(':')[1]

session = Session(
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key
)
s3 = session.resource('s3')
bucket = s3.Bucket(bucket_name)
for s3_file in bucket.objects.filter(Prefix=folder_prefix):
    file_object = s3_file.key
    file_name = str(file_object.split('/')[-1])
    print('Downloading file {} ...'.format(file_object))
    bucket.download_file(file_object, '/tmp/{}'.format(file_name))
In the credentials.txt file you must add a single line that concatenates the access key id and the secret, for example:
~$ cat credentials.txt
AKIAIO5FODNN7EXAMPLE:ABCDEF+c2L7yXeGvUyrPgYsDnWRRC1AYEXAMPLE
Don't forget to protect this file well on your host and give read-only permissions to the user who runs this program. I hope it works for you; it works perfectly for me.
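If you would rather not keep a homemade credentials.txt around, boto3 can also pick up credentials from the standard ~/.aws/credentials file or from environment variables; a minimal sketch using a named profile (the profile, bucket, and prefix names are placeholders):

import boto3

# uses the [my-profile] section of ~/.aws/credentials
session = boto3.Session(profile_name='my-profile')
s3 = session.resource('s3')
bucket = s3.Bucket('your_bucket_name')

for s3_file in bucket.objects.filter(Prefix='your/path/to/download/files'):
    if s3_file.key.endswith('/'):
        continue  # skip "directory" placeholder keys
    file_name = s3_file.key.split('/')[-1]
    bucket.download_file(s3_file.key, '/tmp/{}'.format(file_name))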

How to write a file or data to an S3 object using boto3

In boto 2, you can write to an S3 object using these methods:
Key.set_contents_from_string()
Key.set_contents_from_file()
Key.set_contents_from_filename()
Key.set_contents_from_stream()
Is there a boto 3 equivalent? What is the boto3 method for saving data to an object stored on S3?
In boto 3, the 'Key.set_contents_from_' methods were replaced by
Object.put()
Client.put_object()
For example:
import boto3
some_binary_data = b'Here we have some data'
more_binary_data = b'Here we have some more data'
# Method 1: Object.put()
s3 = boto3.resource('s3')
object = s3.Object('my_bucket_name', 'my/key/including/filename.txt')
object.put(Body=some_binary_data)
# Method 2: Client.put_object()
client = boto3.client('s3')
client.put_object(Body=more_binary_data, Bucket='my_bucket_name', Key='my/key/including/anotherfilename.txt')
Alternatively, the binary data can come from reading a file, as described in the official docs comparing boto 2 and boto 3:
Storing Data
Storing data from a file, stream, or string is easy:
# Boto 2.x
from boto.s3.key import Key
key = Key('hello.txt')
key.set_contents_from_file('/tmp/hello.txt')
# Boto 3
s3.Object('mybucket', 'hello.txt').put(Body=open('/tmp/hello.txt', 'rb'))
boto3 also has a method for uploading a file directly:
s3 = boto3.resource('s3')
s3.Bucket('bucketname').upload_file('/local/file/here.txt','folder/sub/path/to/s3key')
http://boto3.readthedocs.io/en/latest/reference/services/s3.html#S3.Bucket.upload_file
You no longer have to convert the contents to binary before writing to the file in S3. The following example creates a new text file (called newfile.txt) in an S3 bucket with string contents:
import boto3
s3 = boto3.resource(
's3',
region_name='us-east-1',
aws_access_key_id=KEY_ID,
aws_secret_access_key=ACCESS_KEY
)
content="String content to write to a new S3 file"
s3.Object('my-bucket-name', 'newfile.txt').put(Body=content)
Here's a nice trick to read JSON from s3:
import json, boto3
s3 = boto3.resource("s3").Bucket("bucket")
json.load_s3 = lambda f: json.load(s3.Object(key=f).get()["Body"])
json.dump_s3 = lambda obj, f: s3.Object(key=f).put(Body=json.dumps(obj))
Now you can use json.load_s3 and json.dump_s3 with the same API as load and dump
data = {"test":0}
json.dump_s3(data, "key") # saves json to s3://bucket/key
data = json.load_s3("key") # read json from s3://bucket/key
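If you prefer not to monkey-patch the json module, the same trick works as two plain helper functions, using the same bucket object as above:

import json
import boto3

bucket = boto3.resource("s3").Bucket("bucket")

def dump_json_s3(obj, key):
    # serialize and upload in one call
    bucket.Object(key=key).put(Body=json.dumps(obj))

def load_json_s3(key):
    # get() returns a dict whose "Body" is a streaming file-like object
    return json.load(bucket.Object(key=key).get()["Body"])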
A cleaner and more concise version, which I use to upload files on the fly to a given S3 bucket and sub-folder:
import boto3
BUCKET_NAME = 'sample_bucket_name'
PREFIX = 'sub-folder/'
s3 = boto3.resource('s3')
# Creating an empty file called "_DONE" and putting it in the S3 bucket
s3.Object(BUCKET_NAME, PREFIX + '_DONE').put(Body="")
Note: You should ALWAYS keep your AWS credentials (aws_access_key_id and aws_secret_access_key) in a separate file, for example ~/.aws/credentials.
After some research, I found this: you can write a list of dictionaries as CSV directly to an S3 bucket using a simple csv writer, assuming that the keys in all the dictionaries are uniform.
e.g.: data_dict = [{"Key1": "value1", "Key2": "value2"}, {"Key1": "value4", "Key2": "value3"}]
import csv
from io import StringIO

import boto3

# Sample input dictionary
data_dict = [{"Key1": "value1", "Key2": "value2"}, {"Key1": "value4", "Key2": "value3"}]
data_dict_keys = data_dict[0].keys()

# creating a file buffer
file_buff = StringIO()
# writing csv data to the file buffer
writer = csv.DictWriter(file_buff, fieldnames=data_dict_keys)
writer.writeheader()
for data in data_dict:
    writer.writerow(data)

# creating an s3 client connection
client = boto3.client('s3')
# placing the file in S3; file_buff.getvalue() is the CSV body for the file
client.put_object(Body=file_buff.getvalue(), Bucket='my_bucket_name', Key='my/key/including/anotherfilename.txt')
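Going the other way, a minimal sketch for reading that CSV back from S3 into a list of dictionaries, with the same placeholder bucket and key:

import csv
from io import StringIO

import boto3

client = boto3.client('s3')
response = client.get_object(Bucket='my_bucket_name', Key='my/key/including/anotherfilename.txt')

# the body is bytes, so decode it before handing it to the csv reader
csv_text = response['Body'].read().decode('utf-8')
rows = list(csv.DictReader(StringIO(csv_text)))
print(rows)  # [{'Key1': 'value1', 'Key2': 'value2'}, ...]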
It is worth mentioning smart-open, which uses boto3 as a back-end.
smart-open is a drop-in replacement for Python's open that can open files from S3, as well as FTP, HTTP and many other protocols.
For example:
from smart_open import open
import json

with open("s3://your_bucket/your_key.json", 'r') as f:
    data = json.load(f)
The AWS credentials are loaded via boto3 credentials, usually from a file in the ~/.aws/ directory or from environment variables.
You may use the code below to write, for example, an image to S3. To be able to connect to S3 you will have to install the AWS CLI with pip install awscli, then enter your credentials with aws configure:
import urllib3
import uuid
from pathlib import Path
from io import BytesIO

import boto3
from errors import custom_exceptions as cex

BUCKET_NAME = "xxx.yyy.zzz"
POSTERS_BASE_PATH = "assets/wallcontent"
CLOUDFRONT_BASE_URL = "https://xxx.cloudfront.net/"


class S3(object):
    def __init__(self):
        self.client = boto3.client('s3')
        self.bucket_name = BUCKET_NAME
        self.posters_base_path = POSTERS_BASE_PATH

    def __download_image(self, url):
        manager = urllib3.PoolManager()
        try:
            res = manager.request('GET', url)
        except Exception:
            print("Could not download the image from URL: ", url)
            raise cex.ImageDownloadFailed
        return BytesIO(res.data)  # any file-like object that implements read()

    def upload_image(self, url):
        try:
            image_file = self.__download_image(url)
        except cex.ImageDownloadFailed:
            raise cex.ImageUploadFailed

        extension = Path(url).suffix
        id = uuid.uuid1().hex + extension
        final_path = self.posters_base_path + "/" + id
        try:
            self.client.upload_fileobj(image_file,
                                       self.bucket_name,
                                       final_path)
        except Exception:
            print("Image Upload Error for URL: ", url)
            raise cex.ImageUploadFailed
        return CLOUDFRONT_BASE_URL + id

How to upload a file to directory in S3 bucket using boto

I want to copy a file into an S3 bucket using Python.
For example: I have a bucket named test, and in the bucket I have 2 folders named "dump" and "input". Now I want to copy a file from a local directory to the S3 "dump" folder using Python. Can anyone help me?
NOTE: This answer uses boto. See the other answer that uses boto3, which is newer.
Try this...
import boto
import boto.s3
import sys
from boto.s3.key import Key

AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''

bucket_name = AWS_ACCESS_KEY_ID.lower() + '-dump'
conn = boto.connect_s3(AWS_ACCESS_KEY_ID,
                       AWS_SECRET_ACCESS_KEY)

bucket = conn.create_bucket(bucket_name,
                            location=boto.s3.connection.Location.DEFAULT)

testfile = "replace this with an actual filename"
print('Uploading %s to Amazon S3 bucket %s' %
      (testfile, bucket_name))

def percent_cb(complete, total):
    sys.stdout.write('.')
    sys.stdout.flush()

k = Key(bucket)
k.key = 'my test file'
k.set_contents_from_filename(testfile,
                             cb=percent_cb, num_cb=10)
[UPDATE]
I am not a pythonist, so thanks for the heads up about the import statements.
Also, I'd not recommend placing credentials inside your own source code. If you are running this inside AWS use IAM Credentials with Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html), and to keep the same behaviour in your Dev/Test environment, use something like Hologram from AdRoll (https://github.com/AdRoll/hologram)
import boto3
s3 = boto3.resource('s3')
BUCKET = "test"
s3.Bucket(BUCKET).upload_file("your/local/file", "dump/file")
No need to make it that complicated:
s3_connection = boto.connect_s3()
bucket = s3_connection.get_bucket('your bucket name')
key = boto.s3.key.Key(bucket, 'some_file.zip')
with open('some_file.zip', 'rb') as f:
    key.send_file(f)
Upload a file to S3 within a session with credentials:
import boto3
session = boto3.Session(
aws_access_key_id='AWS_ACCESS_KEY_ID',
aws_secret_access_key='AWS_SECRET_ACCESS_KEY',
)
s3 = session.resource('s3')
# Filename - File to upload
# Bucket - Bucket to upload to (the top level directory under AWS S3)
# Key - S3 object name (can contain subdirectories). If not specified then file_name is used
s3.meta.client.upload_file(Filename='input_file_path', Bucket='bucket_name', Key='s3_output_key')
I used this and it is very simple to implement
import tinys3
conn = tinys3.Connection('S3_ACCESS_KEY','S3_SECRET_KEY',tls=True)
f = open('some_file.zip','rb')
conn.upload('some_file.zip',f,'my_bucket')
https://www.smore.com/labs/tinys3/
from boto3.s3.transfer import S3Transfer
import boto3
#have all the variables populated which are required below
client = boto3.client('s3', aws_access_key_id=access_key,aws_secret_access_key=secret_key)
transfer = S3Transfer(client)
transfer.upload_file(filepath, bucket_name, folder_name+"/"+filename)
This is a three liner. Just follow the instructions on the boto3 documentation.
import boto3
s3 = boto3.resource(service_name = 's3')
s3.meta.client.upload_file(Filename = 'C:/foo/bar/baz.filetype', Bucket = 'yourbucketname', Key = 'baz.filetype')
Some important arguments are:
Parameters:
Filename (str) -- The path to the file to upload.
Bucket (str) -- The name of the bucket to upload to.
Key (str) -- The name of the key that you want to assign to your file in your S3 bucket. This could be the same as the name of the file or a different name of your choice, but the filetype should remain the same.
Note: I assume that you have saved your credentials in a ~\.aws folder as suggested in the best configuration practices in the boto3 documentation.
This will also work:
import os
import boto
import boto.s3.connection
from boto.s3.key import Key

try:
    conn = boto.s3.connect_to_region('us-east-1',
        aws_access_key_id='AWS-Access-Key',
        aws_secret_access_key='AWS-Secrete-Key',
        # host='s3-website-us-east-1.amazonaws.com',
        # is_secure=True,  # uncomment if you are not using ssl
        calling_format=boto.s3.connection.OrdinaryCallingFormat(),
    )
    bucket = conn.get_bucket('YourBucketName')
    key_name = 'FileToUpload'
    path = 'images/holiday'  # directory under which the file should be uploaded
    full_key_name = os.path.join(path, key_name)
    k = bucket.new_key(full_key_name)
    k.set_contents_from_filename(key_name)
except Exception as e:
    print(str(e))
    print("error")
Using boto3
import logging
import boto3
from botocore.exceptions import ClientError

def upload_file(file_name, bucket, object_name=None):
    """Upload a file to an S3 bucket

    :param file_name: File to upload
    :param bucket: Bucket to upload to
    :param object_name: S3 object name. If not specified then file_name is used
    :return: True if file was uploaded, else False
    """
    # If S3 object_name was not specified, use file_name
    if object_name is None:
        object_name = file_name

    # Upload the file
    s3_client = boto3.client('s3')
    try:
        response = s3_client.upload_file(file_name, bucket, object_name)
    except ClientError as e:
        logging.error(e)
        return False
    return True
For more, see:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/s3-uploading-files.html
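A quick usage example of that helper; the file path and bucket name here are placeholders:

if upload_file('data/report.csv', 'my-bucket', 'reports/report.csv'):
    print('Upload succeeded')
else:
    print('Upload failed')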
import boto
from boto.s3.key import Key
AWS_ACCESS_KEY_ID = ''
AWS_SECRET_ACCESS_KEY = ''
END_POINT = '' # eg. us-east-1
S3_HOST = '' # eg. s3.us-east-1.amazonaws.com
BUCKET_NAME = 'test'
FILENAME = 'upload.txt'
UPLOADED_FILENAME = 'dumps/upload.txt'
# include folders in file path. If it doesn't exist, it will be created
s3 = boto.s3.connect_to_region(END_POINT,
aws_access_key_id=AWS_ACCESS_KEY_ID,
aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
host=S3_HOST)
bucket = s3.get_bucket(BUCKET_NAME)
k = Key(bucket)
k.key = UPLOADED_FILENAME
k.set_contents_from_filename(FILENAME)
For an example of uploading a whole folder, see the following code:
import boto
import boto.s3
import boto.s3.connection
import os.path
import sys

# Fill in info on data to upload
# destination bucket name
bucket_name = 'willie20181121'
# source directory
sourceDir = '/home/willie/Desktop/x/'  # Linux path
# destination directory name (on S3)
destDir = '/test1/'  # S3 path
# max size in bytes before uploading in parts. between 1 and 5 GB recommended
MAX_SIZE = 20 * 1000 * 1000
# size of parts when uploading in parts
PART_SIZE = 6 * 1000 * 1000

access_key = 'MPBVAQ*******IT****'
secret_key = '11t63yDV***********HgUcgMOSN*****'

conn = boto.connect_s3(
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key,
    host='******.org.tw',
    is_secure=False,  # uncomment if you are not using ssl
    calling_format=boto.s3.connection.OrdinaryCallingFormat(),
)
bucket = conn.create_bucket(bucket_name,
                            location=boto.s3.connection.Location.DEFAULT)

uploadFileNames = []
for (sourceDir, dirname, filename) in os.walk(sourceDir):
    uploadFileNames.extend(filename)
    break

def percent_cb(complete, total):
    sys.stdout.write('.')
    sys.stdout.flush()

for filename in uploadFileNames:
    sourcepath = os.path.join(sourceDir + filename)
    destpath = os.path.join(destDir, filename)
    print('Uploading %s to Amazon S3 bucket %s' %
          (sourcepath, bucket_name))

    filesize = os.path.getsize(sourcepath)
    if filesize > MAX_SIZE:
        print("multipart upload")
        mp = bucket.initiate_multipart_upload(destpath)
        fp = open(sourcepath, 'rb')
        fp_num = 0
        while fp.tell() < filesize:
            fp_num += 1
            print("uploading part %i" % fp_num)
            mp.upload_part_from_file(fp, fp_num, cb=percent_cb, num_cb=10, size=PART_SIZE)
        mp.complete_upload()
    else:
        print("singlepart upload")
        k = boto.s3.key.Key(bucket)
        k.key = destpath
        k.set_contents_from_filename(sourcepath,
                                     cb=percent_cb, num_cb=10)
PS: For more, see the reference URL.
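In boto3 the same multipart behaviour comes for free from upload_file; a minimal sketch that tunes the threshold and part size to roughly match the values above (the bucket name and paths are placeholders):

import boto3
from boto3.s3.transfer import TransferConfig

s3 = boto3.client('s3')

# switch to multipart above ~20 MB, using ~6 MB parts, like the boto2 example
config = TransferConfig(multipart_threshold=20 * 1000 * 1000,
                        multipart_chunksize=6 * 1000 * 1000)

s3.upload_file('/home/willie/Desktop/x/bigfile.bin',
               'willie20181121',
               'test1/bigfile.bin',
               Config=config)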
If you have the AWS command line interface installed on your system, you can make use of Python's subprocess library.
For example:
import subprocess

def copy_file_to_s3(source: str, target: str, bucket: str):
    subprocess.run(["aws", "s3", "cp", source, f"s3://{bucket}/{target}"])
Similarly, you can use this logic for all sorts of AWS client operations such as downloading or listing files. It is also possible to get return values, and there is no need to import boto3. I guess it is not intended to be used this way, but in practice I find it quite convenient. This way you also get the status of the upload displayed in your console, for example:
Completed 3.5 GiB/3.5 GiB (242.8 MiB/s) with 1 file(s) remaining
To adapt the method to your needs, I recommend having a look at the subprocess reference as well as the AWS CLI reference.
Note: This is a copy of my answer to a similar question.
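Regarding return values, subprocess.run gives you the exit code (and, if you ask for it, the output); a minimal sketch, with placeholder paths and bucket:

import subprocess

result = subprocess.run(
    ["aws", "s3", "cp", "local_file.txt", "s3://my-bucket/target/local_file.txt"],
    capture_output=True,
    text=True,
)
if result.returncode != 0:
    # the CLI writes its error message to stderr
    print("Upload failed:", result.stderr)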
I have something that seems to me a bit more organized:
import boto3
from pprint import pprint
from botocore.exceptions import NoCredentialsError

class S3(object):
    BUCKET = "test"
    connection = None

    def __init__(self):
        try:
            vars = get_s3_credentials("aws")
            self.connection = boto3.resource('s3', 'aws_access_key_id',
                                             'aws_secret_access_key')
        except Exception as error:
            print(error)
            self.connection = None

    def upload_file(self, file_to_upload_path, file_name):
        if file_to_upload_path is None or file_name is None:
            return False
        try:
            pprint(file_to_upload_path)
            file_name = "your-folder-inside-s3/{0}".format(file_name)
            self.connection.Bucket(self.BUCKET).upload_file(file_to_upload_path,
                                                            file_name)
            print("Upload Successful")
            return True
        except FileNotFoundError:
            print("The file was not found")
            return False
        except NoCredentialsError:
            print("Credentials not available")
            return False
There are three important variables here: the BUCKET constant, file_to_upload_path and file_name.
BUCKET: the name of your S3 bucket
file_to_upload_path: the path of the file you want to upload
file_name: the resulting file name and path in your bucket (this is where you add folders or whatever)
There are many ways, but you can reuse this code in another script like this:
import S3

def some_function():
    S3.S3().upload_file(path_to_file, final_file_name)
You should set the content type as well to avoid file access issues.
import os
import boto3

image = 'fly.png'
s3_filestore_path = 'images/fly.png'

filename, file_extension = os.path.splitext(image)
content_type_dict = {".png": "image/png", ".html": "text/html",
                     ".css": "text/css", ".js": "application/javascript",
                     ".jpg": "image/jpeg", ".gif": "image/gif",
                     ".jpeg": "image/jpeg"}
content_type = content_type_dict[file_extension]

s3 = boto3.client('s3', config=boto3.session.Config(signature_version='s3v4'),
                  region_name='ap-south-1',
                  aws_access_key_id=S3_KEY,
                  aws_secret_access_key=S3_SECRET)
# pass the file's bytes as the body, not the filename string
with open(image, 'rb') as body:
    s3.put_object(Body=body, Bucket=S3_BUCKET, Key=s3_filestore_path, ContentType=content_type)
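Instead of maintaining the content-type dictionary by hand, Python's built-in mimetypes module can guess the type from the file name; a minimal sketch, where the bucket name is a placeholder and the file and key follow the example above:

import mimetypes

import boto3

S3_BUCKET = 'my-bucket'  # placeholder bucket name
image = 'fly.png'
s3_filestore_path = 'images/fly.png'

# guess_type returns (type, encoding); fall back to a generic type if unknown
content_type = mimetypes.guess_type(image)[0] or 'application/octet-stream'

s3 = boto3.client('s3')
with open(image, 'rb') as body:
    s3.put_object(Body=body, Bucket=S3_BUCKET, Key=s3_filestore_path,
                  ContentType=content_type)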
xmlstr = etree.tostring(listings, encoding='utf8', method='xml')
conn = boto.connect_s3(
aws_access_key_id = access_key,
aws_secret_access_key = secret_key,
# host = '<bucketName>.s3.amazonaws.com',
host = 'bycket.s3.amazonaws.com',
#is_secure=False, # uncomment if you are not using ssl
calling_format = boto.s3.connection.OrdinaryCallingFormat(),
)
conn.auth_region_name = 'us-west-1'
bucket = conn.get_bucket('resources', validate=False)
key= bucket.get_key('filename.txt')
key.set_contents_from_string("SAMPLE TEXT")
key.set_canned_acl('public-read')
A lot of the existing answers here are pretty complex. A simple approach is to use cloudpathlib, which wraps boto3.
First, be sure to be authenticated properly with an ~/.aws/credentials file or environment variables set. See more options in the cloudpathlib docs.
This is how you would upload a file:
from pathlib import Path
from cloudpathlib import CloudPath
# write a local file that we will upload:
Path("test_file.txt").write_text("hello")
#> 5
# upload that file to S3
CloudPath("s3://drivendata-public-assets/testsfile.txt").upload_from("test_file.txt")
#> S3Path('s3://mybucket/testsfile.txt')
# read it back from s3
CloudPath("s3://mybucket/testsfile.txt").read_text()
#> 'hello'
Note that you could write to the cloud path directly using the normal write_text, write_bytes, or open methods as well.
I modified your example slightly, dropping some imports and the progress callback to get what I needed for a boto example.
import boto.s3
from boto.s3.key import Key
AWS_ACCESS_KEY_ID = 'your-access-key-id'
AWS_SECRET_ACCESS_KEY = 'your-secret-access-key'
bucket_name = AWS_ACCESS_KEY_ID.lower() + '-form13'
conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
bucket = conn.create_bucket(bucket_name, location=boto.s3.connection.Location.DEFAULT)
filename = 'embedding.csv'
k = Key(bucket)
k.key = filename
k.set_contents_from_filename(filename)
Here's a boto3 example as well:
import boto3
ACCESS_KEY = 'your-access-key'
SECRET_KEY = 'your-secret-key'
file_name='embedding.csv'
object_name=file_name
bucket_name = ACCESS_KEY.lower() + '-form13'
s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)
s3.create_bucket(Bucket=bucket_name)
s3.upload_file(file_name, bucket_name, object_name)
