I do not have access to the root of the bucket, but I do have access to a key (KEY NAME) within the bucket.
Example: I cannot access 'BUCKET NAME' but I can access 'BUCKET NAME/KEY NAME'
I have been trying to move files within 'KEY NAME'. In the code below, what I've managed to get working is list_objects_v2.
upload_file gives me the following error:
An error occurred (AccessDenied) when calling the PutObject operation: Access Denied
download_file gives me the following error:
PermissionError: [WinError 5] Access is denied: 'C/Users/username/Desktop'
I'm very new to the AWS environment. What can I do on my end to fully get the access I need?
import logging
import sys

import boto3
from botocore.exceptions import ClientError


def main():
    arguments = len(sys.argv) - 1
    if arguments < 1:
        print("You must supply a folder name")
        return

    bucket_name = 'BUCKET NAME'
    key_name = 'KEY NAME'
    folder = sys.argv[1]

    s3 = boto3.client('s3')
    objects = s3.list_objects_v2(Bucket=bucket_name,
                                 Prefix=key_name + '/' + folder + '/',
                                 Delimiter='/')
    i = 1

    #
    # Print the bucket's objects within 'KEY NAME'
    #
    if objects is not None:
        # List the object names
        logging.info(f'Objects in {bucket_name}')
        print("Length of Objects: " + str(len(objects)))
        for obj in objects:
            print("......\n")
            print(i)
            print("....\n")
            print(obj)
            print("..\n")
            print(objects[obj])
            i += 1
    else:
        # Didn't get any keys
        logging.info(f'No objects in {bucket_name}')

    #
    # Test to see if we can isolate a folder within 'KEY NAME'
    #
    print("\n")
    print("Common Prefixes" + str(objects['CommonPrefixes']) + "\n")
    keys = objects['CommonPrefixes']
    print("Object 0" + str(keys[0]) + '\n')

    s3 = boto3.resource('s3')
    s3.meta.client.upload_file('C:/Users/username/Desktop/Test/Test.txt',
                               bucket_name,
                               key_name)
    # s3.meta.client.download_file(bucket_name,
    #                              key_name + '/' + folder + '/' + 'Test.txt',
    #                              'C:/Users/username/Desktop')


if __name__ == '__main__':
    main()
The most important part is to ensure that you have been granted adequate permissions to upload, download, and list the prefix, and that every object key you read or write actually falls under that prefix. Note that the question's upload_file call writes to the key KEY NAME itself rather than to a key under KEY NAME/, which a prefix-scoped grant would deny.
Here is an example policy that grants access to a prefix of special/:
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "AllowUserToSeeBucketListInTheConsole",
            "Action": [
                "s3:ListAllMyBuckets",
                "s3:GetBucketLocation"
            ],
            "Effect": "Allow",
            "Resource": [
                "arn:aws:s3:::*"
            ]
        },
        {
            "Sid": "AllowListingOfPrefix",
            "Action": [
                "s3:ListBucket"
            ],
            "Effect": "Allow",
            "Resource": [
                "arn:aws:s3:::my-bucket"
            ],
            "Condition": {
                "StringEquals": {
                    "s3:prefix": [
                        "special/"
                    ],
                    "s3:delimiter": [
                        "/"
                    ]
                }
            }
        },
        {
            "Sid": "UploadDownload",
            "Effect": "Allow",
            "Action": [
                "s3:PutObject",
                "s3:GetObject"
            ],
            "Resource": "arn:aws:s3:::my-bucket/special/*"
        }
    ]
}
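Note that StringEquals on s3:prefix permits listing only when the request's prefix is exactly special/; the question's code lists deeper prefixes such as KEY NAME/folder/, which this statement would deny. A hedged variant that also allows sub-folders swaps in StringLike:

"Condition": {
    "StringLike": {
        "s3:prefix": ["special/", "special/*"]
    }
}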
Then, you can run code like this:
import boto3

s3_client = boto3.client('s3')

# Upload a file to S3
s3_client.upload_file('/tmp/hello.txt', 'my-bucket', 'special/hello.txt')

# Download an object
s3_client.download_file('my-bucket', 'special/hello.txt', '/tmp/hello2.txt')

# List objects using Client method
response = s3_client.list_objects_v2(Bucket='my-bucket', Delimiter='/', Prefix='special/')
for obj in response['Contents']:
    print(obj['Key'], obj['Size'])

# List objects using Resource method
s3_resource = boto3.resource('s3')
bucket = s3_resource.Bucket('my-bucket')
for obj in bucket.objects.filter(Delimiter='/', Prefix='special/'):
    print(obj.key, obj.size)
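Separately, the PermissionError in the question is a local Windows error rather than an S3 one: download_file() expects a destination file path including the filename, and writing to a bare directory such as the Desktop fails with WinError 5 no matter what the bucket policy says. For example:

# Destination must be a full file path, not a directory
s3_client.download_file('my-bucket', 'special/hello.txt',
                        'C:/Users/username/Desktop/hello.txt')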
Related
I was able to run an Athena query that writes a CSV file with the result, plus a .csv.metadata file, to the same bucket. I changed the name of the CSV file, which works perfectly, but I should not have this metadata file in my bucket. How do I remove it? Or better, is there a way to prevent this file from being uploaded? I just need the CSV file.
I saw in a comment that someone added the following to the bucket policy. What is the best approach?
{
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "StatementPrefixDeny",
            "Effect": "Deny",
            "Principal": {
                "AWS": "*"
            },
            "Action": "s3:*",
            "Resource": "arn:aws:s3:::prefix/*.csv.metadata"
        }
    ]
}
import time
from datetime import datetime

import boto3

client = boto3.client('athena')
client_s3 = boto3.client('s3')

# Assumed values, derived from s3_output below; the original snippet left
# BUCKET_NAME and PREFIX undefined.
BUCKET_NAME = 'qualquer_coisa-prod-ga'
PREFIX = 'boo-v1/'

database = 'data_lake'
query = "SELECT buid, property_id, checkin_date, count(*) number_of_boo FROM data_lake.boo_v1 where source_logic in ('telephone') and cancelled_flag=0 and checkin_date= date_add('day', -1, current_date) group by 1,2,3"
s3_output = 's3://qualquer_coisa-prod-ga/boo-v1/'

response = client.start_query_execution(
    QueryString=query,
    QueryExecutionContext={
        'Database': database
    },
    ResultConfiguration={
        'OutputLocation': s3_output
    }
)

describe_statement_response = client.get_query_execution(
    QueryExecutionId=response['QueryExecutionId'])
query_result_status = describe_statement_response['QueryExecution']['Status']['State']
query_timeout_in_minutes = time.time() + 60 * 10
print("Waiting for query to be executed")

# Poll Athena until the query succeeds, fails, or times out
while query_result_status != 'SUCCEEDED':
    if time.time() > query_timeout_in_minutes or query_result_status == 'FAILED':
        raise ValueError(
            f'Timeout for running the query or query execution has failed. Query Status: {query_result_status} Response {describe_statement_response}. Finishing execution')
    time.sleep(5)
    describe_statement_response = client.get_query_execution(
        QueryExecutionId=response['QueryExecutionId'])
    query_result_status = describe_statement_response['QueryExecution']['Status']['State']

today = datetime.today().strftime('%Y-%m-%d')
response_s3 = client_s3.list_objects(
    Bucket=BUCKET_NAME,
    Prefix=PREFIX,
)
name = response_s3["Contents"][0]["Key"]
client_s3.copy_object(Bucket=BUCKET_NAME, CopySource=BUCKET_NAME + '/' + name, Key=PREFIX + "boo_v1_" + today + ".csv")
client_s3.delete_object(Bucket=BUCKET_NAME, Key=name)
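For what it's worth, Athena always writes the .csv.metadata file next to the query result and offers no option to suppress it, so a deny policy only blocks access rather than preventing the upload. A minimal client-side sketch, reusing the client_s3, BUCKET_NAME, PREFIX, and response_s3 names from the code above, is to delete any metadata objects after renaming the result:

# Athena always writes a .csv.metadata file next to the result;
# delete any such objects under the prefix once the CSV is renamed.
for obj in response_s3.get('Contents', []):
    if obj['Key'].endswith('.csv.metadata'):
        client_s3.delete_object(Bucket=BUCKET_NAME, Key=obj['Key'])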
I am trying to modify the bucket policy of one of my buckets using boto3. Please refer to the following initial/existing bucket policy of the bucket:
{
    "Version": "2012-10-17",
    "Id": "Policy1604310539665",
    "Statement": [
        {
            "Sid": "Stmt1604310537860",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::xxx:root"
            },
            "Action": [
                "s3:ListBucket",
                "s3:PutObject"
            ],
            "Resource": [
                "arn:aws:s3:::bucket",
                "arn:aws:s3:::bucket*"
            ]
        }
    ]
}
I am trying to modify the above policy using the following piece of code, attaching one more role to the bucket policy:
import json

import boto3

s3 = boto3.client('s3')

# Assumed placeholders; the original snippet left bucket_name and role_arn undefined.
bucket_name = 'bucket'
role_arn = 'arn:aws:iam::xxx:role/xx-xx-xx'

result = s3.get_bucket_policy(Bucket=bucket_name)
policy_statement = json.loads(result['Policy'])

store = policy_statement['Statement'][0]['Principal']['AWS']
del policy_statement['Statement'][0]['Principal']['AWS']

if isinstance(store, str):
    role_arn_list = [role_arn] + [store]
else:
    role_arn_list = [role_arn] + store
policy_statement['Statement'][0]['Principal'].update({"AWS": role_arn_list})

# Convert the policy from JSON dict to string
policy_new = json.dumps(policy_statement)

# Update the policy of the given bucket
s3.put_bucket_policy(Bucket=bucket_name, Policy=policy_new)
The above code works fine, but when I try to put the policy back to the bucket I get a MalformedPolicy exception. When I tried to debug and inspect the policy created by the above code, I can see the following:
{
    'Version': '2012-10-17',
    'Id': 'Policy1604310539665',
    'Statement': [{
        'Sid': 'Stmt1604310537860',
        'Effect': 'Allow',
        'Principal': {
            'AWS': ['arn:aws:iam::xxx:role/xx-xx-xx',
                    'arn:aws:iam::xx:root',
                    'AROAVCQ6H5MBRCO7T5NKB']
        },
        'Action': ['s3:ListBucket', 's3:PutObject'],
        'Resource': ['arn:aws:s3:::bucket', 'arn:aws:s3:::bucket/*']
    }]
}
Problem: I am not able to understand where the random string AROAVCQ6H5MBRCO7T5NKB is coming from and how to handle it.
An identifier starting with AROA is the unique ID of an IAM Role, much like an access key ID always starts with AKIA. AWS stores policy principals internally as these unique IDs; when a role referenced in a policy is deleted, the ID can no longer be resolved back to an ARN, so the raw AROA... string is what you see, and put_bucket_policy rejects it as malformed.
See: IAM identifiers - AWS Identity and Access Management
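A hedged way to handle it, assuming the deleted role should simply be dropped, is to filter out principals that are not ARNs before writing the policy back:

import json

import boto3

s3 = boto3.client('s3')
bucket_name = 'bucket'  # hypothetical name, as in the question

result = s3.get_bucket_policy(Bucket=bucket_name)
policy = json.loads(result['Policy'])

for statement in policy['Statement']:
    principals = statement['Principal'].get('AWS', [])
    if isinstance(principals, str):
        principals = [principals]
    # Unique IDs like AROA... belong to deleted roles; keep only real ARNs
    statement['Principal']['AWS'] = [p for p in principals if p.startswith('arn:')]

s3.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))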
Trying to append a new notification to a bucket. I couldn't find any example on the internet.
I need to have ObjectCreated events sent to SQS, and I need to decide which queue the event is sent to based on the prefix. Thus, each notification would have a different queue and prefix on the same bucket.
The problem is that I cannot append a new notification; I just override the previous notification configured on the bucket.
This is the code I have so far:
bucket_notifications_configuration = {
    'QueueConfigurations': [{
        'Events': ['s3:ObjectCreated:*'],
        'Id': f"Notif_{queue_name}",
        'QueueArn': queue.attributes['QueueArn'],
        "Filter": {
            "Key": {
                "FilterRules": [
                    {
                        "Name": "suffix",
                        "Value": f"{prefix}"
                    }
                ]
            }
        }
    }]
}

qpolicy = {
    "Version": "2012-10-17",
    "Id": f"{queue_arn}/SQSDefaultPolicy",
    "Statement": [{
        "Sid": f"allow bucket {bucket} to notify queue {queue_name}",
        "Effect": "Allow",
        "Principal": {"AWS": "*"},
        "Action": "SQS:SendMessage",
        "Resource": queue_arn,
        "Condition": {
            "ArnLike": {
                "aws:SourceArn": f"arn:aws:s3:*:*:{bucket}"
            }
        }
    }]
}

queue_attrs = queue.attributes
queue_attrs = {"Policy": json.dumps(qpolicy), }
queue.set_attributes(Attributes=queue_attrs)
logger.debug(f"queue ready with attributes: {queue.attributes}")

previous_notif = client.get_bucket_notification_configuration(Bucket=bucket)
previous_notif.pop('ResponseMetadata')
try:
    print("appended")
    previous_notif['QueueConfigurations'].append(
        bucket_notifications_configuration['QueueConfigurations'][0])
    print(f"appended {previous_notif} ")
except KeyError:
    previous_notif['QueueConfigurations'] = bucket_notifications_configuration['QueueConfigurations'][0]
    print("created")

client.put_bucket_notification_configuration(
    Bucket=bucket,
    NotificationConfiguration=bucket_notifications_configuration)
I make sure the notification ID is different from any other, and I make sure the prefix is different.
This code overrides the previous notification with the new one, instead of appending the new one.
I managed to get it working with the code below.
It takes the existing configurations, adds a new configuration, then saves it back to the bucket. The code assumes that there is an existing configuration.
import boto3

s3_client = boto3.client('s3', region_name='ap-southeast-2')

queue_name = 'queue2'
queue_arn = 'arn:aws:sqs:ap-southeast-2:123456789012:queue2'
bucket = 'my-bucket'
prefix = 'folder2/'

# Get the current notification configurations
response = s3_client.get_bucket_notification_configuration(Bucket=bucket)
configurations = response['QueueConfigurations']

# New configuration to add
new_configuration = {
    'Id': f"Notif_{queue_name}",
    'QueueArn': queue_arn,
    'Events': [
        's3:ObjectCreated:*',
    ],
    'Filter': {
        'Key': {
            'FilterRules': [
                {
                    'Name': 'prefix',
                    'Value': prefix
                },
            ]
        }
    }
}
configurations.append(new_configuration)

# Save combined configurations
response = s3_client.put_bucket_notification_configuration(
    Bucket=bucket,
    NotificationConfiguration={'QueueConfigurations': configurations}
)
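If the bucket might not have any existing queue configurations, a small hedged tweak avoids a KeyError by falling back to an empty list:

# Fall back to an empty list when no queue configurations exist yet
configurations = response.get('QueueConfigurations', [])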
I'm creating an IAM user with boto, but I'm getting an "invalid JSON format" error.
It seems that I have the JSON variable set up correctly; do you see where the issue is?
Also, after create_access_key(username), how can I return the secret access key so that I can print it?
import boto.iam.connection

pName = "name"
username = "user"
pJson = {
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "*",
            "Resource": "*"
        }
    ]
}

c = boto.iam.connect_to_region("us-east-1")
c.create_user(username)
c.put_user_policy(username, pName, pJson)
c.create_access_key(username)
k = c.get_all_access_keys(username)
print k['list_access_keys_response']['list_access_keys_result']['access_key_metadata'][0]['access_key_id']
# how to return secret access key?
The policy document ought to be a string according to the boto documentation, so you need to call json.dumps yourself to convert the dictionary into a string:

import json

policy_document = json.dumps(pJson)
c.put_user_policy(username, pName, policy_document)
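As for printing the secret access key: it is only returned by the create_access_key call itself; get_all_access_keys does not include it. Following the same nested-response convention as the get_all_access_keys lookup above, something like this should work, though the exact nesting is an assumption to check against your boto version:

response = c.create_access_key(username)
access_key = response['create_access_key_response']['create_access_key_result']['access_key']
print access_key['access_key_id']
print access_key['secret_access_key']  # only available at creation time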
I am creating a signed URL using the following:
AWS_ACCESS_KEY_ID = my_access_key
AWS_SECRET_ACCESS_KEY = my_secret_access_key
KEYPAIR_ID = my_keypair_id
KEYPAIR_FILE = path_to_keypair_file
CF_DISTRIBUTION_ID = cf_dist_id
my_connection = cloudfront.CloudFrontConnection(
    AWS_ACCESS_KEY_ID,
    AWS_SECRET_ACCESS_KEY
)
distro_summary = my_connection.get_all_distributions()[0]
distro_info = my_connection.get_distribution_info(distro_summary.id)
distro = distro_summary.get_distribution()

SECS = 8000
signed_url = distro.create_signed_url(
    "https://%s/%s" % (distro_info.domain_name, 'restaurant_1_banner.png'),
    KEYPAIR_ID,
    expire_time=time.time() + SECS,
    valid_after_time=None,
    ip_address=None,
    policy_url=None,
    private_key_file=KEYPAIR_FILE
    # private_key_string=KEYPAIR_ID
)
return signed_url
This returns a url like: "https://d1yllqv1oc7n6x.cloudfront.net/restaurant_1_banner.png?Expires=1426681326.67&Signature=Nsvyl-EowDRGuw-MfdgS34C6bsHKKC2L88ROfPBRAnsbpoeYfpJj6NQaTj4PGiG02Z7PRqkk5F0cBWKOik738H8xrlQQf8CuS0AouisnqMvZ4FLx94fSMo8vwFDg9jKLTMB1T0AGjWvgAcDlkLo4nYxyHQ077pwp3Do8g1eP62QD-~Ys4kejtVGtPTx6O1pM4gRLsmM8Kn7HJ618Hp4XMgRWwqJaCL-2C0YQP1PdEMbSOS6ZrmGTN~U5T-s-PZX1poS6qRiY4-Ma66DVLgmOTBh5vqjCWEqsbKZKFWFufsA2mMa4ON11yBUSyIbGJPpgKdRLU0pZuo7RX3~sIe6Q9w__&Key-Pair-Id=APKAISF4B35DSGOUTGTQ"
When I click on this link, I get the message:
<Error>
<Code>AccessDenied</Code>
<Message>Access denied</Message>
</Error>
This is the bucket policy for my S3 bucket.
{
    "Version": "2008-10-17",
    "Id": "PolicyForCloudFrontPrivateContent",
    "Statement": [
        {
            "Sid": "1",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::cloudfront:user/CloudFront Origin Access Identity E3I8A03QRR3ASO"
            },
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::menumaster/*"
        }
    ]
}
Please let me know if any additional information is required.
This is my bucket policy.
{
    "Version": "2008-10-17",
    "Id": "PolicyForCloudFrontPrivateContent",
    "Statement": [
        {
            "Sid": "1",
            "Effect": "Allow",
            "Principal": {
                "AWS": "arn:aws:iam::cloudfront:user/CloudFront Origin Access Identity EH238ELEGANOC"
            },
            "Action": "s3:GetObject",
            "Resource": "arn:aws:s3:::onur.deneme/*"
        }
    ]
}
This is the distribution:
di53i9yykewl5.cloudfront.net
Restrict Bucket Access : Yes
Origin Access Identity : Use an Existing Identity
Restrict Viewer Access(Use Signed URLs) : Yes
Trusted Signers : Self
There should be no other ACL or policy.
Is "Restrict Bucket Access" set to "Yes", and is an "Origin Access Identity" selected?
Can you try the code below, which I have used before?
#!/usr/bin/python
import time
import boto
import rsa
from boto import cloudfront
from boto.cloudfront import distribution

AWS_ACCESS_KEY_ID = "your access key"
AWS_SECRET_ACCESS_KEY = "your secret access key"

conn = boto.cloudfront.CloudFrontConnection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
dist = conn.get_all_distributions()
a = dist[0].get_distribution()

# Set parameters for URL
key_pair_id = "your key pair id"  # CloudFront security key
priv_key_file = "xxxxxxxxx.pem"   # CloudFront private keypair file
expires = int(time.time()) + 60   # 1 min
url = "http://dbvvi2cumi6nj.cloudfront.net/santa.png"

signed_url = a.create_signed_url(url, key_pair_id, expires, private_key_file=priv_key_file)
print signed_url
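One difference from the question's code worth noting: expires here is cast to int, while the question passed time.time() + SECS, a float, and the resulting URL indeed shows Expires=1426681326.67. CloudFront expects an integer epoch timestamp, so casting is a plausible fix on top of checking the OAI settings:

expire_time = int(time.time()) + SECS  # CloudFront expects an integer epoch time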