Python 3 Firebase Storage SDK cannot upload to Emulator

I have the following code snippet:
import firebase_admin
from firebase_admin import credentials
from firebase_admin import storage
from google.cloud import storage  # note: this shadows the firebase_admin.storage import above

class firebase_storage():
    def __init__(self, path_to_sak, root_bucket):
        try:
            self.cred = credentials.Certificate(path_to_sak)
            firebase_admin.initialize_app(self.cred)
        except Exception as e:
            print("Firebase App may have already been initialized")
        self.bucket = firebase_admin.storage.bucket(root_bucket)

    def upload(self, key, file_path):
        blob = storage.Blob(key, self.bucket)
        blob.upload_from_filename(file_path)

    def download(self, key, file_path):
        blob = storage.Blob(key, self.bucket)
        blob.download_to_filename(file_path)

    def upload_string(self, key, string, mime_type):
        blob = storage.Blob(key, self.bucket)
        blob.upload_from_string(string, content_type=mime_type)
I'm using the Firebase Emulator for Storage, and I have verified that downloads work via firebase_storage.download().
However, when I call upload(), the following exception is thrown:
Traceback (most recent call last):
  File "/usr/local/lib/python3.8/dist-packages/google/cloud/storage/blob.py", line 2348, in upload_from_file
    created_json = self._do_upload(
  File "/usr/local/lib/python3.8/dist-packages/google/cloud/storage/blob.py", line 2170, in _do_upload
    response = self._do_multipart_upload(
  File "/usr/local/lib/python3.8/dist-packages/google/cloud/storage/blob.py", line 1732, in _do_multipart_upload
    response = upload.transmit(
  File "/usr/local/lib/python3.8/dist-packages/google/resumable_media/requests/upload.py", line 149, in transmit
    self._process_response(response)
  File "/usr/local/lib/python3.8/dist-packages/google/resumable_media/_upload.py", line 116, in _process_response
    _helpers.require_status_code(response, (http_client.OK,), self._get_status_code)
  File "/usr/local/lib/python3.8/dist-packages/google/resumable_media/_helpers.py", line 99, in require_status_code
    raise common.InvalidResponse(
google.resumable_media.common.InvalidResponse: ('Request failed with status code', 400, 'Expected one of', <HTTPStatus.OK: 200>)

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "boot.py", line 55, in <module>
    run()
  File "boot.py", line 35, in run
    fb_storage.upload(key, file)
  File "/root/python_db_client/src/firebase_storage.py", line 20, in upload
    blob.upload_from_filename(file_path)
  File "/usr/local/lib/python3.8/dist-packages/google/cloud/storage/blob.py", line 2475, in upload_from_filename
    self.upload_from_file(
  File "/usr/local/lib/python3.8/dist-packages/google/cloud/storage/blob.py", line 2364, in upload_from_file
    _raise_from_invalid_response(exc)
  File "/usr/local/lib/python3.8/dist-packages/google/cloud/storage/blob.py", line 3933, in _raise_from_invalid_response
    raise exceptions.from_http_status(response.status_code, message, response=response)
google.api_core.exceptions.BadRequest: 400 POST http://myserver.com:9194/upload/storage/v1/b/xxxxxx.appspot.com/o?uploadType=multipart: Bad Request: ('Request failed with status code', 400, 'Expected one of', <HTTPStatus.OK: 200>)
My storage.rules look like this:
rules_version = '2';
service firebase.storage {
  match /b/{bucket}/o {
    match /{allPaths=**} {
      allow write, read: if true;
    }
  }
}
And so, it would appear that public read/write access is allowed.
Everything else is working: the other emulators (Firestore, Auth) work fine, but Storage uploads refuse to work :(
Any help would be greatly appreciated, thank you!

Maybe there is a problem initializing your app. You are taking it for granted that the app is already initialized whenever initialization throws an error. Try verifying the connection first; it may help.
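For what it's worth, a small sketch of that check: firebase_admin.get_app() raises ValueError when no default app has been initialized, so you don't need to swallow every exception (path_to_sak is the same variable as in the question):

import firebase_admin
from firebase_admin import credentials

try:
    app = firebase_admin.get_app()  # raises ValueError if not yet initialized
except ValueError:
    app = firebase_admin.initialize_app(credentials.Certificate(path_to_sak))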

The Python Admin SDK does not currently support the Storage emulator, according to the documentation:
https://firebase.google.com/docs/emulator-suite/install_and_configure#admin_sdk_availability
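That said, the underlying google-cloud-storage client does honor the STORAGE_EMULATOR_HOST environment variable in recent versions, so a workaround sketch like the following may be worth trying. Whether the Firebase Storage emulator accepts these requests is not guaranteed, and the host, port, and project id here are assumptions:

import os

# Must be set before the client is created; adjust host/port to your emulator.
os.environ["STORAGE_EMULATOR_HOST"] = "http://localhost:9199"

from google.cloud import storage

# Assumed project id; older client versions may also need
# credentials=google.auth.credentials.AnonymousCredentials().
client = storage.Client(project="my-project")
bucket = client.bucket("xxxxxx.appspot.com")
bucket.blob("some/key.txt").upload_from_string("hello", content_type="text/plain")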

Related

How to add constraints in client.services.create when creating a new service in an existing Docker swarm with the Python Docker SDK?

I want to create a service on an existing swarm network using the Python Docker SDK. I have a swarm network named test_net.
Library installation: pip3 install docker
Below is the code used to create the service:
import docker
from docker.types import RestartPolicy, Placement

def python_sdk():
    client = docker.DockerClient(base_url='unix://var/run/docker.sock')
    service_created = client.services.create(
        image='python:3.7-alpine',
        command='python /home/ubuntu/python.py',
        constraints=Placement(constraints=['worker']),
        mounts='/home/ubuntu/deployment/python.py:/home/ubuntu/python.py:rw',
        networks=['test_net'],
        restart_policy=RestartPolicy(condition='none'),
        name='python_sdk'
    )
    print("Created service : ", service_created)
Below is the error I got when executing the above code:
Traceback (most recent call last):
  File "/home/ubuntu/deployment/dags/venv/lib/python3.6/site-packages/docker/api/client.py", line 268, in _raise_for_status
    response.raise_for_status()
  File "/home/ubuntu/deployment/dags/venv/lib/python3.6/site-packages/requests/models.py", line 943, in raise_for_status
    raise HTTPError(http_error_msg, response=self)
requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: http+docker://localhost/v1.41/services/create

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "client.py", line 20, in <module>
    python_sdk()
  File "client.py", line 16, in python_sdk
    name='python_sdk'
  File "/home/ubuntu/deployment/dags/venv/lib/python3.6/site-packages/docker/models/services.py", line 227, in create
    service_id = self.client.api.create_service(**create_kwargs)
  File "/home/ubuntu/deployment/dags/venv/lib/python3.6/site-packages/docker/utils/decorators.py", line 34, in wrapper
    return f(self, *args, **kwargs)
  File "/home/ubuntu/deployment/dags/venv/lib/python3.6/site-packages/docker/api/service.py", line 190, in create_service
    self._post_json(url, data=data, headers=headers), True
  File "/home/ubuntu/deployment/dags/venv/lib/python3.6/site-packages/docker/api/client.py", line 274, in _result
    self._raise_for_status(response)
  File "/home/ubuntu/deployment/dags/venv/lib/python3.6/site-packages/docker/api/client.py", line 270, in _raise_for_status
    raise create_api_error_from_http_exception(e)
  File "/home/ubuntu/deployment/dags/venv/lib/python3.6/site-packages/docker/errors.py", line 31, in create_api_error_from_http_exception
    raise cls(e, response=response, explanation=explanation)
docker.errors.APIError: 400 Client Error for http+docker://localhost/v1.41/services/create: Bad Request ("json: cannot unmarshal object into Go struct field Placement.TaskTemplate.Placement.Constraints of type []string")
I am referring to this documentation.
How can I use the Placement object to set constraints?
I also tried constraints = ["Placement(constraints=['worker']"]
I found the answer to the above issue. As mentioned in the documentation, a list of str needs to be passed for constraints.
The parameter is documented as: constraints (list of str) – Placement constraints.
So the final code will be:
import docker
from docker.types import RestartPolicy

def python_sdk():
    client = docker.DockerClient(base_url='unix://var/run/docker.sock')
    service_created = client.services.create(
        image='python:3.7-alpine',
        command='python /home/ubuntu/python.py',
        constraints=['node.role == worker'],  # plain list of str, no Placement wrapper
        mounts=['/home/ubuntu/deployment/python.py:/home/ubuntu/python.py:rw'],
        networks=['test_net'],
        restart_policy=RestartPolicy(condition='none'),
        name='python_sdk'
    )
    print("Created service : ", service_created)

Firestore client in python (as user) using firebase_admin or google.cloud.firestore

I am building a python client-side application that uses Firestore. I have successfully used Google Identity Platform to sign up and sign in to the Firebase project, and created a working Firestore client using google.cloud.firestore.Client which is authenticated as a user:
import json
import requests
from requests.exceptions import HTTPError  # needed for the except clause below

import google.oauth2.credentials
from google.cloud import firestore

request_url = f"https://identitytoolkit.googleapis.com/v1/accounts:signInWithPassword?key={self.__api_key}"
headers = {"Content-Type": "application/json; charset=UTF-8"}
data = json.dumps({"email": self.__email, "password": self.__password, "returnSecureToken": True})
response = requests.post(request_url, headers=headers, data=data)
try:
    response.raise_for_status()
except (HTTPError, Exception):
    content = response.json()
    error = f"error: {content['error']['message']}"
    raise AuthError(error)

json_response = response.json()
self.__token = json_response["idToken"]
self.__refresh_token = json_response["refreshToken"]

credentials = google.oauth2.credentials.Credentials(
    self.__token,
    self.__refresh_token,
    client_id="",
    client_secret="",
    token_uri=f"https://securetoken.googleapis.com/v1/token?key={self.__api_key}",
)
self.__db = firestore.Client(self.__project_id, credentials)
I have the problem, however, that when the token has expired, I get the following error:
Traceback (most recent call last):
  File "/usr/local/lib/python3.7/dist-packages/google/api_core/grpc_helpers.py", line 57, in error_remapped_callable
    return callable_(*args, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/grpc/_channel.py", line 826, in __call__
    return _end_unary_response_blocking(state, call, False, None)
  File "/usr/local/lib/python3.7/dist-packages/grpc/_channel.py", line 729, in _end_unary_response_blocking
    raise _InactiveRpcError(state)
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
    status = StatusCode.UNAUTHENTICATED
    details = "Missing or invalid authentication."
    debug_error_string = "{"created":"@1613043524.699081937","description":"Error received from peer ipv4:172.217.16.74:443","file":"src/core/lib/surface/call.cc","file_line":1055,"grpc_message":"Missing or invalid authentication.","grpc_status":16}"
>

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/usr/lib/python3.7/threading.py", line 917, in _bootstrap_inner
    self.run()
  File "/home/my_app/src/controllers/im_alive.py", line 20, in run
    self.__device_api.set_last_updated(utils.device_id())
  File "/home/my_app/src/api/firestore/firestore_device_api.py", line 21, in set_last_updated
    "lastUpdatedTime": self.__firestore.SERVER_TIMESTAMP
  File "/home/my_app/src/api/firestore/firestore.py", line 100, in update
    ref.update(data)
  File "/usr/local/lib/python3.7/dist-packages/google/cloud/firestore_v1/document.py", line 382, in update
    write_results = batch.commit()
  File "/usr/local/lib/python3.7/dist-packages/google/cloud/firestore_v1/batch.py", line 147, in commit
    metadata=self._client._rpc_metadata,
  File "/usr/local/lib/python3.7/dist-packages/google/cloud/firestore_v1/gapic/firestore_client.py", line 1121, in commit
    request, retry=retry, timeout=timeout, metadata=metadata
  File "/usr/local/lib/python3.7/dist-packages/google/api_core/gapic_v1/method.py", line 145, in __call__
    return wrapped_func(*args, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/google/api_core/retry.py", line 286, in retry_wrapped_func
    on_error=on_error,
  File "/usr/local/lib/python3.7/dist-packages/google/api_core/retry.py", line 184, in retry_target
    return target()
  File "/usr/local/lib/python3.7/dist-packages/google/api_core/timeout.py", line 214, in func_with_timeout
    return func(*args, **kwargs)
  File "/usr/local/lib/python3.7/dist-packages/google/api_core/grpc_helpers.py", line 59, in error_remapped_callable
    six.raise_from(exceptions.from_grpc_error(exc), exc)
  File "<string>", line 3, in raise_from
google.api_core.exceptions.Unauthenticated: 401 Missing or invalid authentication.
I have tried omitting the token and only specifying the refresh token, and then calling credentials.refresh(), but the expires_in in the response from the https://securetoken.googleapis.com/v1/token endpoint is a string instead of a number (docs here), which makes _parse_expiry(response_data) in google.oauth2._client.py:257 raise an exception.
Is there any way to use the firestore.Client from either google.cloud or firebase_admin and have it automatically handle refreshing tokens, or do I need to switch to manually calling the Firestore RPC API and refreshing tokens at the correct time?
Note: There are no users interacting with the python app, so the solution must not require user interaction.
Can't you just pass the string cast to an integer, _parse_expiry(int(float(response_data)))?
If that doesn't work, you could try making the call and refreshing the token after getting a 401 error; see my answer for the general idea of how to handle tokens.
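A rough sketch of that retry idea (the securetoken endpoint and its response fields follow Google's token-refresh REST API; write_with_reauth and make_client are hypothetical names, and make_client stands in for rebuilding the firestore.Client exactly as in the question):

import requests
from google.api_core.exceptions import Unauthenticated

def refresh_id_token(api_key, refresh_token):
    # Exchange the refreshToken for a fresh idToken (Firebase securetoken REST API).
    resp = requests.post(
        f"https://securetoken.googleapis.com/v1/token?key={api_key}",
        data={"grant_type": "refresh_token", "refresh_token": refresh_token})
    resp.raise_for_status()
    body = resp.json()
    return body["id_token"], body["refresh_token"]

def write_with_reauth(db, do_write, make_client, api_key, refresh_token):
    # do_write(db) performs the Firestore operation; make_client(id_token, refresh_token)
    # is a hypothetical factory that rebuilds the client with fresh credentials.
    try:
        return do_write(db)
    except Unauthenticated:
        id_token, refresh_token = refresh_id_token(api_key, refresh_token)
        return do_write(make_client(id_token, refresh_token))  # retry once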
As mentioned by @Marco, it is recommended that you use a service account if the code runs in an environment without users. When you use a service account, you can just set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the location of the service account JSON file and instantiate the Firestore client without any credentials (the credentials will be picked up automatically):
from google.cloud import firestore

client = firestore.Client()
and run it as (assuming Linux):
$ export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json
$ python file.py
Still, if you really want to use user credentials for the script, you can install the Google Cloud SDK, then:
$ gcloud auth application-default login
This will open a browser for you to select an account and log in. After logging in, it creates a "virtual" service account file corresponding to your user account (which will also be loaded automatically by clients). Here too, you don't need to pass any parameters to your client.
See also: Difference between “gcloud auth application-default login” and “gcloud auth login”

Accessing google cloud storage bucket from cloud functions throws 500 error

I'm trying to access a Google Cloud Storage bucket from a Cloud Functions (Python) instance and it's throwing a cryptic 500 error.
I have given the service account the Editor role too; it didn't make any difference.
I also checked whether any quota was being exceeded; the limits were not even close.
Can anyone help me find the cause of this error?
Here is the code:
from google.cloud import storage
import os
import base64

storage_client = storage.Client()

def init_analysis(event, context):
    print("event", event)
    pubsub_message = base64.b64decode(event['data']).decode('utf-8')
    print(pubsub_message)
    bucket_name = 'my-bucket'
    bucket = storage_client.get_bucket(bucket_name)
    blobs = bucket.list_blobs()
    for blob in blobs:
        print(blob.name)
Error:
Traceback (most recent call last):
  File "/env/local/lib/python3.7/site-packages/google/auth/compute_engine/credentials.py", line 99, in refresh
    service_account=self._service_account_email)
  File "/env/local/lib/python3.7/site-packages/google/auth/compute_engine/_metadata.py", line 208, in get_service_account_token
    'instance/service-accounts/{0}/token'.format(service_account))
  File "/env/local/lib/python3.7/site-packages/google/auth/compute_engine/_metadata.py", line 140, in get
    url, response.status, response.data), response)
google.auth.exceptions.TransportError: ("Failed to retrieve http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/my-project@appspot.gserviceaccount.com/token from the Google Compute Engine metadata service. Status: 500 Response:\nb'Could not fetch URI /computeMetadata/v1/instance/service-accounts/my-project@appspot.gserviceaccount.com/token\\n'", <google.auth.transport.requests._Response object at 0x2b0ef9edf438>)

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 383, in run_background_function
    _function_handler.invoke_user_function(event_object)
  File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 217, in invoke_user_function
    return call_user_function(request_or_event)
  File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 214, in call_user_function
    event_context.Context(**request_or_event.context))
  File "/user_code/main.py", line 21, in init_analysis
    bucket = storage_client.get_bucket(bucket_name)
  File "/env/local/lib/python3.7/site-packages/google/cloud/storage/client.py", line 227, in get_bucket
    bucket.reload(client=self)
  File "/env/local/lib/python3.7/site-packages/google/cloud/storage/_helpers.py", line 130, in reload
    _target_object=self,
  File "/env/local/lib/python3.7/site-packages/google/cloud/_http.py", line 315, in api_request
    target_object=_target_object,
  File "/env/local/lib/python3.7/site-packages/google/cloud/_http.py", line 192, in _make_request
    return self._do_request(method, url, headers, data, target_object)
  File "/env/local/lib/python3.7/site-packages/google/cloud/_http.py", line 221, in _do_request
    return self.http.request(url=url, method=method, headers=headers, data=data)
  File "/env/local/lib/python3.7/site-packages/google/auth/transport/requests.py", line 205, in request
    self._auth_request, method, url, request_headers)
  File "/env/local/lib/python3.7/site-packages/google/auth/credentials.py", line 122, in before_request
    self.refresh(request)
  File "/env/local/lib/python3.7/site-packages/google/auth/compute_engine/credentials.py", line 102, in refresh
    six.raise_from(new_exc, caught_exc)
  File "<string>", line 3, in raise_from
google.auth.exceptions.RefreshError: ("Failed to retrieve http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/my-project@appspot.gserviceaccount.com/token from the Google Compute Engine metadata service. Status: 500 Response:\nb'Could not fetch URI /computeMetadata/v1/instance/service-accounts/my-project@appspot.gserviceaccount.com/token\\n'", <google.auth.transport.requests._Response object at 0x2b0ef9edf438>)
The error you are getting is because your Cloud Functions service account doesn't have the cloudfunctions.serviceAgent role. As you can see in the documentation:
Authenticating as the runtime service account from inside your function may fail if you change the Cloud Functions service account's permissions.
However, I found that sometimes you cannot add this role, as it doesn't show up as an option. I have reported this issue to the Google Cloud Functions engineering team and they are working to solve it.
Nevertheless, you can add the role again using this gcloud command:
gcloud projects add-iam-policy-binding <project_name> --role=roles/cloudfunctions.serviceAgent --member=serviceAccount:service-<project_number>@gcf-admin-robot.iam.gserviceaccount.com
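If it helps, you can then dump the project's IAM policy and check that the binding is present (standard gcloud; substitute your project name):
gcloud projects get-iam-policy <project_name>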

Error when trying to use zeep to connect to Netsuite

I'm writing a Python application that connects to NetSuite (via its WSDL) and inserts data into a table. When I try to use zeep to connect to our NetSuite server, I get this error:
python3.6 /xxx/python-netsuite/netsuite/client.py
Traceback (most recent call last):
  File "/xxx/.virtualenvs/for-netsuite/lib/python3.6/site-packages/zeep/xsd/schema.py", line 565, in _get_component
    return items[qname]
KeyError: <lxml.etree.QName object at 0x10e9bd850>

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/xxx/python-netsuite/netsuite/client.py", line 2, in <module>
    from netsuite.service import (client,
  File "/xxx/python-netsuite/netsuite/service.py", line 13, in <module>
    Passport = model('ns1:Passport')
  File "/xxx/.virtualenvs/for-netsuite/lib/python3.6/site-packages/zeep/client.py", line 263, in get_type
    return self.wsdl.types.get_type(name)
  File "/xxx/.virtualenvs/for-netsuite/lib/python3.6/site-packages/zeep/xsd/schema.py", line 140, in get_type
    return self._get_instance(qname, 'get_type', 'type')
  File "/xxx/.virtualenvs/for-netsuite/lib/python3.6/site-packages/zeep/xsd/schema.py", line 243, in _get_instance
    raise last_exception
  File "/xxx/.virtualenvs/for-netsuite/lib/python3.6/site-packages/zeep/xsd/schema.py", line 239, in _get_instance
    return method(qname)
  File "/xxx/.virtualenvs/for-netsuite/lib/python3.6/site-packages/zeep/xsd/schema.py", line 523, in get_type
    return self._get_component(qname, self._types, 'type')
  File "/xxx/.virtualenvs/for-netsuite/lib/python3.6/site-packages/zeep/xsd/schema.py", line 580, in _get_component
    location=self._location)
zeep.exceptions.LookupError: No type 'Passport' in namespace urn:types.core_2017_1.platform.webservices.netsuite.com. Available types are:
{urn:types.core_2017_1.platform.webservices.netsuite.com}RecordType,
{urn:types.core_2017_1.platform.webservices.netsuite.com}SearchRecordType,
{urn:types.core_2017_1.platform.webservices.netsuite.com}GetAllRecordType,
{urn:types.core_2017_1.platform.webservices.netsuite.com}GetCustomizationType,
{urn:types.core_2017_1.platform.webservices.netsuite.com}InitializeType,
{urn:types.core_2017_1.platform.webservices.netsuite.com}InitializeRefType,
{urn:types.core_2017_1.platform.webservices.netsuite.com}InitializeAuxRefType,
{urn:types.core_2017_1.platform.webservices.netsuite.com}DeletedRecordType,
{urn:types.core_2017_1.platform.webservices.netsuite.com}AsyncStatusType,
{urn:types.core_2017_1.platform.webservices.netsuite.com}SearchStringFieldOperator,
{urn:types.core_2017_1.platform.webservices.netsuite.com}SearchLongFieldOperator,
{urn:types.core_2017_1.platform.webservices.netsuite.com}SearchTextNumberFieldOperator,
{urn:types.core_2017_1.platform.webservices.netsuite.com}SearchDoubleFieldOperator,
{urn:types.core_2017_1.platform.webservices.netsuite.com}SearchDateFieldOperator,
{urn:types.core_2017_1.platform.webservices.netsuite.com}SearchEnumMultiSelectFieldOperator,
{urn:types.core_2017_1.platform.webservices.netsuite.com}SearchMultiSelectFieldOperator,
{urn:types.core_2017_1.platform.webservices.netsuite.com}SearchDate,
{urn:types.core_2017_1.platform.webservices.netsuite.com}DurationUnit,
{urn:types.core_2017_1.platform.webservices.netsuite.com}CalendarEventAttendeeResponse,
{urn:types.core_2017_1.platform.webservices.netsuite.com}GetSelectValueFilterOperator,
{urn:types.core_2017_1.platform.webservices.netsuite.com}SignatureAlgorithm
Process finished with exit code 1
This is my client.py:
import ns_config
from netsuite.service import (client,
                              RecordRef,
                              ApplicationInfo,
                              Passport)

def make_passport():
    role = RecordRef(internalId=ns_config.NS_ROLE)
    return Passport(email=ns_config.NS_EMAIL,
                    password=ns_config.NS_PASSWORD,
                    account=ns_config.NS_ACCOUNT,
                    role=role)

def login():
    app_info = ApplicationInfo(applicationId=ns_config.NS_APPID)
    passport = make_passport()
    login = client.service.login(passport=passport, _soapheaders={'applicationInfo': app_info})
    print('Login Response: ', login.status)
    return client, app_info

passport = make_passport()
client, app_info = login()
The WSDL URL is this: https://webservices.sandbox.netsuite.com/wsdl/v2017_1_0/netsuite.wsdl, which is the version of NetSuite that we have.
Can anyone tell me what I'm doing wrong?
I believe the correct namespace for Passport is:
urn:core_2017_1.platform.webservices.netsuite.com
not the one in your error message:
urn:types.core_2017_1.platform.webservices.netsuite.com
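One quick way to test this, if I'm right about the namespace: zeep's get_type also accepts a fully-qualified '{namespace}Name' string, so you can bypass the ns1 prefix mapping entirely (a sketch, assuming the client object from your service.py):

Passport = client.get_type('{urn:core_2017_1.platform.webservices.netsuite.com}Passport')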
I'm new to zeep and NetSuite, but I've noticed that, so far, wherever the type of a parameter or header is known, I can just pass a plain dict with string keys and string or nested-dict values, and zeep will turn it into the correctly typed XML, without my having to give namespaces or instantiate and link up xsd objects. That might help simplify your code.
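For example, a hypothetical sketch of the login call with plain dicts (the field names mirror the Passport construction in the question; adjust to your WSDL):

login = client.service.login(
    passport={'email': ns_config.NS_EMAIL,
              'password': ns_config.NS_PASSWORD,
              'account': ns_config.NS_ACCOUNT,
              'role': {'internalId': ns_config.NS_ROLE}},
    _soapheaders={'applicationInfo': {'applicationId': ns_config.NS_APPID}})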

Python and boto: 403 AccessDenied

import boto
from boto.s3.connection import S3Connection
from boto.s3.connection import OrdinaryCallingFormat
conn = S3Connection(access_key, secret_key, calling_format=OrdinaryCallingFormat())
bucket = conn.get_bucket(file_name)
print(bucket.name)
And the console displays:
raise err
boto.exception.S3ResponseError: S3ResponseError: 403 Forbidden
I have seen many posts about the same problem but I can't figure out how to solve it...
Note that I am not the owner of the bucket, but I succeeded in connecting and downloading the file with a GUI tool. I need to process it by script for automation.
EDIT:
I succeeded in connecting, but I'm still struggling...
I'm starting to have second thoughts about processing it automatically rather than manually...
conn = S3Connection(access_key, secret_key, calling_format=OrdinaryCallingFormat())
bucket = conn.get_bucket(bucket_name, validate=False)
print('bucket:', bucket)
print('bucket.name:', bucket.name)
key = bucket.get_key(file_name)
print("key: {name}\t{size}\t{modified}".format(name=key.name, size=key.size, modified=key.last_modified))
print('bucket.list():', bucket.list(prefix='GA-Exports/Events_3112/DEV'))
for key in bucket.list(prefix='DEV/', delimiter='/'):
    print('bucket list -> key:', key)
Console output:
bucket: <Bucket: GA-Exports/Events_3112/>
bucket.name: GA-Exports/Events_3112/
key: DEV/EVENTS_3113_120002892.csv.gz 3826 Sat, 16 May 2015 10:05:44 GMT
bucket.list(): <boto.s3.bucketlistresultset.BucketListResultSet object at 0x0000000004E9F7F0>
Traceback (most recent call last):
  File "D:\Python\lib\xml\sax\expatreader.py", line 207, in feed
    self._parser.Parse(data, isFinal)
xml.parsers.expat.ExpatError: no element found: line 1, column 0

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "C:\Users\Francois\OneDrive\IDE\Workspace\eclipse\Python_test\etltest.py", line 31, in <module>
    for key in bucket.list(prefix='DEV/',delimiter='/'):
  File "D:\Python\lib\site-packages\boto\s3\bucketlistresultset.py", line 34, in bucket_lister
    encoding_type=encoding_type)
  File "D:\Python\lib\site-packages\boto\s3\bucket.py", line 472, in get_all_keys
    '', headers, **params)
  File "D:\Python\lib\site-packages\boto\s3\bucket.py", line 406, in _get_all
    xml.sax.parseString(body, h)
  File "D:\Python\lib\xml\sax\__init__.py", line 46, in parseString
    parser.parse(inpsrc)
  File "D:\Python\lib\xml\sax\expatreader.py", line 107, in parse
    xmlreader.IncrementalParser.parse(self, source)
  File "D:\Python\lib\xml\sax\xmlreader.py", line 125, in parse
    self.close()
  File "D:\Python\lib\xml\sax\expatreader.py", line 217, in close
    self.feed("", isFinal = 1)
  File "D:\Python\lib\xml\sax\expatreader.py", line 211, in feed
    self._err_handler.fatalError(exc)
  File "D:\Python\lib\xml\sax\handler.py", line 38, in fatalError
    raise exception
xml.sax._exceptions.SAXParseException: <unknown>:1:0: no element found
By default, boto will attempt to validate the bucket when you call get_bucket by performing a HEAD request on the bucket. You may not have permission to do this even though you may have permission to retrieve objects from the bucket. Try this to disable the validation step:
bucket = conn.get_bucket(bucket_name, validate=False)
Also, make sure you are passing in the name of the bucket. Your example code is passing in file_name, which doesn't sound right.
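A sketch of how that might look for the paths in your EDIT, assuming the real bucket is GA-Exports (S3 bucket names cannot contain slashes) and the rest of the path is a key prefix:

conn = S3Connection(access_key, secret_key, calling_format=OrdinaryCallingFormat())
bucket = conn.get_bucket('GA-Exports', validate=False)  # bucket name only
for key in bucket.list(prefix='Events_3112/DEV/'):  # full key prefix, not just 'DEV/'
    print('key:', key.name)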
