I am trying to print output from an ECR image with Python boto3. I can get it to print out the imageDigest, but I would like to add the imageTag. Can anyone think of a way to add the imageTag? Every way I have tried has errored out.
import json
import boto3

def get_reponames():
    client = boto3.client('ecr')
    reponames = [repo['repositoryName'] for repo in client.describe_repositories()['repositories']]
    return reponames

def get_imageids(prepo):
    client = boto3.client('ecr')
    imageids = [img['imageDigest'] for img in client.list_images(repositoryName=prepo)['imageIds']]
    return imageids

def lambda_handler(event, context):
    output = get_reponames()
    for rn in output:
        print(rn)
        outputii = get_imageids(rn)
        for ii in outputii:
            print(ii)
    return {
        'body': json.dumps("hello world")
    }
I'll post the output for list_images below. The above code works to display imageDigest, but I want to add imageTag too.
{
    'imageIds': [
        {
            'imageDigest': 'sha256:764f63476bdff6d83a09ba2a818f0d35757063724a9ac3ba5019c56f74ebf42a',
            'imageTag': 'precise',
        },
    ],
    'ResponseMetadata': {
        '...': '...',
    },
}
Your get_imageids function only returns the imageDigest, so you cannot access the tag in the lambda_handler function. You need to return imageTag as well to read it in lambda_handler. Note the img.get('imageTag', None): untagged images have no imageTag key at all, so a plain img['imageTag'] would raise a KeyError.
import json
import boto3

def get_reponames():
    client = boto3.client('ecr')
    reponames = [repo['repositoryName'] for repo in client.describe_repositories()['repositories']]
    return reponames

def get_imageids(prepo):
    client = boto3.client('ecr')
    imageids = [
        {"digest": img['imageDigest'], "tag": img.get('imageTag', None)}
        for img in client.list_images(repositoryName=prepo)['imageIds']
    ]
    return imageids

def lambda_handler(event, context):
    output = get_reponames()
    for rn in output:
        print(rn)
        outputii = get_imageids(rn)
        for ii in outputii:
            print(f"digest : {ii['digest']}, tag: {ii['tag']}")
    return {
        'body': json.dumps("hello world")
    }
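One caveat worth adding: list_images returns one page of results at a time, so a repository with many images needs pagination. A minimal sketch of get_imageids using boto3's built-in paginator (same response shape as above):

import boto3

def get_imageids(prepo):
    client = boto3.client('ecr')
    imageids = []
    # iterate over every page of results instead of only the first page
    for page in client.get_paginator('list_images').paginate(repositoryName=prepo):
        for img in page['imageIds']:
            imageids.append({"digest": img['imageDigest'], "tag": img.get('imageTag', None)})
    return imageids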
I have a chalice project (1.26.2) which always times out when deployed; chalice local works fine.
This is my project structure:
This is the code in my app.py:
from datetime import datetime
from decimal import Decimal

import boto3
import firebase_admin
from chalice import Chalice, AuthResponse
from chalice.app import AuthRequest
from firebase_admin import credentials, auth

from chalicelib import crud
from chalicelib import models
from chalicelib import schemas
from chalicelib.auth import full_user_from_context, user_from_context
from chalicelib.db import SessionLocal, engine

app = Chalice(app_name='server-aws')

BUCKET = 'meet-app'
s3_client = boto3.client('s3')

cred = credentials.Certificate('chalicelib/serviceAccountKey.json')
firebase_admin.initialize_app(cred)

models.Base.metadata.create_all(bind=engine)

DISTANCE_IN_METERS = 100

_DB = None

def get_db():
    global _DB
    if _DB is None:
        _DB = SessionLocal()
    return _DB

@app.lambda_function(name='test-function')
def create_user(event, context):
    return {'hello': 'world'}

@app.route('/health')
def health():
    return {'status': 'ok'}

@app.authorizer()
def token_authorizer(auth_request: AuthRequest) -> AuthResponse:
    token = auth_request.token
    try:
        decoded_token = auth.verify_id_token(token)
        decoded = decoded_token
        allowed_routes = [
            '/auth',
            '/me',
        ]
        if 'permission_verified' in decoded and decoded['permission_verified'] is True:
            allowed_routes.append('/me/location')
            allowed_routes.append('/nearby')
            allowed_routes.append('/me/profile-image')
        print('routes', allowed_routes)
        return AuthResponse(routes=allowed_routes, principal_id=decoded['sub'], context=decoded)
    except Exception as e:
        print('error', e)
        return AuthResponse(routes=[], principal_id='non-user')

@app.route('/auth', methods=['GET'], authorizer=token_authorizer)
def authorize():
    u = user_from_context(app.current_request.context)
    user = crud.get_user(get_db(), u['uid'])
    if user is None:
        user = crud.create_user(get_db(), schemas.UserCreate(
            uid=u['uid'],
            phone=u['phone_number'],
            permission_verified=True  # TODO: find verification method
        ))
    token = auth.create_custom_token(user.uid, {
        "permission_verified": user.permission_verified,
        "uid": user.uid,
        "phone": user.phone,
        "name": user.name,
        "linkedin": user.linkedin,
        "instagram": user.instagram,
    })
    return {
        'user': user.__json__(),
        'token': token.decode()
    }
@app.route('/me', methods=["PUT"], authorizer=token_authorizer)
def update_me():
    r = app.current_request
    u = full_user_from_context(r.context)
    data = r.json_body
    u = crud.update_user(get_db(), schemas.UserUpdate(
        uid=u.uid,
        name=data.get('name'),
        phone=data.get('phone'),
        instagram=data.get('instagram'),
        linkedin=data.get('linkedin'),
    ))
    if u is None:  # todo: code
        return {
            "error": "could not update"
        }
    return {
        "user": u.__json__()
    }
@app.route('/me/profile-image', methods=["PUT"], content_types=['application/octet-stream'],
           authorizer=token_authorizer)
def update_profile_image():
    r = app.current_request
    u = full_user_from_context(r.context)
    data = r.raw_body
    file_name = u.uid
    tmp_file_name = '/tmp/' + file_name + '.jpg'
    with open(tmp_file_name, 'wb') as tmp_file:
        tmp_file.write(data)
    key = 'profile-images/' + file_name
    try:
        s3_client.upload_file(tmp_file_name, BUCKET, key, ExtraArgs={'ACL': 'public-read'})
    except Exception as e:
        app.log.error(e)
        return {
            "error": str(e)
        }
    url = f'https://{BUCKET}.s3.amazonaws.com/{key}'
    u = crud.update_user(get_db(), schemas.UserUpdate(
        uid=u.uid,
        profile_image_url=url
    ))
    return {
        "url": url
    }
@app.route('/me/location', methods=["PUT"], authorizer=token_authorizer)
def update_me_location():
    r = app.current_request
    u = full_user_from_context(r.context)
    data = r.json_body
    lat = Decimal(str(data.get('latitude')))
    lng = Decimal(str(data.get('longitude')))
    loc = crud.update_user_location(get_db(), schemas.UserLocationUpdate(
        uid=u.uid,
        latitude=lat,
        longitude=lng,
        timestamp=datetime.now(),
        geo=f'POINT({lng} {lat})'
    ))
    if loc is None:
        loc = crud.create_user_location(get_db(), schemas.UserLocationCreate(
            uid=u.uid,
            latitude=lat,
            longitude=lng
        ))
    loc = schemas.UserLocationGet(
        uid=u.uid,
        user=schemas.UserGet(
            uid=u.uid,
            name=u.name,
            linkedin=u.linkedin,
            instagram=u.instagram,
            profile_image_url=u.profile_image_url
        ),
        latitude=loc.latitude,
        longitude=loc.longitude,
        timestamp=loc.timestamp.isoformat()
    )
    return {
        'loc': loc.json()
    }
@app.route('/nearby', methods=["GET"], authorizer=token_authorizer)
def nearby():
    r = app.current_request
    u = full_user_from_context(r.context)
    user_location = crud.get_user_location(get_db(), u.uid)
    if user_location is None:
        return {
            "error": "no user location"
        }  # todo: better error
    nearby_users = crud.get_nearby_users(get_db(), u.uid, schemas.UserLocationGet(
        uid=user_location.user_id,
        latitude=user_location.latitude,
        longitude=user_location.longitude,
        user=u,
        timestamp=user_location.timestamp.isoformat()
    ), DISTANCE_IN_METERS)
    return {
        "nearby_distance_in_meters": DISTANCE_IN_METERS,
        "nearby_users": list(map(lambda x: schemas.UserLocationGet(
            uid=x.user_id,
            latitude=x.latitude,
            longitude=x.longitude,
            timestamp=x.timestamp.isoformat(),
            user=schemas.UserGet(
                uid=x.user.uid,
                name=x.user.name,
                instagram=x.user.instagram,
                linkedin=x.user.linkedin,
                profile_image_url=x.user.profile_image_url
            )
        ).dict(), nearby_users))
    }
This is the response when I invoke the test-function from the AWS console:
This is what I get when I do chalice logs:
Traceback (most recent call last):
[ERROR] Runtime.ImportModuleError: Unable to import module 'app': No module named 'crud'
Traceback (most recent call last):
[ERROR] Runtime.ImportModuleError: Unable to import module 'app': No module named 'crud'
2021-11-14 19:29:04.197000 88eb68 2021-11-14T19:29:04.197Z c143dcd4-e212-41f1-a786-7d86e4b58e59 Task timed out after 60.06 seconds
This is my requirements.txt:
chalice~=1.26.2
firebase-admin
boto3~=1.18.53
botocore
sqlalchemy
pydantic
# psycopg2
GeoAlchemy2
psycopg2-binary
I need to use psycopg2 so maybe that's the problem.
Every http request results in a 504 time out.
In local mode everything works fine.
Thanks in advance.
Your chalicelib package folder's __init__.py (it must be written exactly like this) has an extra underscore at the end, so Python's import system doesn't recognize the folder as a package, hence the error.
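For reference, a hypothetical layout that Chalice can import; the file names inside chalicelib/ are guesses based on the imports at the top of app.py:

server-aws/
├── app.py
├── requirements.txt
└── chalicelib/
    ├── __init__.py      # must be named exactly __init__.py, no trailing underscore
    ├── auth.py
    ├── crud.py
    ├── db.py
    ├── models.py
    └── schemas.py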
I'm having some problems with checking item existence in a DynamoDB table. Code so far:
#app.route("/", methods=['GET'])
def index1():
name = request.form["fullname"]
get_res=requests.get('api-url',json=name)
temp=get_res.json()
get_res=temp['body']
return get_res
Lambda get_item function:
def lambda_handler(event, context):
    try:
        get_response = table.get_item(
            Key={
                'name': event['name']
            }
        )
        res = get_response
    except:
        res = {
            'result': 'not in DB'
        }
    return {
        'statusCode': 200,
        'body': res
    }
Result I'm getting (even if the input name is in the DB):
{
    "result": "not in DB"
}
I think I somehow managed to work around it:
get_res = requests.get('https://g7ry7a4ix6.execute-api.eu-central-1.amazonaws.com/prod/req', json=item)
temp = get_res.json()
try:
    if temp['body']['Item']['name']:
        pass
except:
    res = requests.post('api-url', json=item)
return render_template('reg.html', name=name, phone=phone)
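For what it's worth, the existence check can also be done directly in the Lambda: get_item only includes an 'Item' key in its response when the item was actually found, so there is no exception to catch in the "not found" case. A minimal sketch (the table name and key schema here are assumptions):

import boto3

dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('names-table')  # hypothetical table name

def lambda_handler(event, context):
    response = table.get_item(Key={'name': event['name']})
    # 'Item' is present in the response only when the key exists in the table
    if 'Item' in response:
        return {'statusCode': 200, 'body': response['Item']}
    return {'statusCode': 200, 'body': {'result': 'not in DB'}}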
I am trying to deploy my XGBoost model into Kubernetes. I am facing a problem in writing the Flask code. Here is the code (imported from GitHub). Whenever I try to deploy it to the web server, I get the error message: invalid parameters. Please help me to solve this issue, and thank you in advance.
import json
import pickle

import numpy as np
from flask import Flask, request

flask_app = Flask(__name__)

# ML model path
model_path = "Y:/Docker_Tests/Deploy-ML-model-master/Deploy-ML-model-master/ML_Model/model2.pkl"

@flask_app.route('/', methods=['GET'])
def index_page():
    return_data = {
        "error": "0",
        "message": "Successful"
    }
    return flask_app.response_class(response=json.dumps(return_data), mimetype='application/json')

@flask_app.route('/predict', methods=['GET'])
def model_deploy():
    try:
        age = request.form.get('age')
        bs_fast = request.form.get('BS_Fast')
        bs_pp = request.form.get('BS_pp')
        plasma_r = request.form.get('Plasma_R')
        plasma_f = request.form.get('Plasma_F')
        HbA1c = request.form.get('HbA1c')
        fields = [age, bs_fast, bs_pp, plasma_r, plasma_f, HbA1c]
        if not None in fields:
            # Data preprocessing: convert the values to float
            age = float(age)
            bs_fast = float(bs_fast)
            bs_pp = float(bs_pp)
            plasma_r = float(plasma_r)
            plasma_f = float(plasma_f)
            hbA1c = float(HbA1c)
            result = [age, bs_fast, bs_pp, plasma_r, plasma_f, HbA1c]
            # Passing data to model & loading the model from disk
            classifier = pickle.load(open(model_path, 'rb'))
            prediction = classifier.predict([result])[0]
            conf_score = np.max(classifier.predict_proba([result])) * 100
            return_data = {
                "error": '0',
                "message": 'Successful',
                "prediction": prediction,
                "confidence_score": conf_score.round(2)
            }
        else:
            return_data = {
                "error": '1',
                "message": "Invalid Parameters"
            }
    except Exception as e:
        return_data = {
            'error': '2',
            "message": str(e)
        }
    return flask_app.response_class(response=json.dumps(return_data), mimetype='application/json')

if __name__ == "__main__":
    flask_app.run(host='0.0.0.0', port=9091, debug=True)
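One possible cause, offered as a guess: request.form is only populated from a POST/PUT body, so on a GET request every request.form.get(...) above returns None and the code always falls into the "Invalid Parameters" branch. A minimal sketch that reads the same six fields from the query string instead (field names taken from the code above):

from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/predict', methods=['GET'])
def predict():
    # On a GET request, parameters arrive in the query string
    # (e.g. /predict?age=42&BS_Fast=5.1&...), so read request.args, not request.form.
    names = ('age', 'BS_Fast', 'BS_pp', 'Plasma_R', 'Plasma_F', 'HbA1c')
    values = [request.args.get(n) for n in names]
    if None in values:
        return jsonify({"error": "1", "message": "Invalid Parameters"})
    return jsonify({"received": [float(v) for v in values]})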
I'm trying to use ElasticSearch 6.4 for the first time with an existing web application written in Python/Django. I have some issues and I would like to understand why they happen and how I can solve them.
###########
# Existing : #
###########
In my application, it's possible to upload document files (.pdf or .doc, for example). Then, I have a search function in my application which lets me search over documents indexed by ElasticSearch when they are uploaded.
Document titles are always written the same way:
YEAR - DOC_TYPE - ORGANISATION - document_title.extension
For example:
1970_ANNUAL_REPORT_APP-TEST_1342 - loremipsum.pdf
The search is always done among doc_type = ANNUAL_REPORT, because there are several doc_types (ANNUAL_REPORT, OTHERS, ...).
##################
# My environment : #
##################
Here is some data from my ElasticSearch setup. I'm still learning the ES commands too.
$ curl -XGET http://127.0.0.1:9200/_cat/indices?v
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
yellow open app 5T0HZTbmQU2-ZNJXlNb-zg 5 1 742 2 396.4kb 396.4kb
So my index is app.
For the above example, if I look up this document: 1970_ANNUAL_REPORT_APP-TEST_1342 - loremipsum.pdf, I get:
$ curl -XGET http://127.0.0.1:9200/app/annual-report/1343?pretty
{
  "_index" : "app",
  "_type" : "annual-report",
  "_id" : "1343",
  "_version" : 33,
  "found" : true,
  "_source" : {
    "attachment" : {
      "date" : "2010-03-04T12:08:00Z",
      "content_type" : "application/pdf",
      "author" : "manshanden",
      "language" : "et",
      "title" : "Microsoft Word - Test document Word.doc",
      "content" : "some text ...",
      "content_length" : 3926
    },
    "relative_path" : "app_docs/APP-TEST/1970_ANNUAL_REPORT_APP-TEST_1342.pdf",
    "title" : "1970_ANNUAL_REPORT_APP-TEST_1342 - loremipsum.pdf"
  }
}
Now, with the search part of my web application, I would like to find this document by searching: 1970.
def search_in_annual(self, q):
    try:
        response = self.es.search(
            index='app', doc_type='annual-report',
            q=q, _source_exclude=['data'], size=5000)
    except ConnectionError:
        return -1, None
    total = 0
    hits = []
    if response:
        for hit in response["hits"]["hits"]:
            hits.append({
                'id': hit['_id'],
                'title': hit['_source']['title'],
                'file': hit['_source']['relative_path'],
            })
        total = response["hits"]["total"]
    return total, hits
But when q=1970, the result is 0.
If I write:
response = self.es.search(
    index='app', doc_type='annual-report',
    q="q*", _source_exclude=['data'], size=5000)
it returns my document, but also many documents with no 1970 in the title or the document content.
#################
# My global code : #
#################
This is the global class which manage indexing functions :
class EdqmES(object):
host = 'localhost'
port = 9200
es = None
def __init__(self, *args, **kwargs):
self.host = kwargs.pop('host', self.host)
self.port = kwargs.pop('port', self.port)
# Connect to ElasticSearch server
self.es = Elasticsearch([{
'host': self.host,
'port': self.port
}])
def __str__(self):
return self.host + ':' + self.port
#staticmethod
def file_encode(filename):
with open(filename, "rb") as f:
return b64encode(f.read()).decode('utf-8')
def create_pipeline(self):
body = {
"description": "Extract attachment information",
"processors": [
{"attachment": {
"field": "data",
"target_field": "attachment",
"indexed_chars": -1
}},
{"remove": {"field": "data"}}
]
}
self.es.index(
index='_ingest',
doc_type='pipeline',
id='attachment',
body=body
)
def index_document(self, doc, bulk=False):
filename = doc.get_filename()
try:
data = self.file_encode(filename)
except IOError:
data = ''
print('ERROR with ' + filename)
# TODO: log error
item_body = {
'_id': doc.id,
'data': data,
'relative_path': str(doc.file),
'title': doc.title,
}
if bulk:
return item_body
result1 = self.es.index(
index='app', doc_type='annual-report',
id=doc.id,
pipeline='attachment',
body=item_body,
request_timeout=60
)
print(result1)
return result1
def index_annual_reports(self):
list_docs = Document.objects.filter(category=Document.OPT_ANNUAL)
print(list_docs.count())
self.create_pipeline()
bulk = []
inserted = 0
for doc in list_docs:
inserted += 1
bulk.append(self.index_document(doc, True))
if inserted == 20:
inserted = 0
try:
print(helpers.bulk(self.es, bulk, index='app',
doc_type='annual-report',
pipeline='attachment',
request_timeout=60))
except BulkIndexError as err:
print(err)
bulk = []
if inserted:
print(helpers.bulk(
self.es, bulk, index='app',
doc_type='annual-report',
pipeline='attachment', request_timeout=60))
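A side note on create_pipeline: the elasticsearch-py client also exposes a dedicated ingest API for registering pipelines, which may be clearer than indexing into _ingest directly. A minimal equivalent sketch with the same pipeline body:

from elasticsearch import Elasticsearch

es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
body = {
    "description": "Extract attachment information",
    "processors": [
        {"attachment": {"field": "data", "target_field": "attachment", "indexed_chars": -1}},
        {"remove": {"field": "data"}}
    ]
}
# registers (or replaces) the ingest pipeline under the id 'attachment'
es.ingest.put_pipeline(id='attachment', body=body)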
My document is indexed when it's submitted, via a Django form and a post_save signal:
@receiver(signals.post_save, sender=Document, dispatch_uid='add_new_doc')
def add_document_handler(sender, instance=None, created=False, **kwargs):
    """ When a document is created, index the new annual report (only) with Elasticsearch and update the
    conformity date if the document is a new declaration of conformity

    :param sender: Class which is concerned
    :type sender: the model class
    :param instance: Object which was just saved
    :type instance: model instance
    :param created: True for a creation, False for an update
    :type created: boolean
    :param kwargs: Additional parameters of the signal
    :type kwargs: dict
    """
    if not created:
        return
    # Index only annual reports
    elif instance.category == Document.OPT_ANNUAL:
        es = EdqmES()
        es.index_document(instance)
This is what I've done and it seems to work:
def search_in_annual(self, q):
    try:
        response = self.es.search(
            index='app', doc_type='annual-report', q=q, _source_exclude=['data'], size=5000)
        if response['hits']['total'] == 0:
            response = self.es.search(
                index='app', doc_type='annual-report',
                body={
                    "query":
                        {"prefix": {"title": q}},
                }, _source_exclude=['data'], size=5000)
    except ConnectionError:
        return -1, None
    total = 0
    hits = []
    if response:
        for hit in response["hits"]["hits"]:
            hits.append({
                'id': hit['_id'],
                'title': hit['_source']['title'],
                'file': hit['_source']['relative_path'],
            })
        total = response["hits"]["total"]
    return total, hits
It lets me search over the title (by prefix) and the content to find my document.
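A possible explanation for why the plain q=1970 search found nothing, offered tentatively: the standard analyzer keeps underscore-joined strings such as 1970_ANNUAL_REPORT_APP-TEST_1342 together as a single token (underscores don't split words under its tokenization rules), so there is no standalone 1970 term in the index to match, while a prefix query on the title does match the start of that long token. If that is the cause, a wildcard query should behave similarly to the prefix fallback; a minimal sketch:

response = self.es.search(
    index='app', doc_type='annual-report',
    body={
        "query": {
            # matches any title token starting with the query text, e.g. '1970*'
            "wildcard": {"title": q + "*"}
        }
    }, _source_exclude=['data'], size=5000)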
I'm working on a project with Python (3.6) & Django (1.10) in which I need to create a Google Cloud Function using an API request.
How can I upload code in the form of a zip archive while creating that function?
Here's what I have tried:
From views.py:
def post(self, request, *args, **kwargs):
    if request.method == 'POST':
        post_data = request.POST.copy()
        post_data.update({'user': request.user.pk})
        form = forms.SlsForm(post_data, request.FILES)
        print('get post request')
        if form.is_valid():
            func_obj = form
            func_obj.user = request.user
            func_obj.project = form.cleaned_data['project']
            func_obj.fname = form.cleaned_data['fname']
            func_obj.fmemory = form.cleaned_data['fmemory']
            func_obj.entryPoint = form.cleaned_data['entryPoint']
            func_obj.sourceFile = form.cleaned_data['sourceFile']
            func_obj.sc_github = form.cleaned_data['sc_github']
            func_obj.sc_inline_index = form.cleaned_data['sc_inline_index']
            func_obj.sc_inline_package = form.cleaned_data['sc_inline_package']
            func_obj.bucket = form.cleaned_data['bucket']
            func_obj.save()
            service = discovery.build('cloudfunctions', 'v1', http=views.getauth(), cache_discovery=False)
            requ = service.projects().locations().functions().generateUploadUrl(
                parent='projects/' + func_obj.project + '/locations/us-central1', body={})
            resp = requ.execute()
            print(resp)
            try:
                auth = views.getauth()
                # Prepare Request Body
                req_body = {
                    "CloudFunction": {
                        "name": func_obj.fname,
                        "entryPoint": func_obj.entryPoint,
                        "timeout": '60s',
                        "availableMemoryMb": func_obj.fmemory,
                        "sourceArchiveUrl": func_obj.sc_github,
                    },
                    "sourceUploadUrl": func_obj.bucket,
                }
                service = discovery.build('cloudfunctions', 'v1beta2', http=auth, cache_discovery=False)
                func_req = service.projects().locations().functions().create(
                    location='projects/' + func_obj.project + '/locations/-',
                    body=req_body)
                func_res = func_req.execute()
                print(func_res)
                return HttpResponse('Submitted')
            except:
                return HttpResponse(status=500)
    return HttpResponse('Sent!')
Updated Code below:
if form.is_valid():
    func_obj = form
    func_obj.user = request.user
    func_obj.project = form.cleaned_data['project']
    func_obj.fname = form.cleaned_data['fname']
    func_obj.fmemory = form.cleaned_data['fmemory']
    func_obj.entryPoint = form.cleaned_data['entryPoint']
    func_obj.sourceFile = form.cleaned_data['sourceFile']
    func_obj.sc_github = form.cleaned_data['sc_github']
    func_obj.sc_inline_index = form.cleaned_data['sc_inline_index']
    func_obj.sc_inline_package = form.cleaned_data['sc_inline_package']
    func_obj.bucket = form.cleaned_data['bucket']
    func_obj.save()
    #######################################################################
    # FIRST APPROACH FOR FUNCTION CREATION USING STORAGE BUCKET
    #######################################################################
    file_name = os.path.join(IGui.settings.BASE_DIR, 'media/archives/', func_obj.sourceFile.name)
    print(file_name)
    service = discovery.build('cloudfunctions', 'v1')
    func_api = service.projects().locations().functions()
    url_svc_req = func_api.generateUploadUrl(parent='projects/'
                                                    + func_obj.project
                                                    + '/locations/us-central1',
                                             body={})
    url_svc_res = url_svc_req.execute()
    print(url_svc_res)
    upload_url = url_svc_res['uploadUrl']
    print(upload_url)
    headers = {
        'content-type': 'application/zip',
        'x-goog-content-length-range': '0,104857600'
    }
    print(requests.put(upload_url, headers=headers, data=func_obj.sourceFile.name))
    auth = views.getauth()
    # Prepare Request Body
    name = "projects/{}/locations/us-central1/functions/{}".format(func_obj.project, func_obj.fname)
    print(name)
    req_body = {
        "name": name,
        "entryPoint": func_obj.entryPoint,
        "timeout": "3.5s",
        "availableMemoryMb": func_obj.fmemory,
        "sourceUploadUrl": upload_url,
        "httpsTrigger": {},
    }
    service = discovery.build('cloudfunctions', 'v1')
    func_api = service.projects().locations().functions()
    response = func_api.create(location='projects/' + func_obj.project + '/locations/us-central1',
                               body=req_body).execute()
    pprint.pprint(response)
Now the function is created successfully, but it fails because the source code doesn't get uploaded to the storage bucket. The problem is probably somewhere here:
upload_url = url_svc_res['uploadUrl']
print(upload_url)
headers = {
    'content-type': 'application/zip',
    'x-goog-content-length-range': '0,104857600'
}
print(requests.put(upload_url, headers=headers, data=func_obj.sourceFile.name))
In the request body you have a dictionary "CloudFunction" nested inside the request. The contents of "CloudFunction" should be directly in the request body:
request_body = {
    "name": parent + '/functions/' + name,
    "entryPoint": entry_point,
    "sourceUploadUrl": upload_url,
    "httpsTrigger": {}
}
I recommend using "Try this API" to discover the structure of projects.locations.functions.create.
"sourceArchiveUrl" and "sourceUploadUrl" can't appear together. This is explained in the resource CloudFunction:
// Union field source_code can be only one of the following:
"sourceArchiveUrl": string,
"sourceRepository": { object(SourceRepository) },
"sourceUploadUrl": string,
// End of list of possible types for union field source_code.
In the rest of the answer I assume that you want to use "sourceUploadUrl". It requires you to pass it a URL returned to you by .generateUploadUrl(...).execute(). See documentation:
sourceUploadUrl -> string
The Google Cloud Storage signed URL used for source uploading,
generated by [google.cloud.functions.v1.GenerateUploadUrl][]
But before passing it you need to upload a zip file to this URL:
curl -X PUT "${URL}" -H 'content-type:application/zip' -H 'x-goog-content-length-range: 0,104857600' -T test.zip
or in python:
headers = {
    'content-type': 'application/zip',
    'x-goog-content-length-range': '0,104857600'
}
print(requests.put(upload_url, headers=headers, data=data))
This is the trickiest part:
- The case matters, and the header names should be lowercase, because the signature is calculated from a hash (here).
- You need 'content-type': 'application/zip'. I deduced this one logically, because the documentation doesn't mention it (here).
- x-goog-content-length-range: min,max is obligatory for all PUT requests to Cloud Storage and is assumed implicitly in this case. More on it here.
- 104857600, the max in the previous entry, is a magical number which I didn't find mentioned anywhere.
- data is a file-like object.
I also assume that you want to use httpsTrigger. For a cloud function you can only choose one trigger field; here it's said that trigger is a Union field. For httpsTrigger, however, you can just leave it as an empty dictionary, as its content does not affect the outcome (as of now):
request_body = {
    "name": parent + '/functions/' + name,
    "entryPoint": entry_point,
    "sourceUploadUrl": upload_url,
    "httpsTrigger": {}
}
You can safely use 'v1' instead of 'v1beta2' for .create().
Here is a full working example. It would be too complicated if I presented it as part of your code, but you can easily integrate it.
import pprint
import zipfile
import requests
from tempfile import TemporaryFile
from googleapiclient import discovery

project_id = 'your_project_id'
region = 'us-central1'
parent = 'projects/{}/locations/{}'.format(project_id, region)
print(parent)
name = 'ExampleFunctionFibonacci'
entry_point = "fibonacci"

service = discovery.build('cloudfunctions', 'v1')
CloudFunctionsAPI = service.projects().locations().functions()
upload_url = CloudFunctionsAPI.generateUploadUrl(parent=parent, body={}).execute()['uploadUrl']
print(upload_url)

payload = """/**
 * Responds to any HTTP request that can provide a "message" field in the body.
 *
 * @param {Object} req Cloud Function request context.
 * @param {Object} res Cloud Function response context.
 */
exports.""" + entry_point + """= function """ + entry_point + """ (req, res) {
  if (req.body.message === undefined) {
    // This is an error case, as "message" is required
    res.status(400).send('No message defined!');
  } else {
    // Everything is ok
    console.log(req.body.message);
    res.status(200).end();
  }
};"""

with TemporaryFile() as data:
    with zipfile.ZipFile(data, 'w', zipfile.ZIP_DEFLATED) as archive:
        archive.writestr('function.js', payload)
    data.seek(0)
    headers = {
        'content-type': 'application/zip',
        'x-goog-content-length-range': '0,104857600'
    }
    print(requests.put(upload_url, headers=headers, data=data))

# Prepare Request Body
# https://cloud.google.com/functions/docs/reference/rest/v1/projects.locations.functions#resource-cloudfunction
request_body = {
    "name": parent + '/functions/' + name,
    "entryPoint": entry_point,
    "sourceUploadUrl": upload_url,
    "httpsTrigger": {},
    "runtime": 'nodejs8'
}

print('https://{}-{}.cloudfunctions.net/{}'.format(region, project_id, name))

response = CloudFunctionsAPI.create(location=parent, body=request_body).execute()
pprint.pprint(response)
Open and upload a zip file like the following:
file_name = os.path.join(IGui.settings.BASE_DIR, 'media/archives/', func_obj.sourceFile.name)
headers = {
    'content-type': 'application/zip',
    'x-goog-content-length-range': '0,104857600'
}
with open(file_name, 'rb') as data:
    print(requests.put(upload_url, headers=headers, data=data))
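One last hedged sketch, reusing the names from the full example above (service, CloudFunctionsAPI, parent, request_body): create() returns a long-running operation, so if you want to know when the deployment actually finishes you can poll the operation by name until it reports done:

import time

operation = CloudFunctionsAPI.create(location=parent, body=request_body).execute()
# create() returns a long-running operation; poll it until it completes
while not operation.get('done', False):
    time.sleep(5)
    operation = service.operations().get(name=operation['name']).execute()
# on completion the operation carries either an 'error' or a 'response' field
pprint.pprint(operation.get('error') or operation.get('response'))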