Odoo 13: upload video files via the MinIO Python API - python

I'm trying to develop a module to upload video files via the MinIO Python API.
The file can be uploaded to MinIO, but it cannot be viewed via a URL like http://localhost:9000/lms-videos/video/output.mp4. Also, the file uploaded through MinIO directly is 19.39 MB, while the one uploaded via the API turns out to be 25+ MB, and I don't know what causes this.
The following is part of my code:
# minio client
@api.model
def _get_minio_client(self):
    host = '192.168.1.102:9000'
    access_key = 'minioadmin'
    secret_key = 'minioadmin'
    if not all((host, access_key, secret_key)):
        raise exceptions.UserError('Incorrect configuration of MinIO')
    return Minio(
        host,
        access_key=access_key,
        secret_key=secret_key,
        secure=False
    )

# upload
@api.model
def _store_file_write(self):
    client = self._get_minio_client()
    bin_data = self.datas_minio
    fname = "output_test"
    #client.put_object('lms-videos', 'videos/' + fname + '.mp4', io.BytesIO(self.datas_minio), len(bin_data), 'video/mp4')
    with io.BytesIO(self.datas_minio) as bin_data_io:
        client.put_object('lms-videos',
                          'videos/' + fname + '.mp4',
                          bin_data_io,
                          len(bin_data),
                          'video/mp4')

@api.depends('document_id', 'slide_type', 'mime_type', 'external_url')
def _compute_embed_code(self):
    res = super(Slide, self)._compute_embed_code()
    for record in self:
        if record.slide_type == 'miniovideo':
            self._store_file_write()
            content_url = 'http://localhost:9000/lms-videos/videos/' + record.name + '.mp4'
            record.embed_code = '<video class="miniovideo" controls controlsList="nodownload"><source src="' + content_url + '" type=MPEG-4/></video>'

@api.onchange('datas_minio')
def _on_change_datas(self):
    res = super(Slide, self)._on_change_datas()
    if self.datas_minio:
        #fname = self.datas_minio.decode("utf-8")
        #bin_data = self.datas_minio
        self._store_file_write()
        #self._get_minio_client().put_object('lms-videos', '/videos/' + fname + '.mp4', io.BytesIO(bin_data), len(bin_data), 'video/mp4')
    return res

Problem fixed by adding b64decode. Odoo stores Binary fields base64-encoded, so the raw field value is roughly 4/3 of the file size (19.39 MB × 4/3 ≈ 25.8 MB) and is not a playable MP4 until it is decoded:
@api.model
def _store_file_write(self):
    client = self._get_minio_client()
    bin_data = base64.b64decode(self.datas_minio)
    fsize = len(bin_data)
    fname = "output_test"
    with io.BytesIO(bin_data) as bin_data_io:
        client.put_object('lms-videos',
                          #'videos/' + fname + '.mp4',
                          'videos/output_test.mp4',
                          bin_data_io,
                          fsize,
                          'video/mp4')
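As for the other half of the question (the object not being viewable at the raw http://localhost:9000/... URL): MinIO buckets are private by default, so a plain GET on the object path is rejected unless the bucket policy allows anonymous downloads. A minimal sketch of one way around this, assuming the same client and bucket/object names as above, is to generate a presigned download URL instead of hard-coding the public one:

from datetime import timedelta

# Sketch only: reuses the client and the bucket/object names from the snippet above.
# presigned_get_object() returns a time-limited URL that works even while the
# 'lms-videos' bucket stays private.
client = self._get_minio_client()
content_url = client.presigned_get_object(
    'lms-videos',
    'videos/output_test.mp4',
    expires=timedelta(hours=12),
)

Alternatively, a public-read bucket policy on 'lms-videos' would make the direct URL work; the presigned approach just avoids opening the bucket up.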

Related

upload image from pixabay api to wordpress using rest api and python

I am new to Python and want to know how to upload an image from the Pixabay API (or another source) to WordPress using the REST API and Python.
When I use this:
url = url_image = "https://pixabay.com/api/?key={API_KEY}&q={keyword}.jpg"
it shows this message:
{"code":"rest_upload_unknown_error","message":"Sorry, you are not allowed to upload this file type.","data":{"status":500}}
import base64, requests
from tempfile import NamedTemporaryFile

# keyword = input('Enter Your name')
keyword = 'flower'

def header(user, password):
    credentials = user + ':' + password
    token = base64.b64encode(credentials.encode())
    header_json = {'Authorization': 'Basic ' + token.decode('utf-8'),
                   'Content-Disposition': 'attachment; filename=%s' % "test1.jpg"}
    return header_json

def upload_image_to_wordpress(file_path, header_json):
    media = {'file': file_path, 'caption': 'f{keyword}'}
    responce = requests.post("https://yourwebsite.com/wp-json/wp/v2/media", headers=header_json, files=media)
    print(responce.text)

heder = header("username", "password")  # username, application password
url_image = "https://pixabay.com/api/?key={API_KEY}&q={keyword}.jpg"
# url = "https://cdn.pixabay.com/photo/2021/11/30/08/24/strawberries-6834750_1280.jpg"
raw = requests.get(f'{url_image}').content
with NamedTemporaryFile(delete=False, mode="wb", suffix=".jpg") as img:
    img.write(raw)
    # print(f.file())
c = open(img.name, "rb")
upload_image_to_wordpress(c, heder)
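A likely cause, for what it's worth: the https://pixabay.com/api/ endpoint returns JSON search results rather than image bytes (and the URL above is not an f-string, so {API_KEY} and {keyword} are never substituted), which means the temporary ".jpg" file actually contains JSON, and WordPress rejects it as an unknown file type. A hedged sketch of fetching a real image URL out of that JSON first, assuming a valid Pixabay key and the header() helper defined above:

import requests

API_KEY = 'your-pixabay-key'   # assumption: placeholder value
keyword = 'flower'

# The API returns JSON metadata; the actual image lives at largeImageURL/webformatURL.
hits = requests.get(f'https://pixabay.com/api/?key={API_KEY}&q={keyword}').json()['hits']
image_bytes = requests.get(hits[0]['largeImageURL']).content

# Upload the raw image bytes to WordPress (auth and filename headers come from header() above).
wp_headers = header("username", "password")
wp_headers['Content-Type'] = 'image/jpeg'
resp = requests.post("https://yourwebsite.com/wp-json/wp/v2/media",
                     headers=wp_headers, data=image_bytes)
print(resp.status_code, resp.text)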

Issue with a Cloud Function in Python

I have a Google Cloud Function which needs to connect to a URL, get data in the form of CSV files, and store them in a bucket; this is what is written in the Python code.
When I test the function it compiles successfully, but it is not working at all. When I check the log it gives the below-mentioned error:
favt_LnT_acn_blackline_data_pull_func43jttmffma0g Invalid constructor input for AccessSecretVersionRequest: 'projects/gcp-favt-acn-rpt-dev/secrets/blackline_api_key/versions/latest'
Please find the code below and suggest a fix.
Thanks,
Vithal
import base64
import logging
import requests
#import pandas as pd
#from pandas import json_normalize
import json
import os
import datetime
from datetime import datetime as dt
import pytz
from google.cloud import storage
from google.cloud import secretmanager


def delete_and_upload_blob(landing_bucket_name,
                           source_file_name,
                           landing_blob_name,
                           retention_bucket_name,
                           file_retention_flag,
                           retn_file_suffix,
                           rpt_last_run_file):
    storage_client = storage.Client()
    bucket = storage_client.bucket(landing_bucket_name)
    blob = bucket.blob(landing_blob_name)
    rpt_last_run_blob = bucket.blob('some.csv')
    retention_bucket = storage_client.bucket(retention_bucket_name)

    if blob.exists(storage_client):
        #Delete the old file
        blob.delete()
        print('File {} is deleted from Cloud Storage before Upload'.format(landing_blob_name))
    else:
        print('No Such File Exists in Storage Bucket to Delete. So, proceeding with Upload')

    #Upload new one
    blob.upload_from_filename(source_file_name)
    print("File {} uploaded to Bucket {} With Name {}.".format(source_file_name, bucket, landing_blob_name))

    if file_retention_flag == 'Y':
        #Copy the last file of the day to retention bucket
        new_file_name = retn_file_suffix + '_' + landing_blob_name
        blob_copy = bucket.copy_blob(blob, retention_bucket, new_file_name)
        print('File {} is copied to Retention Bucket {}'.format(new_file_name, retention_bucket))

    if rpt_last_run_blob.exists(storage_client):
        #Delete the old file
        rpt_last_run_blob.delete()
        print('File {} is deleted from Cloud Storage before Upload'.format(rpt_last_run_blob))
    else:
        print('No Such File Exists in Storage Bucket to Delete. So, proceeding with Upload')

    #Upload new one
    rpt_last_run_blob.upload_from_filename(rpt_last_run_file)
    print("File {} uploaded to Bucket {} With Name {}.".format(rpt_last_run_file, bucket, 'Reports_Latest_Run_time.csv'))


def api_request():
    et = pytz.timezone("US/Eastern")
    current_et_time = dt.now().astimezone(et)
    print('Current ET Time:', current_et_time)
    pt = pytz.timezone("US/Pacific")
    ut = pytz.timezone("UTC")

    blackline_base_url = "https://....com"
    blackline_sts_url = blackline_base_url + "/authorize/connect/token"

    project_id = 'gcp-favt-acn-dev'
    secret_id = '###_api_key'
    secret_client = secretmanager.SecretManagerServiceClient()
    secret_name = secret_client.secret_version_path(project_id, secret_id, 'latest')
    secret_resp = secret_client.access_secret_version(secret_name)
    api_key = secret_resp.payload.data.decode('UTF-8')

    grant_type = 'password'
    scope = '####'
    username = '####'
    payload = 'grant_type=' + grant_type + '&scope=' + scope + '&username=' + username + '&password=' + api_key
    sts_headers = {'Authorization': 'Basic dXBzOk5KXXx2VENsSiEtRw==',
                   'Content-Type': 'application/x-www-form-urlencoded',
                   'Cookie': 'BLSIAPPEN=!bpJj4AOTHPcaqipWtDI6FrozN629M9xYLA/sbM1DWVH+jjuY5fgHVMACha2rIapXRoB7CcqnlaHgBw=='}
    response = requests.request("POST", ###_sts_url, headers=sts_headers, data=payload)

    if response.ok:
        sts_response = response.json()
        access_token = sts_response['access_token']
        print(access_token)

        blackline_rpt_submit_url = ##_base_url + '/api/queryruns'
        rpt_payload = ''
        blackline_rpt_api_headers = {'Authorization': 'Bearer {}'.format(access_token),
                                     'Content-Type': 'text/plain'}
        rpt_resp = requests.request("GET", blackline_rpt_submit_url, headers=blackline_rpt_api_headers, data=rpt_payload)
        print(rpt_resp.text)
        jl = json.loads(rpt_resp.text)

        reports_list = []
        rprts_filename = "tmp_rprts.csv"
        rprts_full_path = os.path.join("/tmp", rprts_filename)
        with open(rprts_full_path, 'w') as f:
            f.write('ReportName,ReportLastRunTime' + '\n')

        hrs = -2
        hrs_to_subtract = datetime.timedelta(hours=hrs)
        two_hrs_ago_time = current_et_time + hrs_to_subtract
        #print(two_hrs_ago_time)  #latest_rpt_check_time
        frmtd_curr_time = two_hrs_ago_time.strftime('%Y-%m-%d %H:%M:%S')
        latest_rpt_check_time = dt.strptime(frmtd_curr_time, '%Y-%m-%d %H:%M:%S')
        print("Latest Report Check Time:", latest_rpt_check_time)

        for each in jl:
            strpd_time = dt.strptime(each['endTime'][0:19], '%Y-%m-%dT%H:%M:%S')
            #print(strpd_time)
            pt_localize = pt.localize(strpd_time)
            #print(pt_localize)
            et_time = pt_localize.astimezone(et)
            #print(et_time)
            frmtd_et_time = et_time.strftime('%Y-%m-%d %H:%M:%S')
            #print(frmtd_et_time)
            cnvrted_endTime = dt.strptime(frmtd_et_time, '%Y-%m-%d %H:%M:%S')
            #print("Report LastRun EndTime:", cnvrted_endTime)
            ut_time = pt_localize.astimezone(ut)
            frmtd_ut_time = ut_time.strftime('%Y-%m-%d %H:%M:%S')
            if cnvrted_endTime > latest_rpt_check_time:
                reports_list.append({each['name']: each['exportUrls'][0]["url"]})
                rpt_last_run = each['name'] + ',' + frmtd_ut_time
                print(rpt_last_run)
                with open(rprts_full_path, 'a') as f:
                    f.write(rpt_last_run + '\n')
                retn_file_suffix = each['endTime'][0:10]
                #print(retn_file_suffix)
                rpt_run_hr = cnvrted_endTime.hour
                #print(rpt_run_hr)
        #############
        print(reports_list)

        for report in reports_list:
            for k in report:
                print(report[k])
                report_fetch_url = blackline_base_url + '/' + report[k]
                print('Report Fetch URL: {}'.format(report_fetch_url))
                filename = "temp_file.csv"
                full_path = os.path.join("/tmp", filename)
                rpt_data = requests.request("GET", report_fetch_url, headers=blackline_rpt_api_headers)
                print(rpt_data.text)
                with open(full_path, 'wb') as tmp_file:
                    tmp_file.write(rpt_data.content)

                #Upload it to Cloud Storage
                landing_bucket_name = "####_dev_landing_bkt"  #CHANGE ME
                source_file_name = os.path.join(full_path)
                rpt_last_run_file = os.path.join(rprts_full_path)
                landing_blob_name = '##.csv'  #CHANGE ME
                retention_bucket_name = '####_dev_retention_bkt'
                print('file retention check')
                if (rpt_run_hr >= 22):
                    file_retention_flag = 'Y'
                else:
                    file_retention_flag = 'N'
                print(file_retention_flag)
                delete_and_upload_blob(landing_bucket_name,
                                       source_file_name,
                                       landing_blob_name,
                                       retention_bucket_name,
                                       file_retention_flag,
                                       retn_file_suffix,
                                       rpt_last_run_file)
                #Remove the temp file after it is uploaded to Cloud Storage to avoid OOM issues with the Cloud Function.
                os.remove(full_path)

        #Remove the tmp file after upload
        os.remove(rprts_full_path)


#def pacific_to_eastern_conversion(pacific_time, eastern_time):
def main(event, context):
    try:
        if 'data' in event:
            name = base64.b64decode(event['data']).decode('utf-8')
        else:
            name = 'World'
        print('Hello {}'.format(name))
        api_request()
    except Exception as e:
        logging.error(e)
The approach you are using will work for Cloud Run but won't work for Cloud Functions.
To make use of secrets in Google Cloud Functions, the steps are as follows:
Make sure the function's runtime service account is granted access to the secret. To use Secret Manager with Cloud Functions, assign the roles/secretmanager.secretAccessor role to the service account associated with your function.
Make the secret accessible to the function. This can be done using either the Google Cloud Console or the gcloud command-line tool.
I exposed the secret as an environment variable (with the name set to "api_key") and accessed it in the code as shown below:
import os
api_key = os.environ.get('api_key')
I hope this answers your question.
Your Cloud Functions service account doesn't have access to Secret Manager. Grant your Cloud Functions service account access on the secret, or on the project (not recommended).
If you don't set a custom service account on your Cloud Function (which is also not a good practice), the App Engine default service account is used; it follows the pattern <ProjectID>@appspot.gserviceaccount.com.
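For completeness, the exact error in the log ("Invalid constructor input for AccessSecretVersionRequest") is typically raised by version 2+ of the google-cloud-secret-manager client when the resource name is passed positionally; newer versions expect it as a keyword argument or inside a request dict. A minimal sketch of that call, using the project and secret IDs visible in the question's error log, in case this is the remaining issue once permissions are sorted out:

from google.cloud import secretmanager

# Sketch only: project/secret IDs taken from the question's error log.
secret_client = secretmanager.SecretManagerServiceClient()
secret_name = secret_client.secret_version_path('gcp-favt-acn-rpt-dev',
                                                'blackline_api_key',
                                                'latest')
# v2+ of the client rejects a bare positional string here; pass it by name.
secret_resp = secret_client.access_secret_version(request={"name": secret_name})
api_key = secret_resp.payload.data.decode('UTF-8')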

Cannot upload s3 files to another region (clients bucket) despite successful response

This is my code. I am trying to copy a directory from one bucket to another. Everything looks positive, but the files are not appearing in the client's bucket.
import boto3

ACCESS_KEY = 'access_key'
SECRET_KEY = 'secret_key'
REGION_NAME = 'US_EAST_1'

source_bucket = 'source_bucket'
#Make sure you provide / in the end
source_prefix = 'source_prefix'
target_bucket = 'target-bucket'
target_prefix = 'target-prefix'

client = boto3.client('s3')
session_src = boto3.session.Session()
source_s3_r = session_src.resource('s3')

def get_s3_keys(bucket, prefix):
    keys = []
    response = client.list_objects_v2(Bucket=bucket, Prefix=prefix, MaxKeys=100)
    for obj in response['Contents']:
        keys.append(obj['Key'])
    return keys

session_dest = boto3.session.Session(aws_access_key_id=ACCESS_KEY,
                                     aws_secret_access_key=SECRET_KEY)
dest_s3_r = session_dest.resource('s3')

# create a reference to source image
old_obj = source_s3_r.Object(source_bucket, source_prefix)
# create a reference for destination image
new_obj = dest_s3_r.Object(target_bucket, target_prefix)

keys = get_s3_keys(source_bucket, source_prefix)
responses = []
# upload the image to destination S3 object
for filename in keys:
    print("Transferring file {}, {}".format(source_bucket, filename))
    old_obj = source_s3_r.Object(source_bucket, filename)
    response = new_obj.put(Body=old_obj.get()['Body'].read())
    response_code = response['ResponseMetadata']['HTTPStatusCode']
    responses.append(response_code)
    print("File transfer response {}".format(response_code))

distinct_response = list(set(responses))
if len(distinct_response) > 1 or distinct_response[0] != 200:
    print("File could not be transfered to krux bucket. Exiting now")
    exit(1)
else:
    print("File transfer to krux bucket successful")
I am getting a successful response code of 200 but the file is not transferred across.
Srinivas, try this.
I used the S3 Resource object; an equivalent S3 Client sketch is shown after this answer if you want that instead...
bucket = s3.Bucket(bucket_name)  # from_bucket
for osi in bucket.objects.all():
    print(osi)
    copy_source = {
        'Bucket': bucket.name,
        'Key': osi.key
    }
    s3.Bucket('to_bucket').copy(copy_source, osi.key)
Hope it helps..
r0ck
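As referenced in the answer above, here is a minimal client-based sketch of the same server-side copy, assuming boto3 with placeholder bucket names ('from_bucket'/'to_bucket') and credentials that can read the source and write the destination:

import boto3

# Sketch only: placeholder bucket/prefix names; copy_object copies
# server-side, so nothing is downloaded to the machine running this.
s3_client = boto3.client('s3')
paginator = s3_client.get_paginator('list_objects_v2')
for page in paginator.paginate(Bucket='from_bucket', Prefix='source_prefix/'):
    for obj in page.get('Contents', []):
        s3_client.copy_object(
            Bucket='to_bucket',
            Key=obj['Key'],
            CopySource={'Bucket': 'from_bucket', 'Key': obj['Key']},
        )

Note that in the question's loop, new_obj is created once with target_prefix and reused, so every put() writes to the same key; copying key by key as above avoids that.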

Upload file in SharePoint using python

I'm trying to upload a file using a Python script. When I run the code it gives me no error, but it is not able to upload the file into my SharePoint folder.
import requests
from shareplum import Office365
from config import config

# get data from configuration
username = config['sp_user']
password = config['sp_password']
site_name = config['sp_site_name']
base_path = config['sp_base_path']
doc_library = config['sp_doc_library']
file_name = "cat_pic.jpg"

# Obtain auth cookie
authcookie = Office365(base_path, username=username, password=password).GetCookies()
session = requests.Session()
session.cookies = authcookie
session.headers.update({'user-agent': 'python_bite/v1'})
session.headers.update({'accept': 'application/json;odata=verbose'})

# perform the actual upload
with open(file_name, 'rb') as file_input:
    try:
        response = session.post(
            url=base_path + "/sites/" + site_name + "/Shared%20Documents/Forms/AllItems.aspx/_api/web/GetFolderByServerRelativeUrl('" + doc_library + "')/Files/add(url='"
            + file_name + "',overwrite=true)",
            data=file_input)
    except Exception as err:
        print("Some error occurred: " + str(err))
config.py

config = dict()
config['sp_user'] = 'email'
config['sp_password'] = 'pass'
config['sp_base_path'] = 'https://bboxxeng.sharepoint.com'
config['sp_site_name'] = 'TESTIAN'
config['sp_doc_library'] = 'Test'
This is the URL of my SharePoint: https://bboxxeng.sharepoint.com/sites/TESTIAN/Shared%20Documents/Forms/AllItems.aspx and I've already created a folder in it named Test...
Thank you for answering my question.
Modify the code as below.
response = session.post(
    url=base_path + "/sites/" + site_name + "/_api/web/GetFolderByServerRelativeUrl('Shared%20Documents/" + doc_library + "')/Files/add(url='"
    + file_name + "',overwrite=true)",
    data=file_input)
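The change is that the REST call targets the site's /_api/web endpoint directly and addresses the folder by its server-relative path ('Shared Documents/Test') instead of routing through the AllItems.aspx page URL. As a small debugging aid (my own addition, not part of the original answer), printing the response makes a silent failure visible:

# Sketch only: reuses the response object from the snippet above.
print(response.status_code)  # 200 means SharePoint accepted the upload
print(response.text)         # on success this is JSON metadata for the new file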

How to use python script to copy files from one bucket to another bucket at the Amazon S3 with boto

How can I use a Python script to copy files from one bucket to another bucket on Amazon S3 with boto?
I know how to create a bucket, but not how to copy its files to another bucket.
import boto
import boto.s3.connection

#CREATING A CONNECTION
access_key = 'MPB**********ITMO'
secret_key = '11t63y************XojO7b'
conn = boto.connect_s3(
    aws_access_key_id = access_key,
    aws_secret_access_key = secret_key,
    host = 'twg****.org.tw',
    is_secure=False,               # uncomment if you are not using ssl
    calling_format = boto.s3.connection.OrdinaryCallingFormat(),
)

#CREATING A BUCKET
bucket = conn.create_bucket('aaaa')
reference:
https://github.com/boto/boto/blob/develop/docs/source/s3_tut.rst
http://docs.ceph.com/docs/master/radosgw/s3/python/
import boto
import boto.s3.connection

#CREATING A CONNECTION
access_key = 'MPB*******MO'
secret_key = '11t6******rVYXojO7b'
conn = boto.connect_s3(
    aws_access_key_id = access_key,
    aws_secret_access_key = secret_key,
    host = 'twg******.tw',
    is_secure=False,               # uncomment if you are not using ssl
    calling_format = boto.s3.connection.OrdinaryCallingFormat(),
)

src = conn.get_bucket('roger123weddec052335422018')
dst = conn.get_bucket('aaa/aa/')
for k in src.list():
    # copy stuff to your destination here
    dst.copy_key(k.key, src.name, k.key)
    # then delete the source key
    #k.delete()
===========================================
Get subdirectory info folder

folders = bucket.list("", "/")
for folder in folders:
    print(folder.name)

========================================
Create folder

k = bucket.new_key('abc/123/')
k.set_contents_from_string('')

=============================================
LISTING OWNED BUCKETS

for bucket in conn.get_all_buckets():
    print("{name}\t{created}".format(
        name=bucket.name,
        created=bucket.creation_date,
    ))

CREATING A BUCKET

#bucket = conn.create_bucket('willie20181121')
bucket = conn.create_bucket('roger123.Tuedec040445192018')
print(bucket.name)

========================================================
LISTING A BUCKET’S CONTENT

foldername = ','
for key in bucket.list():
    print("{name}\t{size}\t{modified}\t{xx}\t{yy}\t{zz}".format(
        name=key.name,      # = key.key
        size=key.size,
        modified=key.last_modified,
        xx=key.set_contents_from_string,
        yy=key.owner.id,
        zz=key.name.startswith('image'),
        #qq=bucket.name,
        #aa=key.set_contents_from_string.startswith('//'),
    ))
    xxx = key.key
    #print(len(xxx.split('/')))
    if len(xxx.split('/')) == 2:
        if foldername.find(xxx.split('/')[0]) == -1:
            foldername = foldername + xxx.split('/')[0] + ","
#print(foldername)

DELETING A BUCKET

#conn.delete_bucket('willietest20181121')

CREATING AN OBJECT

#key = bucket.new_key('hello.txt')
#key.set_contents_from_string('Hello World!11:52')

DOWNLOAD AN OBJECT (TO A FILE)

#key = bucket.get_key('hello.txt')
#key.get_contents_to_filename('/home/willie/Desktop/hello.txt')

DELETE AN OBJECT

#bucket.delete_key('hello.txt')
==========================================================================
Insert files

import boto
import boto.s3
import boto.s3.connection
import os.path
import sys

#https://gist.github.com/SavvyGuard/6115006

def percent_cb(complete, total):
    sys.stdout.write('.')
    sys.stdout.flush()

# Fill in info on data to upload
# destination bucket name
bucket_name = 'willie20181121_'
# source directory
sourceDir = '/home/willie/Desktop/x/'
# destination directory name (on s3)
destDir = '/test2/'

#max size in bytes before uploading in parts. between 1 and 5 GB recommended
MAX_SIZE = 20 * 1000 * 1000
#size of parts when uploading in parts
PART_SIZE = 6 * 1000 * 1000

access_key = 'MPBVAQPULDHZIFUQITMO'
secret_key = '11t63yDVZTlStKoBBxHl35HgUcgMOSNrVYXojO7b'
conn = boto.connect_s3(
    aws_access_key_id = access_key,
    aws_secret_access_key = secret_key,
    host = 'twgc-s3.nchc.org.tw',
    is_secure=False,               # uncomment if you are not using ssl
    calling_format = boto.s3.connection.OrdinaryCallingFormat(),
)
bucket = conn.get_bucket(bucket_name,
                         location=boto.s3.connection.Location.DEFAULT)

uploadFileNames = []
for (sourceDir, dirname, filename) in os.walk(sourceDir):
    #uploadFileNames.extend(filename)
    #print("==" + filename)
    break
uploadFileNames.extend(["1.jpg"])
uploadFileNames.extend(["2.py"])

for filename in uploadFileNames:
    sourcepath = os.path.join(sourceDir + filename)
    #sourcepath = os.path.join(filename)
    destpath = os.path.join(destDir, filename)
    print('Uploading %s to Amazon S3 bucket %s' % (sourcepath, bucket_name))
    #print("===" + sourcepath)

    filesize = os.path.getsize(sourcepath)
    if filesize > MAX_SIZE:
        print("multipart upload")
        mp = bucket.initiate_multipart_upload(destpath)
        fp = open(sourcepath, 'rb')
        fp_num = 0
        while (fp.tell() < filesize):
            fp_num += 1
            print("uploading part %i" % fp_num)
            mp.upload_part_from_file(fp, fp_num, cb=percent_cb, num_cb=10, size=PART_SIZE)
        mp.complete_upload()
    else:
        print("singlepart upload")
        k = boto.s3.key.Key(bucket)
        k.key = destpath
        #print(sourcepath)
        k.set_contents_from_filename(sourcepath, cb=percent_cb, num_cb=10)
=================
exception testing

try:
    key = bucket.get_key('Mail1.txt')
    key.get_contents_to_filename('/home/willie/Desktop/mail.txt')
except Exception as e:
    result = "False"
    print("==" + str(e.args))
