I get this error when I add from google.cloud import storage to my app:
Traceback (most recent call last):
File "/mnt/apps/repo/oktested/oktested-backend/wsgi.py", line 1, in <module>
from ok_app import app as application
File "./ok_app/__init__.py", line 11, in <module>
from google.cloud import storage
File "/usr/lib/python3.6/site-packages/google/cloud/storage/__init__.py", line 38, in <module>
from google.cloud.storage.batch import Batch
File "/usr/lib/python3.6/site-packages/google/cloud/storage/batch.py", line 29, in <module>
from google.cloud import _helpers
File "/usr/lib/python3.6/site-packages/google/cloud/_helpers.py", line 33, in <module>
from google.protobuf import duration_pb2
File "/usr/lib64/python3.6/site-packages/google/protobuf/duration_pb2.py", line 69, in <module>
'__module__' : 'google.protobuf.duration_pb2'
TypeError: A Message class can only inherit from Message
unable to load app 1 (mountpoint='oktested') (callable not found or import error)
The code is:
import base64
import os
import re
import uuid

from google.cloud import storage

creds = storage.Client.from_service_account_json('s-creds.json')

def uploadProfilePic(id, image_name, image):
    filename = id + '__' + str(uuid.uuid4()) + '__' + image_name
    dest_dir = app.config['S3_DIR'] + app.config['PROFILE_PIC_PATH']
    filepath = os.path.join(dest_dir, filename)
    # strip the data-URI prefix and decode the base64 payload
    image_data = re.sub('^data:image/.+;base64,', '', image)
    img_data = base64.b64decode(image_data)
    try:
        if 'jpeg' in filename:
            image_type = "image/jpeg"
        elif 'png' in filename:
            image_type = "image/png"
        elif 'jpg' in filename:
            image_type = "image/jpg"
        else:
            image_type = "application/octet-stream"
        # s3.Bucket(app.config['S3_BUCKET_NAME']).put_object(Key=filepath, Body=img_data, ACL=app.config['S3_ACL'], ContentType='image/jpeg', CacheControl=app.config['S3_CACHE_CONTROL'])
        bucket = creds.bucket(app.config['GCP_BUCKET_NAME'])
        blob = bucket.blob(filepath)
        blob.upload_from_string(img_data, content_type=image_type)
        app.logger.debug("User::uploadProfilePic::SuccessfullyUploaded::{}".format(filepath))
        return filepath
    except Exception as e:
        app.logger.error("User::uploadProfilePic:: {}".format(e))
        return ''
I run the code on the server using uWSGI, and I still get this error after restarting the service. Please help.
I found a workaround: I now call the Google Cloud methods only where I need them, not during app initialization.
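Roughly, the workaround looks like this (a minimal sketch based on the snippet above; the helper name is made up):
# Sketch of the deferred-import workaround: nothing from google.cloud is touched
# while uWSGI imports the app; the client is built on first use instead.
_storage_client = None

def get_storage_client():
    global _storage_client
    if _storage_client is None:
        from google.cloud import storage  # imported lazily, not at app init
        _storage_client = storage.Client.from_service_account_json('s-creds.json')
    return _storage_client

def uploadProfilePic(id, image_name, image):
    bucket = get_storage_client().bucket(app.config['GCP_BUCKET_NAME'])
    ...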
I have the following code for reading in files from a folder:
from pyspark.sql.types import *
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession

sc = SparkContext.getOrCreate()
spark = SparkSession(sc)

class MicrosoftAcademicGraph:
    def __init__(self):
        self.version = '2021-12-06'

    def getBasepath(self):
        basepath = '/work/ScienceOfScience/Data/ScienceOfScience/mag/mag/'
        if (self.version != ''):
            basepath = self.version + '/'
        return basepath

    # return stream path
    def getFullpath(self, streamName):
        path = self.getBasepath() + self.streams[streamName][0]
        return self

    # return stream header
    def getHeader(self, streamName):
        return self.streams[streamName][1]

    # return stream schema
    def getSchema(self, streamName):
        schema = StructType()
        for field in self.streams[streamName][1]:
            fieldname, fieldtype = field.split(':')
            nullable = fieldtype.endswith('?')
            if nullable:
                fieldtype = fieldtype[:-1]
            schema.add(StructField(fieldname, self.datatypedict[fieldtype], nullable))
        return schema

    # return stream dataframe
    def getDataframe(self, streamName):
        return spark.read.format('csv').options(header='false', delimiter='\t').schema(self.getSchema(streamName)).load(self.getFullpath(streamName))

    # define stream dictionary
    streams = {
        'Affiliations' : ('mag/Affiliations.txt', ['AffiliationId:long', 'Rank:uint', 'NormalizedName:string', 'DisplayName:string', 'GridId:string', 'OfficialPage:string', 'WikiPage:string', 'PaperCount:long', 'PaperFamilyCount:long', 'CitationCount:long', 'Iso3166Code:string', 'Latitude:float?', 'Longitude:float?', 'CreatedDate:DateTime']),
        'AuthorExtendedAttributes' : ('mag/AuthorExtendedAttributes.txt', ['AuthorId:long', 'AttributeType:int', 'AttributeValue:string'])}
I'm trying to retrieve one of the files, called 'Authors', in the following way:
e = MicrosoftAcademicGraph()
e.getDataframe('Authors')
I get a long list of errors that look like this:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "<string>", line 51, in getDataframe
File "/home/ucloud/.local/lib/python3.6/site-packages/pyspark/sql/readwriter.py", line 162, in load
return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path)))
File "/home/ucloud/.local/lib/python3.6/site-packages/py4j/java_gateway.py", line 1313, in __call__
args_command, temp_args = self._build_args(*args)
File "/home/ucloud/.local/lib/python3.6/site-packages/py4j/java_gateway.py", line 1277, in _build_args
(new_args, temp_args) = self._get_args(args)
command_part = REFERENCE_TYPE + parameter._get_object_id()
AttributeError: 'MicrosoftAcademicGraph' object has no attribute '_get_object_id'
Is there something wrong in the code, or could this have to do with a version mismatch between Python and PySpark?
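Judging from the traceback, load() receives the MicrosoftAcademicGraph object itself rather than a path string: getFullpath builds path but then returns self, and py4j fails when it tries to convert that object. A sketch of the corrected method:
    # return stream path (likely fix: return the built path string, not self,
    # so that load() receives a str that py4j can handle)
    def getFullpath(self, streamName):
        path = self.getBasepath() + self.streams[streamName][0]
        return path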
OS: Windows 10, 64-bit
Python: 3.7.3 (Anaconda)
# -*- coding: utf-8 -*-
from aip import AipSpeech

APP_ID = ''
API_KEY = 'xxx'
SECRET_KEY = 'yyy'

client = AipSpeech(APP_ID, API_KEY, SECRET_KEY)

def get_file_content(filePath):
    with open(filePath, 'rb') as fp:
        return fp.read()

# calling client.asr(get_file_content('01.wav'), 'wav', 16000) throws an exception
results = client.asr(get_file_content('01.wav'), 'wav', 16000)
print(results)
Error messages
"""
Traceback (most recent call last):
File "baidu_speech_reg_api.py", line 18, in <module>
results = client.asr(get_file_content('01.wav'), 'wav', 16000)
File "C:\Users\yyyy\Anaconda3\envs\pyside2\lib\site-packages\aip\speech.py", line 78, in asr
return self._request(self.__asrUrl, data)
File "C:\Users\yyyy\Anaconda3\envs\pyside2\lib\site-packages\aip\base.py", line 90, in _request
params = self._getParams(authObj)
File "C:\Users\yyyy\Anaconda3\envs\pyside2\lib\site-packages\aip\base.py", line 190, in _getParams
params['access_token'] = authObj['access_token']
KeyError: 'access_token'
"""
I can use the same keys for OCR and image classification, but speech recognition always fails.
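The KeyError suggests the token response for these credentials did not contain an access_token at all, i.e. the token request itself came back with an error. One way to see what is going on is to request the token directly and print the raw response; the endpoint and parameter names below are taken from Baidu's OAuth documentation rather than from the post, so treat them as an assumption:
# Sketch: fetch the OAuth token by hand to inspect the raw response.
# If the JSON contains "error"/"error_description" instead of "access_token",
# the keys do not have access to the speech service.
import requests

API_KEY = 'xxx'      # same values as above
SECRET_KEY = 'yyy'

resp = requests.get(
    'https://aip.baidubce.com/oauth/2.0/token',
    params={
        'grant_type': 'client_credentials',
        'client_id': API_KEY,
        'client_secret': SECRET_KEY,
    },
)
print(resp.json())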
I am not sure what the issue is here. Is it a problem with the credentials? I am trying to insert data from GCS into Google BigQuery. Here is the full error:
Traceback (most recent call last):
File "target.py", line 98, in <module>
main()
File "target.py", line 94, in main
insert_data(gcs_file)
File "target.py", line 85, in insert_data
bq = BigQueryClient(project)
File "/Users/xxx/Prog/emr-etl/xx_auth.py", line 58, in BigQueryClient
credentials = Credentials.from_service_account_file(os.getenv('GOOGLE_APPLICATION_CREDENTIALS'))
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/google/oauth2/service_account.py", line 209, in from_service_account_file
filename, require=['client_email', 'token_uri'])
File "/Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/site-packages/google/auth/_service_account_info.py", line 71, in from_filename
with io.open(filename, 'r', encoding='utf-8') as json_file:
TypeError: expected str, bytes or os.PathLike object, not NoneType
And here is the code:
def upload_files(files, gcs_bucket="tracker"):
    storage_client = storage.Client(project='xxx-main')
    bucket = storage_client.get_bucket("tracker")
    for file in files:
        destination_filepath = file['folder'] + '/' + file['filename']
        source_filepath = file['local_filename']
        gcs_file = bucket.blob(destination_filepath)
        gcs_file.upload_from_filename(source_filepath)
    return gcs_file

def insert_data(gcs_file, project="xxx-main"):
    bq = BigQueryClient(project)
    bq_job_config = QueryJobConfig()
    job = bq.load_table_from_uri(gcs_file, 'snowplow', job_config=bq_job_config)
    result = job.result()

def main():
    lists = list_download(sp_bucket)
    gcs_file = upload_files(lists)
    insert_data(gcs_file)

if __name__ == "__main__":
    main()
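The TypeError at the bottom of the traceback says the path passed to from_service_account_file was None, i.e. os.getenv('GOOGLE_APPLICATION_CREDENTIALS') found nothing, so the environment variable is not set (or not visible) in the process the script runs in. A minimal sketch of a guard that makes this failure obvious, using the same env var and loader as in the traceback:
# Sketch: fail fast with a clear message instead of passing None to the loader.
import os
from google.oauth2.service_account import Credentials

key_path = os.getenv('GOOGLE_APPLICATION_CREDENTIALS')
if not key_path:
    raise RuntimeError(
        "GOOGLE_APPLICATION_CREDENTIALS is not set for this process; "
        "export it (or pass an explicit key file path) before creating the client")
credentials = Credentials.from_service_account_file(key_path)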
I'm trying to deploy a Python web server. The main purpose of the following code is to generate a QR code and turn the image into binary data.
def generate_qrcode(date, user_id):
    qr = qrcode.QRCode(
        version=1,
        error_correction=constants.ERROR_CORRECT_L,
        box_size=10,
        border=4,
    )
    base_string = config_default.configs.get('const').get('url') + '/check_in/'
    generate_string = date + '#' + user_id
    qr.add_data(base_string + generate_string, qrcode)
    qr.make(fit=True)
    img = qr.make_image()
    return Image.fromqimage(img)  # this is the line that raises the exception
Then I post this binary data to a URL:
def upload_qrcode(datetime, user_id):
    url = "https://api.weixin.qq.com/cgi-bin/media/upload"
    access_token = get_access_token()
    querystring = {"access_token": access_token, "type": "image"}
    files = {"media": generate_qrcode(datetime, user_id)}
    response = requests.post(url, params=querystring, files=files)
Here is the exception:
Traceback (most recent call last):
File "/Users/yudayan/Documents/myCode/pythonCode/nuobao/QRC_maker.py", line 58, in <module>
main()
File "/Users/yudayan/Documents/myCode/pythonCode/nuobao/QRC_maker.py", line 54, in main
print(upload_qrcode("sdf", "adsf"))
File "/Users/yudayan/Documents/myCode/pythonCode/nuobao/QRC_maker.py", line 47, in upload_qrcode
files = {"media":generate_qrcode(datetime,user_id)}
File "/Users/yudayan/Documents/myCode/pythonCode/nuobao/QRC_maker.py", line 32, in generate_qrcode
return Image.fromqimage(img)
File "/Users/yudayan/anaconda/lib/python3.6/site-packages/PIL/Image.py", line 2321, in fromqimage
return ImageQt.fromqimage(im)
File "/Users/yudayan/anaconda/lib/python3.6/site-packages/PIL/ImageQt.py", line 59, in fromqimage
if im.hasAlphaChannel():
File "/Users/yudayan/anaconda/lib/python3.6/site-packages/qrcode/image/pil.py", line 50, in __getattr__
return getattr(self._img, name)
AttributeError: 'Image' object has no attribute 'hasAlphaChannel'
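Based on the traceback, img here is the qrcode library's PIL-backed image wrapper, not a Qt QImage, so PIL's Image.fromqimage (which expects a Qt image with methods like hasAlphaChannel) is the wrong conversion. If the goal is just the binary image data for the requests upload, one approach is to save the image into an in-memory buffer; a minimal sketch (qrcode_to_bytes is a made-up helper name):
# Sketch: get PNG bytes from the qrcode image without any Qt conversion.
import io

def qrcode_to_bytes(qr_img):
    buffer = io.BytesIO()
    qr_img.save(buffer)  # the qrcode PIL image saves as PNG by default
    return buffer.getvalue()

# then in upload_qrcode, something like:
# files = {"media": ("qrcode.png", qrcode_to_bytes(generate_qrcode(datetime, user_id)), "image/png")}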
Hi, I am unable to upload a file to S3 using boto. It fails with the following error message. Can someone help me? I am new to Python and boto.
from boto.s3 import connect_to_region
from boto.s3.connection import Location
from boto.s3.key import Key
import boto
import gzip
import os

AWS_KEY = ''
AWS_SECRET_KEY = ''
BUCKET_NAME = 'mybucketname'

conn = connect_to_region(Location.USWest2, aws_access_key_id=AWS_KEY,
                         aws_secret_access_key=AWS_SECRET_KEY,
                         is_secure=False, debug=2
                         )
bucket = conn.lookup(BUCKET_NAME)
bucket2 = conn.lookup('unzipped-data')
rs = bucket.list()
rs2 = bucket2.list()
compressed_files = []
all_files = []
files_to_download = []
downloaded_files = []
path = "~/tmp/"

# Check if the file has already been decompressed
def filecheck():
    for filename in bucket.list():
        all_files.append(filename.name)
    for n in rs2:
        compressed_files.append(n.name)
    for file_name in all_files:
        if file_name.strip('.gz') in compressed_files:
            pass
        elif '.gz' in file_name and 'indeed' in file_name:
            files_to_download.append(file_name)

# Download necessary files
def download_files():
    for name in rs:
        if name.name in files_to_download:
            file_name = name.name.split('/')
            print('Downloading: ' + name.name).strip('\n')
            file_name = name.name.split('/')
            name.get_contents_to_filename(path + file_name[-1])
            print(' - Completed')
            # Decompressing the file
            print('Decompressing: ' + name.name).strip('\n')
            inF = gzip.open(path + file_name[-1], 'rb')
            outF = open(path + file_name[-1].strip('.gz'), 'wb')
            for line in inF:
                outF.write(line)
            inF.close()
            outF.close()
            print(' - Completed')
            # Uploading file
            print('Uploading: ' + name.name).strip('\n')
            full_key_name = name.name.strip('.gz')
            k = Key(bucket2)
            k.key = full_key_name
            k.set_contents_from_filename(path + file_name[-1].strip('.gz'))
            print('Completed')
    # Clean Up
    d_list = os.listdir(path)
    for d in d_list:
        os.remove(path + d)

# Function Calls
filecheck()
download_files()
Error message :
Traceback (most recent call last):
File "C:\Users\Siddartha.Reddy\workspace\boto-test\com\salesify\sid\decompress_s3.py", line 86, in <module>
download_files()
File "C:\Users\Siddartha.Reddy\workspace\boto-test\com\salesify\sid\decompress_s3.py", line 75, in download_files
k.set_contents_from_filename(path+file_name[-1].strip('.gz'))
File "C:\Python27\lib\site-packages\boto\s3\key.py", line 1362, in set_contents_from_filename
encrypt_key=encrypt_key)
File "C:\Python27\lib\site-packages\boto\s3\key.py", line 1293, in set_contents_from_file
chunked_transfer=chunked_transfer, size=size)
File "C:\Python27\lib\site-packages\boto\s3\key.py", line 750, in send_file
chunked_transfer=chunked_transfer, size=size)
File "C:\Python27\lib\site-packages\boto\s3\key.py", line 951, in _send_file_internal
query_args=query_args
File "C:\Python27\lib\site-packages\boto\s3\connection.py", line 664, in make_request
retry_handler=retry_handler
File "C:\Python27\lib\site-packages\boto\connection.py", line 1070, in make_request
retry_handler=retry_handler)
File "C:\Python27\lib\site-packages\boto\connection.py", line 1029, in _mexe
raise ex
socket.error: [Errno 10053] An established connection was aborted by the software in your host machine
I have no problem downloading the files, but the upload fails for some weird reason.
If the problem is the size of files (> 5GB), you should use multipart upload:
http://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html
Search for multipart_upload in the docs:
http://boto.readthedocs.org/en/latest/ref/s3.html#module-boto.s3.multipart
Also, see this question for a related issue:
How can I copy files bigger than 5 GB in Amazon S3?
The process is a little non-intuitive. You need to (see the sketch after these steps):
run initiate_multipart_upload(), storing the returned object
split the file into chunks (either on disk, or read from memory using CStringIO)
feed the parts sequentially into upload_part_from_file()
run complete_upload() on the stored object
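A rough sketch of those four steps with the boto 2.x API used in the question (the bucket name, key name, file path, and part size are placeholders, and the chunks are read into memory rather than split on disk):
# Sketch of a boto 2.x multipart upload; parts must be at least 5 MB except the last.
import math
import os
from io import BytesIO

import boto

conn = boto.connect_s3(AWS_KEY, AWS_SECRET_KEY)   # credentials as in the question
bucket = conn.lookup('mybucketname')

source_path = 'C:/tmp/bigfile.gz'        # placeholder path
part_size = 50 * 1024 * 1024             # 50 MB chunks
part_count = int(math.ceil(os.stat(source_path).st_size / float(part_size)))

# 1. start the upload and keep the returned MultiPartUpload object
mp = bucket.initiate_multipart_upload('bigfile.gz')
try:
    with open(source_path, 'rb') as f:
        # 2./3. split into chunks and feed them sequentially (part numbers start at 1)
        for part_num in range(1, part_count + 1):
            mp.upload_part_from_file(BytesIO(f.read(part_size)), part_num)
    # 4. complete the upload on the stored object
    mp.complete_upload()
except Exception:
    mp.cancel_upload()
    raise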