From this GitHub repo:
https://github.com/GoogleCloudPlatform/python-docs-samples
I am trying to test the Video Intelligence API and do label analysis.
import argparse
import sys
import time
import io
import base64
from google.cloud.gapic.videointelligence.v1beta1 import enums
from google.cloud.gapic.videointelligence.v1beta1 import (
    video_intelligence_service_client)
# [END imports]
#python labels.py /Users/rockbaek/tildawatch-contents/EpicSkillShot/M7-_VukSueY/SKT\ vs\ KT\ Game\ 3\ _\ Grand\ Finals\ S7\ LCK\ Spring\ 2017\ _\ KT\ Rolster\ vs\ SK\ Telecom\ T1\ G3\ 1080p-M7-_VukSueY.mp4
def analyze_labels_file(path):
    """ Detects labels given a file path. """
    video_client = (video_intelligence_service_client.
                    VideoIntelligenceServiceClient())
    features = [enums.Feature.LABEL_DETECTION]

    with io.open(path, "rb") as movie:
        content_base64 = base64.b64encode(movie.read())

    operation = video_client.annotate_video(
        '', features, input_content=content_base64)
    print('\nProcessing video for label annotations:')

    while not operation.done():
        sys.stdout.write('.')
        sys.stdout.flush()
        time.sleep(15)

    print('\nFinished processing.')

    # first result is retrieved because a single video was processed
    results = operation.result().annotation_results[0]

    for i, label in enumerate(results.label_annotations):
        print('Label description: {}'.format(label.description))
        print('Locations:')

        for l, location in enumerate(label.locations):
            positions = 'Entire video'
            if (location.segment.start_time_offset != -1 or
                    location.segment.end_time_offset != -1):
                positions = '{} to {}'.format(
                    location.segment.start_time_offset / 1000000.0,
                    location.segment.end_time_offset / 1000000.0)
            print('\t{}: {}'.format(l, positions))

        print('\n')


if __name__ == '__main__':
    # [START running_app]
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('path', help='GCS file path for label detection.')
    args = parser.parse_args()

    analyze_labels_file(args.path)
    # [END running_app]
# [END full_tutorial]
Then I run it from the terminal:
python labels.py MP4_FILE_PATH
After quite a while, it fails with this error:
Traceback (most recent call last):
File "labels.py", line 123, in <module>
analyze_labels_file(args.path)
File "labels.py", line 52, in analyze_labels_file
'', features, input_content=content_base64)
File "/Library/Python/2.7/site-packages/google/cloud/gapic/videointelligence/v1beta1/video_intelligence_service_client.py", line 237, in annotate_video
self._annotate_video(request, options), self.operations_client,
File "/Library/Python/2.7/site-packages/google/gax/api_callable.py", line 428, in inner
return api_caller(api_call, this_settings, request)
File "/Library/Python/2.7/site-packages/google/gax/api_callable.py", line 416, in base_caller
return api_call(*args)
File "/Library/Python/2.7/site-packages/google/gax/api_callable.py", line 376, in inner
return a_func(*args, **kwargs)
File "/Library/Python/2.7/site-packages/google/gax/retry.py", line 144, in inner
raise exc
google.gax.errors.RetryError: GaxError(Retry total timeout exceeded with exception, caused by <_Rendezvous of RPC that terminated with (StatusCode.DEADLINE_EXCEEDED, Deadline Exceeded)>)
Please help me figure out why it is not working! :(
For visitors from the future: I faced the same error with the Cloud Speech API.
I simply increased the timeout value when calling operation.result, and that solved it. Although this fragment isn't in the OP's code, it should be in the Google example code the OP mentioned.
operation.result(timeout=90) # increase this numeric value
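Applied to the Video Intelligence sample above, the change would look roughly like this (a minimal sketch; it assumes the operation returned by annotate_video exposes the same result(timeout=...) signature as the Speech API one, and 300 seconds is just an arbitrary value to try):
operation = video_client.annotate_video(
    '', features, input_content=content_base64)

# Wait for the long-running operation with a larger timeout than the default.
result = operation.result(timeout=300)  # increase further for bigger videos
results = result.annotation_results[0]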
I tried your code with a small video and it seems to work just fine for me. Maybe you are hitting some form of quota or limit (refer to https://cloud-dot-devsite.googleplex.com/video-intelligence/limits)? I have also run large videos loaded into Google Storage through the Python client library without issue.
Another step to try: send the request to the service via a curl command.
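If you prefer to stay in Python rather than use curl, a rough sketch of that REST call is below. The v1 videos:annotate endpoint, the inputContent/features body fields, and the OAuth bearer token are assumptions based on the public REST documentation, not taken from the sample above:
import base64
import requests

# Hypothetical access token, e.g. from `gcloud auth application-default print-access-token`.
ACCESS_TOKEN = "ya29...."

with open("video.mp4", "rb") as f:
    content_base64 = base64.b64encode(f.read()).decode("utf-8")

response = requests.post(
    "https://videointelligence.googleapis.com/v1/videos:annotate",
    headers={"Authorization": "Bearer " + ACCESS_TOKEN},
    json={"inputContent": content_base64, "features": ["LABEL_DETECTION"]},
)
print(response.status_code, response.json())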
Related
I want to describe the configuration of one topic. I wrote a script using the confluent-kafka-python library (version 1.5.0), and my Python version is 2.7.
My end goal is to change the retention time of my topic (retention.ms), but to do that I need to extract the topic's full configuration, change only what I want, and leave the other settings as they are.
My script:
def describe_topic(admin_client, topic):
    resources = [ConfigResource(confluent_kafka.admin.RESOURCE_TOPIC, topic)]
    fs = admin_client.describe_configs(resources)
    for resource, f in fs.items():
        remote_config = f.result()
        print(remote_config)
    return resource, remote_config
But I get this error:
File "kafka.py", line 192, in describe_topic
resources = [ConfigResource(confluent_kafka.admin.RESOURCE_TOPIC, topic)]
File "environment/mamba-6Od8R-HF-py2.7/lib/python2.7/site-packages/kafka/admin/config_resource.py", line 33, in init
resource_type = ConfigResourceType[str(resource_type).upper()] # pylint: disable-msg=unsubscriptable-object
File "/product/tedhdev/environment/mamba-6Od8R-HF-py2.7/lib/python2.7/site-packages/enum/init.py", line 394, in getitem
return cls.member_map[name]
KeyError: '2'\
Can someone help me?
Thanks a lot
import kafka
from kafka.admin import KafkaAdminClient, ConfigResource, ConfigResourceType
client_conn = kafka.KafkaAdminClient(bootstrap_servers="localhost:9092")
print(client_conn.describe_configs(config_resources=[ConfigResource(ConfigResourceType.TOPIC, "topic")]))
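To get to the asker's end goal of changing retention.ms, a possible follow-up with the same kafka-python admin client is sketched below. The topic name and retention value are placeholders, and note that the non-incremental AlterConfigs API generally replaces the topic's dynamic config rather than merging it, so read the current config first (as above) and resubmit everything you want to keep:
from kafka.admin import KafkaAdminClient, ConfigResource, ConfigResourceType

admin = KafkaAdminClient(bootstrap_servers="localhost:9092")

# Set retention to 7 days; the value is a string of milliseconds.
resource = ConfigResource(ConfigResourceType.TOPIC, "topic", configs={"retention.ms": "604800000"})
admin.alter_configs([resource])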
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.customvision import prediction
from PIL import Image
endpoint = "https://southcentralus.api.cognitive.microsoft.com/"
project_id = "projectidhere"
prediction_key = "predictionkeyhere"
predict = CustomVisionPredictionClient(prediction_key, endpoint)
with open("c:/users/paul.barbin/pycharmprojects/hw3/TallowTest1.jpg", mode="rb") as image_data:
    tallowresult = predict.detect_image(project_id, "test1", image_data)
Python 3.7, and I'm using Azure Custom Vision 3.1 (azure-cognitiveservices-vision-customvision 3.1.0).
Note that I've seen the same question on SO, but with no real solution; the posted answer on that question says to use the REST API instead.
I believe the error is in the endpoint (as the message suggests), and I've tried a few variants: with the trailing slash and without, with an environment variable and without, and appending various strings to my endpoint, but I keep getting the same message. Any help is appreciated.
Full error here:
Traceback (most recent call last):
File "GetError.py", line 15, in <module>
tallowresult = predict.detect_image(project_id, "test1", image_data)
File "C:\Users\paul.barbin\PycharmProjects\hw3\.venv\lib\site-packages\azure\cognitiveservices\vision\customvision\prediction\operations\_custom_vision_
prediction_client_operations.py", line 354, in detect_image
request = self._client.post(url, query_parameters, header_parameters, form_content=form_data_content)
File "C:\Users\paul.barbin\PycharmProjects\hw3\.venv\lib\site-packages\msrest\service_client.py", line 193, in post
request = self._request('POST', url, params, headers, content, form_content)
File "C:\Users\paul.barbin\PycharmProjects\hw3\.venv\lib\site-packages\msrest\service_client.py", line 108, in _request
request = ClientRequest(method, self.format_url(url))
File "C:\Users\paul.barbin\PycharmProjects\hw3\.venv\lib\site-packages\msrest\service_client.py", line 155, in format_url
base = self.config.base_url.format(**kwargs).rstrip('/')
KeyError: 'Endpoint'
CustomVisionPredictionClient takes two required, positional parameters: endpoint and credentials. Endpoint needs to be passed in before credentials, try swapping the order:
predict = CustomVisionPredictionClient(endpoint, prediction_key)
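If swapping the order alone doesn't help on 3.1.0, newer releases of the SDK also expect a credentials object rather than a bare key string. A sketch of that variant, based on the SDK quickstart, so treat it as an assumption for your exact version:
from msrest.authentication import ApiKeyCredentials
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient

endpoint = "https://southcentralus.api.cognitive.microsoft.com/"
credentials = ApiKeyCredentials(in_headers={"Prediction-key": prediction_key})
predict = CustomVisionPredictionClient(endpoint, credentials)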
I need to communicate with a scope, an Agilent Infiniium DCA-J 86100C, from Python 2.7. Keysight offers various Python examples, and I'm trying to run one of them to help me learn, but it crashes. I'm using GPIB and pyvisa for the connection.
I've already tried changing the termination characters, but that didn't change anything. I'm not sure what baud rate I could try.
import visa  # pyvisa (import not shown in the original snippet)

SCOPE_VISA_ADDRESS = "GPIB0::7::INSTR"
rm = visa.ResourceManager('C:\\Windows\\System32\\visa32.dll')
KsInfiniiVisionX = rm.open_resource(SCOPE_VISA_ADDRESS)
KsInfiniiVisionX.clear()
KsInfiniiVisionX.query(':SYSTEM:DSP "";*OPC?')
KsInfiniiVisionX.write(":HARDcopy:INKSaver OFF")
KsInfiniiVisionX.write(":DISPlay:DATA? PNG,SCReen,COLor")
my_image = KsInfiniiVisionX.read_raw()
Traceback (most recent call last):
File "X:\...\Get_screen_image_VISA_Python_modified\InfiniiVision_Save_ScreenShot_to_PC_Python-2.7_modified.py", line 201, in <module>
my_image = KsInfiniiVisionX.read_raw()
File "C:\Python27\lib\site-packages\pyvisa\resources\messagebased.py", line 306, in read_raw
chunk, status = self.visalib.read(self.session, size)
File "C:\Python27\lib\site-packages\pyvisa\ctwrapper\functions.py", line 1582, in read
ret = library.viRead(session, buffer, count, byref(return_count))
File "C:\Python27\lib\site-packages\pyvisa\ctwrapper\highlevel.py", line 188, in _return_handler
raise errors.VisaIOError(ret_value)
VisaIOError: VI_ERROR_TMO (-1073807339): Timeout expired before operation completed.
I was able to get help. The goal was to take a screenshot of the display on the scope and save it to a connected PC. The picture had to be modified before being saved. Also, the reason .read_raw() wouldn't work is that I had to send an *OPC? first, but as a .write() followed by a .read(), not as a .query().
# Save an inverted screenshot to the scope's own disk, then wait for completion
# by writing *OPC? and reading the reply (using .query() here is what timed out).
KsInfiniiVisionX.write('DISK:SIMAGE "D:\User Files\screen images\TEST.jpg",SCR,INV')
KsInfiniiVisionX.write('*OPC?')
complete = KsInfiniiVisionX.read()

# Transfer the file to the PC as an IEEE 488.2 definite-length binary block.
KsInfiniiVisionX.write('DISK:BFILE? "D:\User Files\screen images\TEST.jpg"')
my_image = KsInfiniiVisionX.read_raw()

# Parse the block header: '#', then the number of digits in the length field,
# then the payload size itself; everything after the header is the image data.
dum = (my_image[0:1].decode())
length = int(my_image[1:2].decode())
size = int(my_image[2:2+length].decode())
search = dum + str(length) + str(size)
my_file = my_image.partition(search.encode())

# Write the payload to a local file (file_name is defined elsewhere in the script).
base_directory = "X:\\..."
target = open(base_directory + '{}.jpg'.format(file_name), 'wb')
target.write(my_file[2])
target.close()
Unfortunately, I am not an expert so I can't explain WHY it works.
I'm using Python to retrieve a Blob image from Azure storage and then send it to Custom Vision for a prediction.
This is the code:
import io
from azure.storage.blob import BlockBlobService
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
block_blob_service = BlockBlobService(
    account_name=account_name,
    account_key=account_key
)

fp = io.BytesIO()
block_blob_service.get_blob_to_stream(
    container_name,
    blob_name,
    fp,
    max_connections=2
)

predictor = CustomVisionPredictionClient(
    cv_prediction_key,
    endpoint=cv_endpoint
)

# This call breaks with the below error message
results = predictor.predict_image(
    cv_project_id,
    image_data.getvalue(),
    iteration_id=cv_iteration_id
)
However, executing the predict_image function results in the following error:
System.Private.CoreLib: Exception while executing function: Functions.ReloadPostgres. System.Private.CoreLib: Result: Failure
Exception: HttpOperationError: Operation returned an invalid status code 'Resource Not Found'
Stack: File "~/.local/share/virtualenvs/py_func_app-GVYYSfCn/lib/python3.6/site-packages/azure/functions_worker/dispatcher.py", line 288, in _handle__invocation_request
self.__run_sync_func, invocation_id, fi.func, args)
File "~/.pyenv/versions/3.6.8/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "~/.local/share/virtualenvs/py_func_app-GVYYSfCn/lib/python3.6/site-packages/azure/functions_worker/dispatcher.py", line 347, in __run_sync_func
return func(**params)
File "~/py_func_app/ReloadPostgres/__init__.py", line 14, in main
data_handler.fetch_prediction_data()
File "~/py_func_app/Shared_Code/data_handler.py", line 127, in fetch_prediction_data
cv_handler.predict_image(image_data.getvalue(), cv_model)
File "~/py_func_app/Shared_Code/custom_vision.py", line 30, in predict_image
raise e
File "~/py_func_app/Shared_Code/custom_vision.py", line 26, in predict_image
iteration_id=cv_model.cv_iteration_id
File "~/.local/share/virtualenvs/py_func_app-GVYYSfCn/lib/python3.6/site-packages/azure/cognitiveservices/vision/customvision/prediction/custom_vision_prediction_client.py", line 215, in predict_image
raise HttpOperationError(self._deserialize, response)
Below I am providing a similar example using Custom Vision prediction with an image URL; you can change it to use an image file:
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 19 11:04:54 2019

#author: moverm
"""
#from azure.storage.blob import BlockBlobService
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient

#block_blob_service = BlockBlobService(
#    account_name=account_name,
#    account_key=account_key
#)
#
#fp = io.BytesIO()
#block_blob_service.get_blob_to_stream(
#    container_name,
#    blob_name,
#    fp,
#    max_connections=2
#)

predictor = CustomVisionPredictionClient(
    "prediction-key",
    endpoint="https://southcentralus.api.cognitive.microsoft.com"
)

# This call breaks with the below error message
#results = predictor.predict_image(
#    'prediction-key',
#    image_data.getvalue(),
#    iteration_id=cv_iteration_id
#)

test_img_url = "https://pointsprizes-blog.s3-accelerate.amazonaws.com/316.jpg"
results = predictor.predict_image_url("project-Id", "Iteration-Id", url=test_img_url)

# Display the results.
for prediction in results.predictions:
    print("\t" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100))
Basically, the issue is related to the endpoint. Use https://southcentralus.api.cognitive.microsoft.com as the endpoint.
It should work, and you should be able to see the prediction probability.
Hope it helps.
I tried to reproduce your issue and got a similar one, which was caused by using the incorrect endpoint from the Azure portal when I created a Cognitive Service in the Japan East region, as the figure below shows.
As the figure above shows, the endpoint is https://japaneast.api.cognitive.microsoft.com/customvision/training/v1.0 for version 1, but the azure-cognitiveservices-vision-customvision PyPI page points out that the correct endpoint should be https://{AzureRegion}.api.cognitive.microsoft.com, as the figure below shows.
So I got a similar issue to yours when using the incorrect endpoint, as below. The code I used is the same as yours; the only difference is the running environment: yours is on Azure Functions, mine is a console script.
Meanwhile, according to the source code custom_vision_prediction_client.py of the Azure Cognitive Services SDK for Custom Vision, you can see the line base_url = '{Endpoint}/customvision/v2.0/Prediction', which concatenates the endpoint you pass in with /customvision/v2.0/Prediction to generate the real URL for calling the prediction API.
Therefore, as #MohitVerma-MSFT said, use https://<your cognitive service region>.api.cognitive.microsoft.com for the current version of the Python package.
As an additional note, there is an announcement of an important update for customvision.ai that you should be aware of; it may affect your current code soon.
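Putting that together with the asker's blob-based flow, a minimal sketch would be the following; it keeps the question's placeholder names (account_name, container_name, cv_project_id, etc.) and assumes the older SDK release that still exposes predict_image with the key passed positionally:
import io
from azure.storage.blob import BlockBlobService
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient

block_blob_service = BlockBlobService(account_name=account_name, account_key=account_key)

fp = io.BytesIO()
block_blob_service.get_blob_to_stream(container_name, blob_name, fp, max_connections=2)

# Plain regional endpoint, with no /customvision/... suffix; the SDK appends that itself.
predictor = CustomVisionPredictionClient(
    cv_prediction_key,
    endpoint="https://southcentralus.api.cognitive.microsoft.com"
)

results = predictor.predict_image(cv_project_id, fp.getvalue(), iteration_id=cv_iteration_id)
for prediction in results.predictions:
    print("\t" + prediction.tag_name + ": {0:.2f}%".format(prediction.probability * 100))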
I've been getting an error when trying to send a batch of audio data to the Google Speech API for transcription. Sometimes it works, and sometimes it doesn't. When it doesn't work, I get an error of the form:
Traceback (most recent call last):
File "/Users/mihaileric/Documents/Research/Ford Project/forddialogue/util/record_and_transcribe_audio.py", line 196, in <module>
listen_for_speech()
File "/Users/mihaileric/Documents/Research/Ford Project/forddialogue/util/record_and_transcribe_audio.py", line 163, in listen_for_speech
transcribe_audio(filename)
File "/Users/mihaileric/Documents/Research/Ford Project/forddialogue/util/record_and_transcribe_audio.py", line 57, in transcribe_audio
for response in responses:
File "/usr/local/lib/python2.7/site-packages/grpc/_channel.py", line 366, in next
return self._next()
File "/usr/local/lib/python2.7/site-packages/grpc/_channel.py", line 357, in _next
raise self
grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with (StatusCode.INVALID_ARGUMENT, Invalid 'audio_content': too long.)>
The most relevant chunk of code is probably the following:
# Imports and RATE added here for completeness; the original snippet is a fragment of a larger script.
import io

from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types

RATE = 16000  # per the description below: 16 kHz, single channel

client = speech.SpeechClient()

# [START migration_streaming_request]
with io.open(audio_file, 'rb') as audio_file:
    content = audio_file.read()

# In practice, stream should be a generator yielding chunks of audio data.
stream = [content]
requests = (types.StreamingRecognizeRequest(audio_content=chunk)
            for chunk in stream)

config = types.RecognitionConfig(
    encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
    sample_rate_hertz=RATE,
    language_code='en-US')
streaming_config = types.StreamingRecognitionConfig(config=config)

# streaming_recognize returns a generator.
# [START migration_streaming_response]
responses = client.streaming_recognize(streaming_config, requests)
print "Responses: ", responses

for response in responses:  # <-- FAILS HERE
    for result in response.results:
        print('Finished: {}'.format(result.is_final))
        print('Stability: {}'.format(result.stability))
        alternatives = result.alternatives
        for alternative in alternatives:
            print('Confidence: {}'.format(alternative.confidence))
            print('Transcript: {}'.format(alternative.transcript))
What confuses me is that the audio data I send is not very long, never more than ~15 seconds. I use a sampling rate of 16000 on single-channel audio, and the file written is a .wav. This has also been a difficult bug to find possible solutions for on Google, since other people don't seem to have run into it. I would appreciate any leads on possible sources of error. Thanks!
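One thing worth checking, hinted at by the comment in the sample itself ("In practice, stream should be a generator yielding chunks of audio data"): the code above sends the whole file as a single StreamingRecognizeRequest, and a single streaming message that is too large can trigger exactly this kind of INVALID_ARGUMENT. A sketch of chunking the file instead is below; the chunk size is an arbitrary assumption, not a documented limit, and path is the audio file path:
def request_generator(path, chunk_size=32 * 1024):
    """Yield StreamingRecognizeRequest messages in small chunks."""
    with io.open(path, 'rb') as f:
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            yield types.StreamingRecognizeRequest(audio_content=chunk)

responses = client.streaming_recognize(streaming_config, request_generator(path))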