NoBrokersAvailable: NoBrokersAvailable Error in Kafka - python

I've stumbled upon a 'NoBrokersAvailable: NoBrokersAvailable'-error in our Jupyter-notebook using this code:
from kafka import KafkaProducer
from kafka.errors import KafkaError
def on_send_success(record_metadata):
print(record_metadata.topic)
print(record_metadata.partition)
print(record_metadata.offset)
def on_send_error(excp):
log.error('I am an errback', exc_info=excp)
# handle exception
producer = KafkaProducer(bootstrap_servers=['localhost:9092'], value_serializer=lambda m: json.dumps(m).encode('utf-8'))
INTERVAL =10
while True:
data_points = get_realtime_stock('AAPL')
data = {'updated_on': data_points['updated_on'], 'ticker': data_points['security']['ticker'] ,'last_price': data_points['last_price']}
message = data_points
producer.send('data1', value=data).add_callback(on_send_success).add_errback(on_send_error)
time.sleep(INTERVAL)
Here the respective error:
---------------------------------------------------------------------------
NoBrokersAvailable Traceback (most recent call last)
<ipython-input-8-cab724428b84> in <module>
11 # handle exception
12
---> 13 producer = KafkaProducer(bootstrap_servers=['localhost:9092'], value_serializer=lambda m: json.dumps(m).encode('utf-8'))
14 INTERVAL =10
15 while True:
~/anaconda3/lib/python3.7/site-packages/kafka/producer/kafka.py in __init__(self, **configs)
379 client = KafkaClient(metrics=self._metrics, metric_group_prefix='producer',
380 wakeup_timeout_ms=self.config['max_block_ms'],
--> 381 **self.config)
382
383 # Get auto-discovered version from client if necessary
~/anaconda3/lib/python3.7/site-packages/kafka/client_async.py in __init__(self, **configs)
237 if self.config['api_version'] is None:
238 check_timeout = self.config['api_version_auto_timeout_ms'] / 1000
--> 239 self.config['api_version'] = self.check_version(timeout=check_timeout)
240
241 def _can_bootstrap(self):
~/anaconda3/lib/python3.7/site-packages/kafka/client_async.py in check_version(self, node_id, timeout, strict)
890 else:
891 self._lock.release()
--> 892 raise Errors.NoBrokersAvailable()
893
894 def wakeup(self):
NoBrokersAvailable: NoBrokersAvailable
The code worked just fine but out of nowhere it just stopped working for whatever reason.
Does anyone know what the problem might be?

I had the same error and I solved it by specifying the API version on the function KafkaProducer. Here is a sample from my code.
Please specify the version of your kafka-python library if the error persists.
producer = KafkaProducer(
bootstrap_servers=#####,
client_id=######,
value_serializer=JsonSerializer.serialize,
api_version=(0, 10, 1)
)
For the API version, you should put your Kafka version.

Related

how to prevent ratelimiterror exchangelib python

I'm pulling all the emails daily from some 8 different postboxes via exchangelib. I was using it the whole week, but now the code seems to be throttled by the Exchange server, as the error below gets thrown while it's trying to grab the first email. So I want to learn how to handle it and not get throttled anymore. I already implemented one retry policy
credentials = Credentials(username='username', password='password')
config = Configuration(retry_policy=FaultTolerance(max_wait=600), credentials=credentials)
For that I'm using the following code:
while True:
try:
for shared_postbox in tqdm(shared_postboxes):
account = Account(shared_postbox, credentials=credentials, config = config, autodiscover=True)
top_folder = account.root
email_folders = [f for f in top_folder.walk() if isinstance(f, Messages)]
for folder in tqdm(email_folders):
#added item_class in filter and removed order by
#for m in folder.all().only('text_body', 'datetime_received', "sender").filter(datetime_received__range=(start_of_month,end_of_month), sender__exists=True):
#when since statement is needed
for m in folder.all().only('text_body', 'datetime_received', "sender").filter(datetime_received__gt=midnight, sender__exists=True):
try:
senderdomain = ExtractingDomain(m.sender.email_address)
senderdomains.append(senderdomain)
except:
print("could not extract domain")
else:
if senderdomain in domains_of_interest:
postboxname = account.identity.primary_smtp_address
body = m.text_body
emails.append(body)
sender.append(senderdomain)
postbox.append(postboxname)
received.append(m.datetime_received)
#else:
# print("nicht in domains of interest")
account.protocol.close()
except RateLimitError as e:
time.sleep(60)
This is the error I get:
RateLimitError Traceback (most recent call last)
Input In [4], in <cell line: 77>()
81 account = Account(shared_postbox, credentials=credentials, config = config, autodiscover=True)
---> 82 top_folder = account.root
83 email_folders = [f for f in top_folder.walk() if isinstance(f, Messages)]
File ~\.conda\envs\python383\lib\site-packages\cached_property.py:74, in threaded_cached_property.__get__(self, obj, cls)
72 except KeyError:
73 # if not, do the calculation and release the lock
---> 74 return obj_dict.setdefault(name, self.func(obj))
File ~\.conda\envs\python383\lib\site-packages\exchangelib\account.py:349, in Account.root(self)
347 #threaded_cached_property
348 def root(self):
--> 349 return Root.get_distinguished(account=self)
File ~\.conda\envs\python383\lib\site-packages\exchangelib\folders\roots.py:114, in RootOfHierarchy.get_distinguished(cls, account)
113 try:
--> 114 return cls.resolve(
115 account=account, folder=cls(account=account, name=cls.DISTINGUISHED_FOLDER_ID, is_distinguished=True)
116 )
117 except MISSING_FOLDER_ERRORS:
File ~\.conda\envs\python383\lib\site-packages\exchangelib\folders\base.py:512, in BaseFolder.resolve(cls, account, folder)
509 #classmethod
510 def resolve(cls, account, folder):
511 # Resolve a single folder
--> 512 folders = list(FolderCollection(account=account, folders=[folder]).resolve())
513 if not folders:
File ~\.conda\envs\python383\lib\site-packages\exchangelib\folders\collections.py:335, in FolderCollection.resolve(self)
334 additional_fields = self.get_folder_fields(target_cls=self._get_target_cls())
--> 335 yield from self.__class__(account=self.account, folders=resolveable_folders).get_folders(
336 additional_fields=additional_fields
337 )
File ~\.conda\envs\python383\lib\site-packages\exchangelib\folders\collections.py:403, in FolderCollection.get_folders(self, additional_fields)
399 additional_fields.update(
400 (FieldPath(field=BaseFolder.get_field_by_fieldname(f)) for f in self.REQUIRED_FOLDER_FIELDS)
401 )
--> 403 yield from GetFolder(account=self.account).call(
404 folders=self.folders,
405 additional_fields=additional_fields,
406 shape=ID_ONLY,
407 )
File ~\.conda\envs\python383\lib\site-packages\exchangelib\services\get_folder.py:43, in GetFolder._elems_to_objs(self, elems)
42 def _elems_to_objs(self, elems):
---> 43 for folder, elem in zip(self.folders, elems):
44 if isinstance(elem, Exception):
File ~\.conda\envs\python383\lib\site-packages\exchangelib\services\common.py:246, in EWSService._chunked_get_elements(self, payload_func, items, **kwargs)
245 log.debug("Processing chunk %s containing %s items", i, len(chunk))
--> 246 yield from self._get_elements(payload=payload_func(chunk, **kwargs))
File ~\.conda\envs\python383\lib\site-packages\exchangelib\services\common.py:266, in EWSService._get_elements(self, payload)
263 try:
264 # Create a generator over the response elements so exceptions in response elements are also raised
265 # here and can be handled.
--> 266 yield from self._response_generator(payload=payload)
267 return
File ~\.conda\envs\python383\lib\site-packages\exchangelib\services\common.py:228, in EWSService._response_generator(self, payload)
223 """Send the payload to the server, and return the response.
224
225 :param payload: payload as an XML object
226 :return: the response, as XML objects
227 """
--> 228 response = self._get_response_xml(payload=payload)
229 if self.supports_paging:
File ~\.conda\envs\python383\lib\site-packages\exchangelib\services\common.py:343, in EWSService._get_response_xml(self, payload, **parse_opts)
342 log.debug("Trying API version %s", api_version)
--> 343 r = self._get_response(payload=payload, api_version=api_version)
344 if self.streaming:
345 # Let 'requests' decode raw data automatically
File ~\.conda\envs\python383\lib\site-packages\exchangelib\services\common.py:298, in EWSService._get_response(self, payload, api_version)
297 session = self.protocol.get_session()
--> 298 r, session = post_ratelimited(
299 protocol=self.protocol,
300 session=session,
301 url=self.protocol.service_endpoint,
302 headers=self._extra_headers(session),
303 data=wrap(
304 content=payload,
305 api_version=api_version,
306 account_to_impersonate=self._account_to_impersonate,
307 timezone=self._timezone,
308 ),
309 stream=self.streaming,
310 timeout=self.timeout or self.protocol.TIMEOUT,
311 )
312 self._handle_response_cookies(session)
File ~\.conda\envs\python383\lib\site-packages\exchangelib\util.py:880, in post_ratelimited(protocol, session, url, headers, data, allow_redirects, stream, timeout)
879 total_wait = time.monotonic() - t_start
--> 880 if protocol.retry_policy.may_retry_on_error(response=r, wait=total_wait):
881 r.close() # Release memory
File ~\.conda\envs\python383\lib\site-packages\exchangelib\protocol.py:780, in FaultTolerance.may_retry_on_error(self, response, wait)
778 if wait > self.max_wait:
779 # We lost patience. Session is cleaned up in outer loop
--> 780 raise RateLimitError(
781 "Max timeout reached", url=response.url, status_code=response.status_code, total_wait=wait
782 )
783 if response.status_code == 401:
784 # EWS sometimes throws 401's when it wants us to throttle connections. OK to retry.
RateLimitError: Max timeout reached (gave up after 634.031 seconds. URL https://outlook.office365.com/EWS/Exchange.asmx returned status code 401)
When I looked into it, I saw that exchangelib has a function to handle the throttle policy, but I don't know how to implement it. Could the function
def post_ratelimited(protocol, session, url, headers, data, stream=False, timeout=None)
help me in this case? I found this function in their documentation.
You defined a policy that tells exchangelib to retry for up to 600 seconds. The code threw an exception after waiting for more than 600 seconds. That's how it's supposed to work.
If you want the code to retry for a longer period, then increase the max_wait value.
Guide to EWS throttling and how to handle it is here: https://learn.microsoft.com/en-us/exchange/client-developer/exchange-web-services/ews-throttling-in-exchange

How to connect to an LDAP server using Python (Version 3.8.8)

I need to connect to an LDAP server using Python (Version 3.8.8) and I've already tried to replicate some examples such as those shown in the links below, but none of them are working for me. I either get (TypeError: iter() returned non-iterator of type 'NoneType') or (PyAsn1Error: Attempted "__iter__" operation on ASN.1 schema object). It seems like ldap3 and python-ldap used in the examples were not updated to work with Python 3.8.8. I would be very pleased if anyone could give me a real example of another existing library to connect to an LDAP server or help me with this issue.
(1) https://sixfeetup.com/blog/new-ldap3-python-ldap-library
(2) https://ldap3.readthedocs.io/en/latest/tutorial_intro.html
(3) https://stackoverflow.com/questions/58907026/ldap3-bind-failed-when-cn-and-displayname-are-different
my test error for the example in link 3:
from ldap3 import Server, Connection
server = Server('ipa.demo1.freeipa.org')
conn = Connection(server)
conn.bind()
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
ipython-input-4-470896b147da> in <module>
2 server = Server('ipa.demo1.freeipa.org')
3 conn = Connection(server)
----> 4 conn.bind()
C:\ProgramData\Anaconda3\lib\site-packages\ldap3\core\connection.py in bind(self, read_server_info, controls)
418 if log_enabled(PROTOCOL):
419 log(PROTOCOL, 'anonymous BIND request <%s> sent via <%s>', bind_request_to_dict(request), self)
--> 420 response = self.post_send_single_response(self.send('bindRequest', request, controls))
421 elif self.authentication == SIMPLE:
422 if log_enabled(PROTOCOL):
C:\ProgramData\Anaconda3\lib\site-packages\ldap3\strategy\sync.py in post_send_single_response(self, message_id)
120 Returns the result message or None
121 """
--> 122 responses, result = self.get_response(message_id)
123 self.connection.result = result
124 if result['type'] == 'intermediateResponse': # checks that all responses are intermediates (there should be only one)
C:\ProgramData\Anaconda3\lib\site-packages\ldap3\strategy\base.py in get_response(self, message_id, timeout)
296 if self._outstanding and message_id in self._outstanding:
297 while timeout >= 0: # waiting for completed message to appear in responses
--> 298 responses = self._get_response(message_id)
299 if not responses:
300 sleep(RESPONSE_SLEEPTIME)
C:\ProgramData\Anaconda3\lib\site-packages\ldap3\strategy\sync.py in _get_response(self, message_id)
166 log(EXTENDED, 'ldap message received via <%s>:%s', self.connection, format_ldap_message(ldap_resp, '<<'))
167 if int(ldap_resp['messageID']) == message_id:
--> 168 dict_response = self.decode_response(ldap_resp)
169 ldap_responses.append(dict_response)
170 if dict_response['type'] not in ['searchResEntry', 'searchResRef', 'intermediateResponse']:
C:\ProgramData\Anaconda3\lib\site-packages\ldap3\strategy\base.py in decode_response(self, ldap_message)
401 if message_type == 'bindResponse':
402 if not bytes(component['matchedDN']).startswith(b'NTLM'): # patch for microsoft ntlm authentication
--> 403 result = bind_response_to_dict(component)
404 else:
405 result = sicily_bind_response_to_dict(component)
C:\ProgramData\Anaconda3\lib\site-packages\ldap3\operation\bind.py in bind_response_to_dict(response)
116 'dn': str(response['matchedDN']),
117 'message': str(response['diagnosticMessage']),
--> 118 'referrals': referrals_to_list(response['referral']),
119 'saslCreds': bytes(response['serverSaslCreds']) if response['serverSaslCreds'] is not None else None}
120
C:\ProgramData\Anaconda3\lib\site-packages\ldap3\protocol\convert.py in referrals_to_list(referrals)
42
43 def referrals_to_list(referrals):
---> 44 return [str(referral) for referral in referrals if referral] if referrals else None
45
46
C:\ProgramData\Anaconda3\lib\site-packages\pyasn1\type\base.py in __bool__(self)
572 else:
573 def __bool__(self):
--> 574 return bool(self.components)
575
576 #property
C:\ProgramData\Anaconda3\lib\site-packages\pyasn1\type\univ.py in components(self)
1958 def components(self):
1959 return [self._componentValues[idx]
-> 1960 for idx in sorted(self._componentValues)]
1961
1962 def clear(self):
TypeError: iter() returned non-iterator of type 'NoneType'
It was a problem of library version... I ran the "pip install --upgrade ldap3" command and it worked fine

run crypto feed in a concurrent thread: There is no current event loop in thread 'ThreadPoolExecutor-0_0'

I'm trying to use crypto feed to download data concurrently.
f = FeedHandler()
f.add_feed(Gateio(channels=[TRADES], symbols=list_tmp, callbacks={ TRADES: TradePostgresGateio(**postgres_cfg)}))
f.run()
This code above can be run successfully. However, I am trying to run it in the background. So I am using concurrent futures to help.
executor = concurrent.futures.ThreadPoolExecutor(16)
job2 = executor.submit(f.run)
However, I got error:
job2.result()
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-54-f96e35ee3c66> in <module>
----> 1 job2.result()
~/anaconda3/lib/python3.8/concurrent/futures/_base.py in result(self, timeout)
430 raise CancelledError()
431 elif self._state == FINISHED:
--> 432 return self.__get_result()
433
434 self._condition.wait(timeout)
~/anaconda3/lib/python3.8/concurrent/futures/_base.py in __get_result(self)
386 def __get_result(self):
387 if self._exception:
--> 388 raise self._exception
389 else:
390 return self._result
~/anaconda3/lib/python3.8/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~/anaconda3/lib/python3.8/site-packages/cryptofeed/feedhandler.py in run(self, start_loop, install_signal_handlers, exception_handler)
145 raise ValueError(txt)
146
--> 147 loop = asyncio.get_event_loop()
148 # Good to enable when debugging or without code change: export PYTHONASYNCIODEBUG=1)
149 # loop.set_debug(True)
~/anaconda3/lib/python3.8/asyncio/events.py in get_event_loop(self)
637
638 if self._local._loop is None:
--> 639 raise RuntimeError('There is no current event loop in thread %r.'
640 % threading.current_thread().name)
641
RuntimeError: There is no current event loop in thread 'ThreadPoolExecutor-0_0'.
Could anyone help me? Thanks so much!
Edit: following
def threadable():
f = FeedHandler()
f.add_feed(Gateio(channels=[TRADES], symbols=list_tmp, callbacks={ TRADES: TradePostgresGateio(**postgres_cfg)}))
f.run()
executor = concurrent.futures.ThreadPoolExecutor(16)
job2 = executor.submit(threadable)
job2.done()
job2.result()
I got the error: It seems I still got the same error about event loop... is it solvable?
RuntimeError Traceback (most recent call last)
<ipython-input-47-05c023dd326f> in <module>
11 job2.done()
12
---> 13 job2.result()
~/anaconda3/lib/python3.8/concurrent/futures/_base.py in result(self, timeout)
437 raise CancelledError()
438 elif self._state == FINISHED:
--> 439 return self.__get_result()
440 else:
441 raise TimeoutError()
~/anaconda3/lib/python3.8/concurrent/futures/_base.py in __get_result(self)
386 def __get_result(self):
387 if self._exception:
--> 388 raise self._exception
389 else:
390 return self._result
~/anaconda3/lib/python3.8/concurrent/futures/thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
<ipython-input-47-05c023dd326f> in threadable()
2 f = FeedHandler()
3 f.add_feed(Gateio(channels=[TRADES], symbols=list_tmp, callbacks={ TRADES: TradePostgresGateio(**postgres_cfg)}))
----> 4 f.run()
5
6
~/anaconda3/lib/python3.8/site-packages/cryptofeed/feedhandler.py in run(self, start_loop, install_signal_handlers, exception_handler)
145 raise ValueError(txt)
146
--> 147 loop = asyncio.get_event_loop()
148 # Good to enable when debugging or without code change: export PYTHONASYNCIODEBUG=1)
149 # loop.set_debug(True)
~/anaconda3/lib/python3.8/asyncio/events.py in get_event_loop(self)
637
638 if self._local._loop is None:
--> 639 raise RuntimeError('There is no current event loop in thread %r.'
640 % threading.current_thread().name)
641
RuntimeError: There is no current event loop in thread 'ThreadPoolExecutor-1_0'.
In the single-threaded version of your code, all three of these statements execute in the same thread in a simple sequential fashion:
f = FeedHandler()
f.add_feed(Gateio(channels=[TRADES], symbols=list_tmp, callbacks={ TRADES: TradePostgresGateio(**postgres_cfg)}))
f.run()
In the multithreaded version, you submit only the last line to the Executor, and therefore it will run in a secondary thread. But these statements, as far as I can tell from the code you provided, still execute in the main thread:
f = FeedHandler()
f.add_feed(Gateio(channels=[TRADES], symbols=list_tmp, callbacks={ TRADES: TradePostgresGateio(**postgres_cfg)}))
How do you know that will work? In general it would depend on the implementation details of Gateio and Feedhandler. You need to be very careful about chopping up a program into pieces to be run in different threads, especially when third-party library calls are involved. So, good luck with that.
You could try this:
def threadable():
f = FeedHandler()
f.add_feed(Gateio(channels=[TRADES], symbols=list_tmp, callbacks={ TRADES: TradePostgresGateio(**postgres_cfg)}))
f.run()
...
executor = concurrent.futures.ThreadPoolExecutor(16)
job2 = executor.submit(threadable)
Then, at least, your entire sequence of steps will execute in the SAME thread.
I would be worried about those callbacks, however. They will now run in the secondary thread, and you need to understand the consequences of that. Do they interact with a user interface program? Your UI may not support multithreading.
The use of the Executor protocol is a bit weird here, since your function doesn't return a value. The Executors are most useful when they are used to aggregate returned values. You may be better off just launching the threads you need using methods in the threading module.

Range query in Redisearch with Python client

I'm trying to query a range of values in Redisearch with the python client but it's not reading the space in between the values correctly. Any thoughts on how to fix?
conn = redis.Redis(host='localhost', port=6379, db=0)
q = 'FT.SEARCH idx "#date:[20200101 20200301]" LIMIT 0 100'
conn.execute_command(q)
Throws the error:
--------------------------------------------------------------------------
ResponseError Traceback (most recent call last)
<ipython-input-31-5321b184194e> in <module>
2 q = f'''FT.SEARCH idx "#date:[20200101 20200301]" LIMIT 0 1000000'''
3
----> 4 res = conn.execute_command(q)
5 print(res)
C:\Miniconda3\envs\ssnc\lib\site-packages\redis\client.py in execute_command(self, *args, **options)
899 try:
900 conn.send_command(*args)
--> 901 return self.parse_response(conn, command_name, **options)
902 except (ConnectionError, TimeoutError) as e:
903 conn.disconnect()
C:\Miniconda3\envs\ssnc\lib\site-packages\redis\client.py in parse_response(self, connection, command_name, **options)
913 "Parses a response from the Redis server"
914 try:
--> 915 response = connection.read_response()
916 except ResponseError:
917 if EMPTY_RESPONSE in options:
C:\Miniconda3\envs\ssnc\lib\site-packages\redis\connection.py in read_response(self)
754
755 if isinstance(response, ResponseError):
--> 756 raise response
757 return response
758
ResponseError: Unknown argument `20200301]"` at position 1 for <main>
Try passing the command separately from the arguments. Here's an example:
conn.execute_command('ft.search', 'books-idx', '#average_rating:[0 1]')
We also have a dedicated Python library for RediSearch built on top of redis-py: https://github.com/RediSearch/redisearch-py

ServiceUnavailable: 503 failed to connect to all addresses during attempt to recognize audio

I'm trying to run a following code to recognize an audio file. The code is just a compilation from different official examples. But it doesn't work.
import os
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
from google.oauth2 import service_account
import io
def transcribe_file(speech_file):
client = speech.SpeechClient(credentials=credentials)
with io.open(speech_file, 'rb') as audio_file:
content = audio_file.read()
audio = types.RecognitionAudio(content=content)
config = types.RecognitionConfig(
encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
sample_rate_hertz=16000,
language_code='ru-RU')
response = client.long_running_recognize(config, audio)
for result in response.results:
print(u'Transcript: {}'.format(result.alternatives[0].transcript))
audio_folder_path = 'data_wav'
all_audios = os.listdir(audio_folder_path)
file_name = os.path.join(audio_folder_path, all_audios[0])
credentials = service_account.Credentials.from_service_account_file("google_aut.json")
transcribe_file(file_name)
I use Anaconda 4.7.12 for Python 3.7 under Windows 10, google-cloud-speech v 1.2.0, google-auth v 1.6.3
The error I get every time is
_Rendezvous Traceback (most recent call last)
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\grpc_helpers.py
in error_remapped_callable(*args, **kwargs)
56 try:
---> 57 return callable_(*args, **kwargs)
58 except grpc.RpcError as exc:
~\AppData\Local\Continuum\anaconda3\lib\site-packages\grpc_channel.py
in call(self, request, timeout, metadata, credentials,
wait_for_ready, compression)
564 wait_for_ready, compression)
--> 565 return _end_unary_response_blocking(state, call, False, None)
566
~\AppData\Local\Continuum\anaconda3\lib\site-packages\grpc_channel.py
in _end_unary_response_blocking(state, call, with_call, deadline)
466 else:
--> 467 raise _Rendezvous(state, None, None, deadline)
468
_Rendezvous: <_Rendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "failed to connect to all addresses"
debug_error_string = "{"created":"#1569838382.864000000","description":"Failed to pick
subchannel","file":"src/core/ext/filters/client_channel/client_channel.cc","file_line":3818,"referenced_errors":[{"created":"#1569838382.863000000","description":"failed
to connect to all
addresses","file":"src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc","file_line":395,"grpc_status":14}]}"
>
The above exception was the direct cause of the following exception:
ServiceUnavailable Traceback (most recent call
last) in
----> 1 transcribe_file(file_name)
in transcribe_file(speech_file)
20
21 # [START speech_python_migration_sync_response]
---> 22 response = client.long_running_recognize(config, audio)
23 # [END speech_python_migration_sync_request]
24 # Each result is for a consecutive portion of the audio. Iterate through
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\cloud\speech_v1\gapic\speech_client.py
in long_running_recognize(self, config, audio, retry, timeout,
metadata)
339 )
340 operation = self._inner_api_calls["long_running_recognize"](
--> 341 request, retry=retry, timeout=timeout, metadata=metadata
342 )
343 return google.api_core.operation.from_gapic(
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\gapic_v1\method.py
in call(self, *args, **kwargs)
141 kwargs["metadata"] = metadata
142
--> 143 return wrapped_func(*args, **kwargs)
144
145
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\retry.py
in retry_wrapped_func(*args, **kwargs)
271 sleep_generator,
272 self._deadline,
--> 273 on_error=on_error,
274 )
275
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\retry.py
in retry_target(target, predicate, sleep_generator, deadline,
on_error)
180 for sleep in sleep_generator:
181 try:
--> 182 return target()
183
184 # pylint: disable=broad-except
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\timeout.py
in func_with_timeout(*args, **kwargs)
212 """Wrapped function that adds timeout."""
213 kwargs["timeout"] = next(timeouts)
--> 214 return func(*args, **kwargs)
215
216 return func_with_timeout
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\grpc_helpers.py
in error_remapped_callable(*args, **kwargs)
57 return callable_(*args, **kwargs)
58 except grpc.RpcError as exc:
---> 59 six.raise_from(exceptions.from_grpc_error(exc), exc)
60
61 return error_remapped_callable
~\AppData\Local\Continuum\anaconda3\lib\site-packages\six.py in
raise_from(value, from_value)
ServiceUnavailable: 503 failed to connect to all addresses
How can I fix it?
This could be failing due to the credentials. Let's try a few things:
Ensure that your service account key is correct, you should have something like this:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file('service_account_key.json')
speech = speech.SpeechClient(credentials=credentials)
OR
speech = speech_v1.SpeechClient(credentials=credentials)
Use a Scope:
credentials = service_account.Credentials.from_service_account_file(
credentials_json,
scopes=['https://www.googleapis.com/auth/cloud-platform'])
More info here.
In this thread it was solved by using a single instance of a session client object for multiple requests.
This could be either a network issue as Dustin said. More info here 503 Service Unavailable
Please let us know if you manage to solve this error.

Categories