I keep getting a StopIteration error when running my code. The output here is just to test whether the code works, as the whole thing involves retrieving data using an API. My code is shown below:
import pandas as pd
import json
import requests
from socketIO_client import SocketIO
# FXCM demo REST endpoint; the socket.IO handshake goes to the same host/port.
TRADING_API_URL = 'https://api-demo.fxcm.com:443'
WEBSOCKET_PORT = 443
# WARNING: hard-coded credential — in real code load this from an environment
# variable or a config file, and never commit it to version control.
ACCESS_TOKEN = "ba08382c61b2b35f258e8ea64dcf4928c4263053"
def on_connect():
    """Connect callback: log the engine.IO session id once the socket is up."""
    session_id = socketIO._engineIO_session.id
    print('websocket Connected: ' + session_id)
def on_close():
    """Disconnect callback: note that the websocket has closed."""
    print('websocket Closed.')
# Open the engine.IO/socket.IO handshake against the FXCM demo endpoint.
# NOTE(review): socketIO_client (0.7.x) implements an old engine.io protocol
# revision; the StopIteration raised inside _get_engineIO_session typically
# means the server's handshake reply could not be parsed — confirm which
# socket.IO client version FXCM's API docs currently require.
socketIO = SocketIO(TRADING_API_URL, WEBSOCKET_PORT, params={'access_token': ACCESS_TOKEN})
socketIO.on('connect', on_connect)
socketIO.on('disconnect', on_close)
# FXCM REST requests authenticate with "Bearer <socket_session_id><access_token>".
Bearer_access_token = "Bearer " + socketIO._engineIO_session.id + ACCESS_TOKEN
print(Bearer_access_token)
The traceback I get is below:
---------------------------------------------------------------------------
StopIteration Traceback (most recent call last)
~\AppData\Local\Temp/ipykernel_26108/1749199291.py in <module>
14 print('websocket Closed.')
15
---> 16 socketIO = SocketIO(TRADING_API_URL, WEBSOCKET_PORT, params={'access_token': ACCESS_TOKEN})
17
18 socketIO.on('connect', on_connect)
~\anaconda3\lib\site-packages\socketIO_client\__init__.py in __init__(self, host, port, Namespace, wait_for_connection, transports, resource, hurry_interval_in_seconds, **kw)
349 self._callback_by_ack_id = {}
350 self._ack_id = 0
--> 351 super(SocketIO, self).__init__(
352 host, port, Namespace, wait_for_connection, transports,
353 resource, hurry_interval_in_seconds, **kw)
~\anaconda3\lib\site-packages\socketIO_client\__init__.py in __init__(self, host, port, Namespace, wait_for_connection, transports, resource, hurry_interval_in_seconds, **kw)
52 if Namespace:
53 self.define(Namespace)
---> 54 self._transport
55
56 # Connect
~\anaconda3\lib\site-packages\socketIO_client\__init__.py in _transport(self)
60 if self._opened:
61 return self._transport_instance
---> 62 self._engineIO_session = self._get_engineIO_session()
63 self._negotiate_transport()
64 self._connect_namespaces()
~\anaconda3\lib\site-packages\socketIO_client\__init__.py in _get_engineIO_session(self)
73 self._http_session, self._is_secure, self._url)
74 try:
---> 75 engineIO_packet_type, engineIO_packet_data = next(
76 transport.recv_packet())
77 break
StopIteration:
Is there anything I can add (or take away) from my code to help stop this issue, without affecting the desired output? Thanks
Related
I am trying to connect to an HDFS Cluster using python code, library(snakebite-py3) and I see that when I set use_sasl to True I am getting the following error:
Code Snippet:
from snakebite.client import Client

# use_sasl=True requires working Kerberos support: the snakebite[kerberos]
# extras must be installed AND a valid ticket must exist in the credential
# cache (run `kinit` first) — otherwise acquisition fails with a GSSError
# like "Credential cache is empty".
client = Client(host='hostname', port=8020,
                effective_user='user', use_sasl=True)
# List the HDFS root; ls() yields one dict per entry.
for x in client.ls(['/']):
    print(x, "\n")
Error:
---------------------------------------------------------------------------
GSSError Traceback (most recent call last)
<ipython-input-21-62c8b8df16ea> in <module>
2 from snakebite.client import Client
3
----> 4 client = Client(host='hostname',port=8020, effective_user='user', use_sasl=True)
5
6 for x in client.ls(['/test_abha']): print(x,"\n")
C:\ProgramData\Anaconda3\lib\site-packages\snakebite\client.py in __init__(self, host, port, hadoop_version, use_trash, effective_user, use_sasl, hdfs_namenode_principal, sock_connect_timeout, sock_request_timeout, use_datanode_hostname)
126 self.hdfs_namenode_principal = hdfs_namenode_principal
127 self.service_stub_class = client_proto.ClientNamenodeProtocol_Stub
--> 128 self.service = RpcService(self.service_stub_class, self.port, self.host, hadoop_version,
129 effective_user,self.use_sasl, self.hdfs_namenode_principal,
130 sock_connect_timeout, sock_request_timeout)
C:\ProgramData\Anaconda3\lib\site-packages\snakebite\service.py in __init__(self, service_stub_class, port, host, hadoop_version, effective_user, use_sasl, hdfs_namenode_principal, sock_connect_timeout, sock_request_timeout)
30
31 # Setup the RPC channel
---> 32 self.channel = SocketRpcChannel(host=self.host, port=self.port, version=hadoop_version,
33 effective_user=effective_user, use_sasl=use_sasl,
34 hdfs_namenode_principal=hdfs_namenode_principal,
C:\ProgramData\Anaconda3\lib\site-packages\snakebite\channel.py in __init__(self, host, port, version, effective_user, use_sasl, hdfs_namenode_principal, sock_connect_timeout, sock_request_timeout)
193 raise FatalException("Kerberos libs not found. Please install snakebite using 'pip install snakebite[kerberos]'")
194
--> 195 kerberos = Kerberos()
196 self.effective_user = effective_user or kerberos.user_principal()
197 else:
C:\ProgramData\Anaconda3\lib\site-packages\snakebite\kerberos.py in __init__(self)
41 class Kerberos:
42 def __init__(self):
---> 43 self.credentials = gssapi.Credentials(usage='initiate')
44
45 def user_principal(self):
C:\ProgramData\Anaconda3\lib\site-packages\gssapi\creds.py in __new__(cls, base, token, name, lifetime, mechs, usage, store)
61 base_creds = rcred_imp_exp.import_cred(token)
62 else:
---> 63 res = cls.acquire(name, lifetime, mechs, usage,
64 store=store)
65 base_creds = res.creds
C:\ProgramData\Anaconda3\lib\site-packages\gssapi\creds.py in acquire(cls, name, lifetime, mechs, usage, store)
134
135 if store is None:
--> 136 res = rcreds.acquire_cred(name, lifetime,
137 mechs, usage)
138 else:
gssapi/raw/creds.pyx in gssapi.raw.creds.acquire_cred()
GSSError: Major (851968): Unspecified GSS failure. Minor code may provide more information, Minor (39756044): Credential cache is empty
Please kindly suggest, thank you.
I was following simple py2neo tutorial here: http://nicolewhite.github.io/neo4j-jupyter/hello-world.html
Everything worked fine, all the entries appear in the neo4j in-browser version, however when I try to run inline Cypher queries, I get a 404 error.
%%cypher
http://neo4j:password#localhost:7474/db/data/
MATCH (person:Person)-[:LIKES]->(drink:Drink)
RETURN person.name, drink.name, drink.calories
Here's the traceback:
Format: (http|https)://username:password#hostname:port/db/name
---------------------------------------------------------------------------
NotFoundError Traceback (most recent call last)
<ipython-input-12-de2d5705ff61> in <module>
----> 1 get_ipython().run_cell_magic('cypher', '', 'http://neo4j:password#localhost:7474/db/data/\nMATCH (person:Person)-[:LIKES]->(drink:Drink)\nRETURN person.name, drink.name, drink.calories\n')
~/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py in run_cell_magic(self, magic_name, line, cell)
2369 with self.builtin_trap:
2370 args = (magic_arg_s, cell)
-> 2371 result = fn(*args, **kwargs)
2372 return result
2373
<decorator-gen-127> in execute(self, line, cell, local_ns)
~/.local/lib/python3.6/site-packages/IPython/core/magic.py in <lambda>(f, *a, **k)
185 # but it's overkill for just that one bit of state.
186 def magic_deco(arg):
--> 187 call = lambda f, *a, **k: f(*a, **k)
188
189 if callable(arg):
<decorator-gen-126> in execute(self, line, cell, local_ns)
~/.local/lib/python3.6/site-packages/IPython/core/magic.py in <lambda>(f, *a, **k)
185 # but it's overkill for just that one bit of state.
186 def magic_deco(arg):
--> 187 call = lambda f, *a, **k: f(*a, **k)
188
189 if callable(arg):
~/.local/lib/python3.6/site-packages/cypher/magic.py in execute(self, line, cell, local_ns)
106 user_ns.update(local_ns)
107 parsed = parse("""{0}\n{1}""".format(line, cell), self)
--> 108 conn = Connection.get(parsed['as'] or parsed['uri'], parsed['as'])
109 first_word = parsed['cypher'].split(None, 1)[:1]
110 if first_word and first_word[0].lower() == 'persist':
~/.local/lib/python3.6/site-packages/cypher/connection.py in get(cls, descriptor, alias)
45 cls.current = conn
46 else:
---> 47 cls.current = Connection(descriptor, alias)
48 if cls.current:
49 return cls.current
~/.local/lib/python3.6/site-packages/cypher/connection.py in __init__(self, connect_str, alias)
24 gdb = GraphDatabase(self.connections[connect_str])
25 else:
---> 26 gdb = GraphDatabase(connect_str)
27 alias = alias or connect_str
28 except:
~/.local/lib/python3.6/site-packages/neo4jrestclient/client.py in __init__(self, url, username, password, cert_file, key_file)
81 response_json = response.json()
82 else:
---> 83 raise NotFoundError(response.status_code, "Unable get root")
84 if "data" in response_json and "management" in response_json:
85 response = Request(**self._auth).get(response_json["data"])
NotFoundError: Code [404]: Not Found. Nothing matches the given URI.
Unable get root
I tried checking if the URI works according to this answer here, and I get 404 error there too:
~$ curl -i --user neo4j:password http://localhost:7474/db/data/
HTTP/1.1 404 Not Found
Access-Control-Allow-Origin: *
Cache-Control: must-revalidate,no-cache,no-store
Content-Type: text/html;charset=iso-8859-1
Content-Length: 0
I tried setting the Graph option to include the link, but it didn't help:
graph = Graph("http://neo4j:password#localhost:7474/db/data/")
Could you please tell me where have I made a mistake?
I am using py2neo most of the time. Here is how I connect to my local neo4j db.
from py2neo import Graph

# Neo4j 3.x+ is reached over the Bolt protocol on port 7687;
# auth is a (user, password) tuple.
graph = Graph("bolt://localhost:7687", auth=("neo4j", "xxxxx"))
try:
    # Cheapest possible round-trip query to verify the connection works.
    graph.run("Match () Return 1 Limit 1")
    print('ok')
except Exception:
    print('not ok')
I'm trying to run a following code to recognize an audio file. The code is just a compilation from different official examples. But it doesn't work.
import os
from google.cloud import speech
from google.cloud.speech import enums
from google.cloud.speech import types
from google.oauth2 import service_account
import io
def transcribe_file(speech_file):
    """Transcribe a local LINEAR16/16-kHz Russian audio file with Google Cloud Speech.

    Reads the module-level ``credentials`` global for authentication and
    prints one line per recognized result.
    """
    client = speech.SpeechClient(credentials=credentials)
    with io.open(speech_file, 'rb') as audio_file:
        content = audio_file.read()
    audio = types.RecognitionAudio(content=content)
    config = types.RecognitionConfig(
        encoding=enums.RecognitionConfig.AudioEncoding.LINEAR16,
        sample_rate_hertz=16000,
        language_code='ru-RU')
    # long_running_recognize returns a google.api_core Operation, not the
    # response itself; block on .result() to obtain the actual
    # LongRunningRecognizeResponse before iterating its results.
    operation = client.long_running_recognize(config, audio)
    response = operation.result(timeout=90)
    for result in response.results:
        print(u'Transcript: {}'.format(result.alternatives[0].transcript))
# Smoke test: pick the first file found in the local audio folder.
audio_folder_path = 'data_wav'
all_audios = os.listdir(audio_folder_path)
file_name = os.path.join(audio_folder_path, all_audios[0])
# Service-account key used for authentication; transcribe_file reads this global.
credentials = service_account.Credentials.from_service_account_file("google_aut.json")
transcribe_file(file_name)
I use Anaconda 4.7.12 for Python 3.7 under Windows 10, google-cloud-speech v 1.2.0, google-auth v 1.6.3
The error I get every time is
_Rendezvous Traceback (most recent call last)
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\grpc_helpers.py
in error_remapped_callable(*args, **kwargs)
56 try:
---> 57 return callable_(*args, **kwargs)
58 except grpc.RpcError as exc:
~\AppData\Local\Continuum\anaconda3\lib\site-packages\grpc_channel.py
in call(self, request, timeout, metadata, credentials,
wait_for_ready, compression)
564 wait_for_ready, compression)
--> 565 return _end_unary_response_blocking(state, call, False, None)
566
~\AppData\Local\Continuum\anaconda3\lib\site-packages\grpc_channel.py
in _end_unary_response_blocking(state, call, with_call, deadline)
466 else:
--> 467 raise _Rendezvous(state, None, None, deadline)
468
_Rendezvous: <_Rendezvous of RPC that terminated with:
status = StatusCode.UNAVAILABLE
details = "failed to connect to all addresses"
debug_error_string = "{"created":"#1569838382.864000000","description":"Failed to pick
subchannel","file":"src/core/ext/filters/client_channel/client_channel.cc","file_line":3818,"referenced_errors":[{"created":"#1569838382.863000000","description":"failed
to connect to all
addresses","file":"src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc","file_line":395,"grpc_status":14}]}"
>
The above exception was the direct cause of the following exception:
ServiceUnavailable Traceback (most recent call
last) in
----> 1 transcribe_file(file_name)
in transcribe_file(speech_file)
20
21 # [START speech_python_migration_sync_response]
---> 22 response = client.long_running_recognize(config, audio)
23 # [END speech_python_migration_sync_request]
24 # Each result is for a consecutive portion of the audio. Iterate through
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\cloud\speech_v1\gapic\speech_client.py
in long_running_recognize(self, config, audio, retry, timeout,
metadata)
339 )
340 operation = self._inner_api_calls["long_running_recognize"](
--> 341 request, retry=retry, timeout=timeout, metadata=metadata
342 )
343 return google.api_core.operation.from_gapic(
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\gapic_v1\method.py
in call(self, *args, **kwargs)
141 kwargs["metadata"] = metadata
142
--> 143 return wrapped_func(*args, **kwargs)
144
145
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\retry.py
in retry_wrapped_func(*args, **kwargs)
271 sleep_generator,
272 self._deadline,
--> 273 on_error=on_error,
274 )
275
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\retry.py
in retry_target(target, predicate, sleep_generator, deadline,
on_error)
180 for sleep in sleep_generator:
181 try:
--> 182 return target()
183
184 # pylint: disable=broad-except
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\timeout.py
in func_with_timeout(*args, **kwargs)
212 """Wrapped function that adds timeout."""
213 kwargs["timeout"] = next(timeouts)
--> 214 return func(*args, **kwargs)
215
216 return func_with_timeout
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\grpc_helpers.py
in error_remapped_callable(*args, **kwargs)
57 return callable_(*args, **kwargs)
58 except grpc.RpcError as exc:
---> 59 six.raise_from(exceptions.from_grpc_error(exc), exc)
60
61 return error_remapped_callable
~\AppData\Local\Continuum\anaconda3\lib\site-packages\six.py in
raise_from(value, from_value)
ServiceUnavailable: 503 failed to connect to all addresses
How can I fix it?
This could be failing due to the credentials. Let's try few things:
Ensure that your service account key is correct, you should have something like this:
from google.oauth2 import service_account

# Load the service-account key explicitly instead of relying on
# GOOGLE_APPLICATION_CREDENTIALS being set in the environment.
credentials = service_account.Credentials.from_service_account_file('service_account_key.json')
# NOTE: this rebinds the name `speech`, shadowing the imported module —
# prefer a distinct name such as `client` in real code.
speech = speech.SpeechClient(credentials=credentials)
OR
# Equivalent construction when using the versioned client module (speech_v1).
speech = speech_v1.SpeechClient(credentials=credentials)
Use a Scope:
# Scoped variant: restrict the key to the cloud-platform OAuth scope.
credentials = service_account.Credentials.from_service_account_file(
credentials_json,
scopes=['https://www.googleapis.com/auth/cloud-platform'])
More info here.
In this thread it was solved by using a single instance of a session client object for multiple requests.
This could be either a network issue as Dustin said. More info here 503 Service Unavailable
Please let us know if you manage to solve this error.
I was trying to import ecoinvent 3.5 cutoff to a project using brightway, with the following:
# Import ecoinvent 3.5 cutoff into the current Brightway project, but only
# once: skip the (slow) extraction if the database is already registered.
if 'ecoinvent 3.5 cutoff' not in databases:
    ei35cutofflink = r"H:\Data\ecoinvent 3.5_cutoff_lci_ecoSpold02\datasets"
    ei35cutoff = SingleOutputEcospold2Importer(ei35cutofflink, 'ecoinvent 3.5 cutoff')
    ei35cutoff.apply_strategies()
    ei35cutoff.statistics()
    ei35cutoff.write_database()
But I got the following error. It looks like the issue is not that related to brightway, but rather multiprocessing or pickle? I don't understand what the error message means.
---------------------------------------------------------------------------
MaybeEncodingError Traceback (most recent call last)
<ipython-input-4-f9acb2bc0c84> in <module>
1 if 'ecoinvent 3.5 cutoff' not in databases:
2 ei35cutofflink=r"H:\Data\ecoinvent 3.5_cutoff_lci_ecoSpold02\datasets"
----> 3 ei35cutoff=SingleOutputEcospold2Importer(ei35cutofflink, 'ecoinvent 3.5 cutoff')
4 ei35cutoff.apply_strategies()
5 ei35cutoff.statistics()
C:\miniconda3_py37\envs\ab\lib\site-packages\bw2io\importers\ecospold2.py in __init__(self, dirpath, db_name, extractor, use_mp, signal)
63 start = time()
64 try:
---> 65 self.data = extractor.extract(dirpath, db_name, use_mp=use_mp)
66 except RuntimeError as e:
67 raise MultiprocessingError('Multiprocessing error; re-run using `use_mp=False`'
C:\miniconda3_py37\envs\ab\lib\site-packages\bw2io\extractors\ecospold2.py in extract(cls, dirpath, db_name, use_mp)
91 ) for x in filelist
92 ]
---> 93 data = [p.get() for p in results]
94 else:
95 pbar = pyprind.ProgBar(len(filelist), title="Extracting ecospold2 files:", monitor=True)
C:\miniconda3_py37\envs\ab\lib\site-packages\bw2io\extractors\ecospold2.py in <listcomp>(.0)
91 ) for x in filelist
92 ]
---> 93 data = [p.get() for p in results]
94 else:
95 pbar = pyprind.ProgBar(len(filelist), title="Extracting ecospold2 files:", monitor=True)
C:\miniconda3_py37\envs\ab\lib\multiprocessing\pool.py in get(self, timeout)
655 return self._value
656 else:
--> 657 raise self._value
658
659 def _set(self, i, obj):
MaybeEncodingError: Error sending result: '<multiprocessing.pool.ExceptionWithTraceback object at 0x000001D257C55358>'. Reason: 'TypeError("can't pickle lxml.etree._ListErrorLog objects")'
You can use use_mp=False to get a sense of what the actual error is (instead of the error not being pickle-able, and this raising a separate error). In this case I think you have a problem with the data folder, which you can solve by deleting it and downloading or extracting it again.
I am trying to use https://github.com/dowjones/dj-dna-streams-python/tree/master/dnaStreaming . It's a package to receive news streams from Dow Jones. When I try to "listen" to the streams I receive the following error:
---------------------------------------------------------------------------
ImportError Traceback (most recent call last)
<ipython-input-36-372f4305a9e1> in <module>()
1 while True:
----> 2 listener.listen(callback, maximum_messages=4, subscription_id=subscription_id)
~\AppData\Local\Continuum\Anaconda3\lib\site-packages\dj_dna_streaming_python-1.0.10-py3.5.egg\dnaStreaming\listener.py in listen(self, on_message_callback, maximum_messages, subscription_id)
21 def listen(self, on_message_callback, maximum_messages=DEFAULT_UNLIMITED_MESSAGES, subscription_id=None):
22 limit_pull_calls = not (maximum_messages == self.DEFAULT_UNLIMITED_MESSAGES)
---> 23 pubsub_client = pubsub_service.get_client(self.config)
24
25 subscription_id = subscription_id if subscription_id is not None else self.config.subscription()
~\AppData\Local\Continuum\Anaconda3\lib\site-packages\dj_dna_streaming_python-1.0.10-py3.5.egg\dnaStreaming\services\pubsub_service.py in get_client(config)
7
8
----> 9 def get_client(config):
10 streaming_credentials = credentials_service.fetch_credentials(config)
11 credentials = authentication_service.get_authenticated_oauth_credentials(streaming_credentials)
~\AppData\Local\Continuum\Anaconda3\lib\site-packages\dj_dna_streaming_python-1.0.10-py3.5.egg\dnaStreaming\services\credentials_service.py in fetch_credentials(config)
11 response = _get_requests().get(config.credentials_uri(), headers=headers)
12
---> 13 streaming_credentials_string = json.loads(response.text)['data']['attributes']['streaming_credentials']
14
15 return json.loads(streaming_credentials_string)
~\AppData\Local\Continuum\Anaconda3\lib\site-packages\requests\models.py in text(self)
824
825 self._content_consumed = True
--> 826 # don't need to release the connection; that's been handled by urllib3
827 # since we exhausted the data.
828 return self._content
~\AppData\Local\Continuum\Anaconda3\lib\site-packages\requests\models.py in apparent_encoding(self)
694 is **not** a check to see if the response code is ``200 OK``.
695 """
--> 696 try:
697 self.raise_for_status()
698 except HTTPError:
~\AppData\Local\Continuum\Anaconda3\lib\site-packages\requests\packages\chardet\__init__.py in detect(aBuf)
ImportError: cannot import name 'universaldetector'
I understand that the key part is that I can't import universaldetector. Any idea why is that? I have seen this answer but can't really relate to my problem. I have upgraded chardet and requests.
I am on Python 3 and Windows, executing the code in a Jupyter Notebook.