Related
I am doing a machine learning task. I keep getting a connection refused error on Colab when I convert my pandas series to a NumPy array. I have to restart the runtime every time, because once this happens none of the cells seem to work anymore and they all give the same error. It works sometimes when the length of the series is small, I guess, but most of the time it doesn't work and gives me this error.
X = train['finished_embeddings'].values
ConnectionRefusedError Traceback (most recent call last)
<ipython-input-21-fc36e96b715b> in <module>
----> 1 X = train['finished_embeddings'].values
12 frames
/usr/local/lib/python3.7/dist-packages/pyspark/pandas/generic.py in values(self)
637 """
638 warnings.warn("We recommend using `{}.to_numpy()` instead.".format(type(self).__name__))
--> 639 return self.to_numpy()
640
641 def to_csv(
/usr/local/lib/python3.7/dist-packages/pyspark/pandas/generic.py in to_numpy(self)
574 array(['a', 'b', 'a'], dtype=object)
575 """
--> 576 return self.to_pandas().values
577
578 @property
/usr/local/lib/python3.7/dist-packages/pyspark/pandas/series.py in to_pandas(self)
1540 Name: dogs, dtype: float64
1541 """
-> 1542 return self._to_internal_pandas().copy()
1543
1544 def to_list(self) -> List:
/usr/local/lib/python3.7/dist-packages/pyspark/pandas/series.py in _to_internal_pandas(self)
6287 This method is for internal use only.
6288 """
-> 6289 return self._psdf._internal.to_pandas_frame[self.name]
6290
6291 def __repr__(self) -> str_type:
/usr/local/lib/python3.7/dist-packages/pyspark/pandas/utils.py in wrapped_lazy_property(self)
578 def wrapped_lazy_property(self):
579 if not hasattr(self, attr_name):
--> 580 setattr(self, attr_name, fn(self))
581 return getattr(self, attr_name)
582
/usr/local/lib/python3.7/dist-packages/pyspark/pandas/internal.py in to_pandas_frame(self)
1049 """Return as pandas DataFrame."""
1050 sdf = self.to_internal_spark_frame
-> 1051 pdf = sdf.toPandas()
1052 if len(pdf) == 0 and len(sdf.schema) > 0:
1053 pdf = pdf.astype(
/usr/local/lib/python3.7/dist-packages/pyspark/sql/pandas/conversion.py in toPandas(self)
65 import pandas as pd
66
---> 67 timezone = self.sql_ctx._conf.sessionLocalTimeZone()
68
69 if self.sql_ctx._conf.arrowPySparkEnabled():
/usr/local/lib/python3.7/dist-packages/pyspark/sql/context.py in _conf(self)
107 def _conf(self):
108 """Accessor for the JVM SQL-specific configurations"""
--> 109 return self.sparkSession._jsparkSession.sessionState().conf()
110
111 @classmethod
/usr/local/lib/python3.7/dist-packages/py4j/java_gateway.py in __call__(self, *args)
1318 proto.END_COMMAND_PART
1319
-> 1320 answer = self.gateway_client.send_command(command)
1321 return_value = get_return_value(
1322 answer, self.gateway_client, self.target_id, self.name)
/usr/local/lib/python3.7/dist-packages/py4j/java_gateway.py in send_command(self, command, retry, binary)
1034 if `binary` is `True`.
1035 """
-> 1036 connection = self._get_connection()
1037 try:
1038 response = connection.send_command(command)
/usr/local/lib/python3.7/dist-packages/py4j/clientserver.py in _get_connection(self)
279
280 if connection is None or connection.socket is None:
--> 281 connection = self._create_new_connection()
282 return connection
283
/usr/local/lib/python3.7/dist-packages/py4j/clientserver.py in _create_new_connection(self)
286 self.java_parameters, self.python_parameters,
287 self.gateway_property, self)
--> 288 connection.connect_to_java_server()
289 self.set_thread_connection(connection)
290 return connection
/usr/local/lib/python3.7/dist-packages/py4j/clientserver.py in connect_to_java_server(self)
400 self.socket = self.ssl_context.wrap_socket(
401 self.socket, server_hostname=self.java_address)
--> 402 self.socket.connect((self.java_address, self.java_port))
403 self.stream = self.socket.makefile("rb")
404 self.is_connected = True
ConnectionRefusedError: [Errno 111] Connection refused
I am following https://towardsdatascience.com/how-to-connect-to-a-postgresql-database-with-python-using-ssh-tunnelling-d803282f71e7 to connect my postgresql database to a Heroku deployed application. This is done in Jupyter Notebooks and voila. When I run locally, the application works. However, when I deploy to Heroku I get the error:
ValueError Traceback (most recent call last)
Input In [3], in <cell line: 24>()
20 engine = sqlalchemy.create_engine(DB_SQA_CONNECT_STRING)
22 return engine
---> 24 SQL_ENGINE = create_engine()
26 def query(q) -> sqlalchemy.engine.CursorResult:
27 with SQL_ENGINE.connect() as conn:
Input In [3], in create_engine()
1 def create_engine():
----> 2 ssh_tunnel = SSHTunnelForwarder(
3 creds["SSH_HOST"],
4 ssh_username=creds["PG_UN"],
5
6 ssh_private_key= <file_path>,
7
8 remote_bind_address=(creds["DB_HOST"], 5432)
9 )
11 ssh_tunnel.start()
13 # engine = pg.connect('postgresql://{user}:{password}@{host}:{port}/{db}'.format(
14 # host=creds["LOCALHOST"],
15 # port=ssh_tunnel.local_bind_port,
(...)
18 # db=creds["PG_DB_NAME"]
19 # ))
File ~/.heroku/python/lib/python3.10/site-packages/sshtunnel.py:966, in SSHTunnelForwarder.__init__(self, ssh_address_or_host, ssh_config_file, ssh_host_key, ssh_password, ssh_pkey, ssh_private_key_password, ssh_proxy, ssh_proxy_enabled, ssh_username, local_bind_address, local_bind_addresses, logger, mute_exceptions, remote_bind_address, remote_bind_addresses, set_keepalive, threaded, compression, allow_agent, host_pkey_directories, *args, **kwargs)
947 self._local_binds = self._consolidate_binds(self._local_binds,
948 self._remote_binds)
950 (self.ssh_host,
951 self.ssh_username,
952 ssh_pkey, # still needs to go through _consolidate_auth
(...)
963 self.logger
964 )
--> 966 (self.ssh_password, self.ssh_pkeys) = self._consolidate_auth(
967 ssh_password=ssh_password,
968 ssh_pkey=ssh_pkey,
969 ssh_pkey_password=ssh_private_key_password,
970 allow_agent=allow_agent,
971 host_pkey_directories=host_pkey_directories,
972 logger=self.logger
973 )
975 check_host(self.ssh_host)
976 check_port(self.ssh_port)
File ~/.heroku/python/lib/python3.10/site-packages/sshtunnel.py:1169, in SSHTunnelForwarder._consolidate_auth(ssh_password, ssh_pkey, ssh_pkey_password, allow_agent, host_pkey_directories, logger)
1166 ssh_loaded_pkeys.insert(0, ssh_pkey)
1168 if not ssh_password and not ssh_loaded_pkeys:
-> 1169 raise ValueError('No password or public key available!')
1170 return (ssh_password, ssh_loaded_pkeys)
ValueError: No password or public key available!
I have omitted ssh_private_key_pw as the article suggests because my private key is not password protected. Any suggestions?
I have tried changing my private key to a pem file, which did not fix the issue.
I am trying to replicate this example on neo4j desktop:
https://stellargraph.readthedocs.io/en/stable/demos/connector/neo4j/load-cora-into-neo4j.html
I am able to reproduce everything until I get to the following line:
import py2neo
default_host = os.environ.get("STELLARGRAPH_NEO4J_HOST")
# Create the Neo4j Graph database object; the arguments can be edited to specify location and authentication
graph = py2neo.Graph(host=default_host, port=None, user=None, password=None)
I have tried the following attempts to create the neo4j database object:
#1
default_host = os.environ.get("StellarGraph")
graph = py2neo.Graph(host=default_host, port=None, user=None, password=None)
#2
uri = 'bolt://localhost:7687'
graph = Graph(uri, auth=("neo4j", "password"), port= 7687, secure=True)
#3
uri = uri = 'bolt://localhost:7687'
graph = Graph(uri, auth=("neo4j", "password"), port= 7687, secure=True, name= "StellarGraph")
However, each time I attempt this, it results in some variation of this error:
IndexError Traceback (most recent call last)
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:806, in ConnectionPool.acquire(self, force_reset, can_overfill)
804 try:
805 # Plan A: select a free connection from the pool
--> 806 cx = self._free_list.popleft()
807 except IndexError:
IndexError: pop from an empty deque
During handling of the above exception, another exception occurred:
ConnectionRefusedError Traceback (most recent call last)
File ~/.local/lib/python3.8/site-packages/py2neo/wiring.py:62, in Wire.open(cls, address, timeout, keep_alive, on_broken)
61 try:
---> 62 s.connect(address)
63 except (IOError, OSError) as error:
ConnectionRefusedError: [Errno 111] Connection refused
The above exception was the direct cause of the following exception:
WireError Traceback (most recent call last)
File ~/.local/lib/python3.8/site-packages/py2neo/client/bolt.py:355, in Bolt.open(cls, profile, user_agent, on_release, on_broken)
354 try:
--> 355 wire = cls._connect(profile, on_broken=on_broken)
356 protocol_version = cls._handshake(wire)
File ~/.local/lib/python3.8/site-packages/py2neo/client/bolt.py:369, in Bolt._connect(cls, profile, on_broken)
368 log.debug("[#%04X] C: (Dialing <%s>)", 0, profile.address)
--> 369 wire = Wire.open(profile.address, keep_alive=True, on_broken=on_broken)
370 local_port = wire.local_address.port_number
File ~/.local/lib/python3.8/site-packages/py2neo/wiring.py:64, in Wire.open(cls, address, timeout, keep_alive, on_broken)
63 except (IOError, OSError) as error:
---> 64 raise_from(WireError("Cannot connect to %r" % (address,)), error)
65 return cls(s, on_broken=on_broken)
File <string>:3, in raise_from(value, from_value)
WireError: Cannot connect to IPv4Address(('localhost', 7687))
The above exception was the direct cause of the following exception:
ConnectionUnavailable Traceback (most recent call last)
/home/myname/Project1/graph_import.ipynb Cell 13' in <cell line: 2>()
1 uri = 'bolt://localhost:7687'
----> 2 graph = Graph(uri, auth=("neo4j", "mypass"), port= 7687, secure=True, name= "StellarGraph")
File ~/.local/lib/python3.8/site-packages/py2neo/database.py:288, in Graph.__init__(self, profile, name, **settings)
287 def __init__(self, profile=None, name=None, **settings):
--> 288 self.service = GraphService(profile, **settings)
289 self.__name__ = name
290 self.schema = Schema(self)
File ~/.local/lib/python3.8/site-packages/py2neo/database.py:119, in GraphService.__init__(self, profile, **settings)
116 if connector_settings["init_size"] is None and not profile.routing:
117 # Ensures credentials are checked on construction
118 connector_settings["init_size"] = 1
--> 119 self._connector = Connector(profile, **connector_settings)
120 self._graphs = {}
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:960, in Connector.__init__(self, profile, user_agent, init_size, max_size, max_age, routing_refresh_ttl)
958 else:
959 self._router = None
--> 960 self._add_pools(*self._initial_routers)
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:982, in Connector._add_pools(self, *profiles)
980 continue
981 log.debug("Adding connection pool for profile %r", profile)
--> 982 pool = ConnectionPool.open(
983 profile,
984 user_agent=self._user_agent,
985 init_size=self._init_size,
986 max_size=self._max_size,
987 max_age=self._max_age,
988 on_broken=self._on_broken)
989 self._pools[profile] = pool
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:649, in ConnectionPool.open(cls, profile, user_agent, init_size, max_size, max_age, on_broken)
627 """ Create a new connection pool, with an option to seed one
628 or more initial connections.
629
(...)
646 scheme
647 """
648 pool = cls(profile, user_agent, max_size, max_age, on_broken)
--> 649 seeds = [pool.acquire() for _ in range(init_size or cls.default_init_size)]
650 for seed in seeds:
651 seed.release()
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:649, in <listcomp>(.0)
627 """ Create a new connection pool, with an option to seed one
628 or more initial connections.
629
(...)
646 scheme
647 """
648 pool = cls(profile, user_agent, max_size, max_age, on_broken)
--> 649 seeds = [pool.acquire() for _ in range(init_size or cls.default_init_size)]
650 for seed in seeds:
651 seed.release()
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:813, in ConnectionPool.acquire(self, force_reset, can_overfill)
807 except IndexError:
808 if self._has_capacity() or can_overfill:
809 # Plan B: if the pool isn't full, open
810 # a new connection. This may raise a
811 # ConnectionUnavailable exception, which
812 # should bubble up to the caller.
--> 813 cx = self._connect()
814 if cx.supports_multi():
815 self._supports_multi = True
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:764, in ConnectionPool._connect(self)
761 def _connect(self):
762 """ Open and return a new connection.
763 """
--> 764 cx = Connection.open(self.profile, user_agent=self.user_agent,
765 on_release=lambda c: self.release(c),
766 on_broken=lambda msg: self.__on_broken(msg))
767 self._server_agent = cx.server_agent
768 return cx
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:174, in Connection.open(cls, profile, user_agent, on_release, on_broken)
172 if profile.protocol == "bolt":
173 from py2neo.client.bolt import Bolt
--> 174 return Bolt.open(profile, user_agent=user_agent,
175 on_release=on_release, on_broken=on_broken)
176 elif profile.protocol == "http":
177 from py2neo.client.http import HTTP
File ~/.local/lib/python3.8/site-packages/py2neo/client/bolt.py:364, in Bolt.open(cls, profile, user_agent, on_release, on_broken)
362 return bolt
363 except (TypeError, WireError) as error:
--> 364 raise_from(ConnectionUnavailable("Cannot open connection to %r" % profile), error)
File <string>:3, in raise_from(value, from_value)
ConnectionUnavailable: Cannot open connection to ConnectionProfile('bolt+s://localhost:7687')
I have also tried variations on this fix as well, but had the same error:
ISSUE IN CONNECTING py2neo v4 to my neo4j server
I appreciate any help resolving this issue. Thanks!
I was able to resolve this with the following syntax:
graph = Graph('neo4j://localhost:7687', user="neo4j", password="999")
However, I am now having an issue with the following block:
empty_db_query = """
MATCH(n) DETACH
DELETE(n)
"""
tx = graph.begin(autocommit=True)
tx.evaluate(empty_db_query)
For the newer version of py2neo, the graph.begin argument takes readonly=False instead of autocommit=True, but in any case, I have this error now:
ServiceUnavailable Traceback (most recent call last)
/home/myname/Project1/graph_import.ipynb Cell 13' in <cell line: 6>()
1 empty_db_query = """
2 MATCH(n) DETACH
3 DELETE(n)
4 """
----> 6 tx = graph.begin(readonly=False)
7 tx.evaluate(empty_db_query)
File ~/.local/lib/python3.8/site-packages/py2neo/database.py:351, in Graph.begin(self, readonly)
340 def begin(self, readonly=False,
341 # after=None, metadata=None, timeout=None
342 ):
343 """ Begin a new :class:`~py2neo.Transaction`.
344
345 :param readonly: if :py:const:`True`, will begin a readonly
(...)
349 removed. Use the 'auto' method instead.*
350 """
--> 351 return Transaction(self, autocommit=False, readonly=readonly,
352 # after, metadata, timeout
353 )
File ~/.local/lib/python3.8/site-packages/py2neo/database.py:915, in Transaction.__init__(self, graph, autocommit, readonly)
913 self._ref = None
914 else:
--> 915 self._ref = self._connector.begin(self.graph.name, readonly=readonly,
916 # after, metadata, timeout
917 )
918 self._readonly = readonly
919 self._closed = False
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:1357, in Connector.begin(self, graph_name, readonly)
1345 def begin(self, graph_name, readonly=False,
1346 # after=None, metadata=None, timeout=None
1347 ):
1348 """ Begin a new explicit transaction.
1349
1350 :param graph_name:
(...)
1355 :raises Failure: if the server signals a failure condition
1356 """
-> 1357 cx = self._acquire(graph_name)
1358 try:
1359 return cx.begin(graph_name, readonly=readonly,
1360 # after=after, metadata=metadata, timeout=timeout
1361 )
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:1111, in Connector._acquire(self, graph_name, readonly)
1109 return self._acquire_ro(graph_name)
1110 else:
-> 1111 return self._acquire_rw(graph_name)
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:1203, in Connector._acquire_rw(self, graph_name)
1199 # TODO: exit immediately if the server/cluster is in readonly mode
1201 while True:
-> 1203 ro_profiles, rw_profiles = self._get_profiles(graph_name, readonly=False)
1204 if rw_profiles:
1205 # There is at least one writer, so collect the pools
1206 # for those writers. In all implementations to date,
1207 # a Neo4j cluster will only ever contain at most one
1208 # writer (per database). But this algorithm should
1209 # still survive if that changes.
1210 pools = [pool for profile, pool in list(self._pools.items())
1211 if profile in rw_profiles]
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:1016, in Connector._get_profiles(self, graph_name, readonly)
1014 rt.wait_until_updated()
1015 else:
-> 1016 self.refresh_routing_table(graph_name)
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:1064, in Connector.refresh_routing_table(self, graph_name)
1062 cx.release()
1063 else:
-> 1064 raise ServiceUnavailable("Cannot connect to any known routers")
1065 finally:
1066 rt.set_not_updating()
ServiceUnavailable: Cannot connect to any known routers
Appreciate any help in resolving this. Thank you!
I am using paramiko (v2.7.1) to connect to a host with an authorized key. When I do not pass the key explicitly, I can connect. However, when I pass it with key_filename, I get an error:
ValueError: ('Invalid private key', [_OpenSSLErrorWithText(code=67764350, lib=4, func=160, reason=126, reason_text=b'error:040A007E:rsa routines:RSA_check_key_ex:iqmp not inverse of q')])
I have checked that the path is correct, and also that the key really does match the public key on the remote host (in case we are logging in with some other method).
Also, I can connect with ssh directly, both without an explicit key and with one. When I don't specify the key explicitly, ssh -vvv prints the matching path to the private key. The key is encoded using rsa-sha2-512. Finally, when I move the key out of ~/.ssh and clear the auth cache, I can no longer connect with ssh, proving that it's definitely that key that is letting me in.
Is there something else I can do? I'm currently manually testing a script that is meant to run where the key is not installed in the default location.
My code:
vm = paramiko.SSHClient()
vm.set_missing_host_key_policy(paramiko.AutoAddPolicy())
vm.connect(
DEST_ADDR, username=DEST_USER, key_filename=DEST_KEY_FILE,
)
The code does not work as-is, but does work if I comment out key_filename parameter. Are keys accessed differently depending on how they are found?
Traceback:
ValueError Traceback (most recent call last)
<ipython-input-25-57a83e5f85d1> in <module>
3 # vm.connect(
4 # JUMP_ADDR, username=JUMP_USER, key_filename=JUMP_KEY_FILE)
----> 5 vm.connect(
6 DEST_ADDR, username=DEST_USER, key_filename=DEST_KEY_FILE,
7 ) # sock=vmchannel)
/usr/local/share/anaconda3/lib/python3.8/site-packages/paramiko/client.py in connect(self, hostname, port, username, password, pkey, key_filename, timeout, allow_agent, look_for_keys, compress, sock, gss_auth, gss_kex, gss_deleg_creds, gss_host, banner_timeout, auth_timeout, gss_trust_dns, passphrase, disabled_algorithms)
433 key_filenames = key_filename
434
--> 435 self._auth(
436 username,
437 password,
/usr/local/share/anaconda3/lib/python3.8/site-packages/paramiko/client.py in _auth(self, username, password, pkey, key_filenames, allow_agent, look_for_keys, gss_auth, gss_kex, gss_deleg_creds, gss_host, passphrase)
674 for pkey_class in (RSAKey, DSSKey, ECDSAKey, Ed25519Key):
675 try:
--> 676 key = self._key_from_filepath(
677 key_filename, pkey_class, passphrase
678 )
/usr/local/share/anaconda3/lib/python3.8/site-packages/paramiko/client.py in _key_from_filepath(self, filename, klass, password)
584 cert_path = filename + cert_suffix
585 # Blindly try the key path; if no private key, nothing will work.
--> 586 key = klass.from_private_key_file(key_path, password)
587 # TODO: change this to 'Loading' instead of 'Trying' sometime; probably
588 # when #387 is released, since this is a critical log message users are
/usr/local/share/anaconda3/lib/python3.8/site-packages/paramiko/pkey.py in from_private_key_file(cls, filename, password)
233 :raises: `.SSHException` -- if the key file is invalid
234 """
--> 235 key = cls(filename=filename, password=password)
236 return key
237
/usr/local/share/anaconda3/lib/python3.8/site-packages/paramiko/rsakey.py in __init__(self, msg, data, filename, password, key, file_obj)
53 return
54 if filename is not None:
---> 55 self._from_private_key_file(filename, password)
56 return
57 if (msg is None) and (data is not None):
/usr/local/share/anaconda3/lib/python3.8/site-packages/paramiko/rsakey.py in _from_private_key_file(self, filename, password)
174 def _from_private_key_file(self, filename, password):
175 data = self._read_private_key_file("RSA", filename, password)
--> 176 self._decode_key(data)
177
178 def _from_private_key(self, file_obj, password):
/usr/local/share/anaconda3/lib/python3.8/site-packages/paramiko/rsakey.py in _decode_key(self, data)
192 n, e, d, iqmp, q, p = self._uint32_cstruct_unpack(data, "iiiiii")
193 public_numbers = rsa.RSAPublicNumbers(e=e, n=n)
--> 194 key = rsa.RSAPrivateNumbers(
195 p=p,
196 q=q,
/usr/local/share/anaconda3/lib/python3.8/site-packages/cryptography/hazmat/primitives/asymmetric/rsa.py in private_key(self, backend)
365 def private_key(self, backend=None) -> RSAPrivateKey:
366 backend = _get_backend(backend)
--> 367 return backend.load_rsa_private_numbers(self)
368
369 def __eq__(self, other):
/usr/local/share/anaconda3/lib/python3.8/site-packages/cryptography/hazmat/backends/openssl/backend.py in load_rsa_private_numbers(self, numbers)
600 evp_pkey = self._rsa_cdata_to_evp_pkey(rsa_cdata)
601
--> 602 return _RSAPrivateKey(self, rsa_cdata, evp_pkey)
603
604 def load_rsa_public_numbers(self, numbers):
/usr/local/share/anaconda3/lib/python3.8/site-packages/cryptography/hazmat/backends/openssl/rsa.py in __init__(self, backend, rsa_cdata, evp_pkey)
387 if res != 1:
388 errors = backend._consume_errors_with_text()
--> 389 raise ValueError("Invalid private key", errors)
390
391 # Blinding is on by default in many versions of OpenSSL, but let's
ValueError: ('Invalid private key', [_OpenSSLErrorWithText(code=67764350, lib=4, func=160, reason=126, reason_text=b'error:040A007E:rsa routines:RSA_check_key_ex:iqmp not inverse of q')])
ref: https://github.com/paramiko/paramiko/issues/1929
This is an issue for paramiko 2.7.1 -- fixed by installing 2.8.0
I'm following this https://www.mongodb.com/blog/post/getting-started-with-python-and-mongodb introductory tutorial. I can connect to the cluster fine with mongo shell, but not with pymongo (Python: 3.6.1, Pymongo 3.4.0). Pymongo works okay with a local mongodb. What is the problem? Below is the exception I get:
----------------------------------------------------------------------
-----
KeyError Traceback (most recent call
last)
<ipython-input-22-1c9d47341338> in <module>()
----> 1 server_status_result = db.command('serverStatus')
2 pprint(server_status_result)
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/database.py in command(self, command, value, check,
allowable_errors, read_preference, codec_options, **kwargs)
489 """
490 client = self.__client
--> 491 with client._socket_for_reads(read_preference) as
(sock_info, slave_ok):
492 return self._command(sock_info, command, slave_ok,
value,
493 check, allowable_errors,
read_preference,
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/mongo_client.py in _socket_for_reads(self,
read_preference)
857 topology = self._get_topology()
858 single = topology.description.topology_type ==
TOPOLOGY_TYPE.Single
--> 859 with self._get_socket(read_preference) as sock_info:
860 slave_ok = (single and not sock_info.is_mongos) or (
861 preference != ReadPreference.PRIMARY)
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/mongo_client.py in _get_socket(self, selector)
823 server = self._get_topology().select_server(selector)
824 try:
--> 825 with server.get_socket(self.__all_credentials) as
sock_info:
826 yield sock_info
827 except NetworkTimeout:
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/server.py in get_socket(self, all_credentials,
checkout)
166 @contextlib.contextmanager
167 def get_socket(self, all_credentials, checkout=False):
--> 168 with self.pool.get_socket(all_credentials, checkout)
as sock_info:
169 yield sock_info
170
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in get_socket(self, all_credentials,
checkout)
790 sock_info = self._get_socket_no_auth()
791 try:
--> 792 sock_info.check_auth(all_credentials)
793 yield sock_info
794 except:
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in check_auth(self, all_credentials)
510
511 for credentials in cached - authset:
--> 512 auth.authenticate(credentials, self)
513 self.authset.add(credentials)
514
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in authenticate(credentials, sock_info)
468 mechanism = credentials.mechanism
469 auth_func = _AUTH_MAP.get(mechanism)
--> 470 auth_func(credentials, sock_info)
471
472
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in _authenticate_default(credentials,
sock_info)
448 def _authenticate_default(credentials, sock_info):
449 if sock_info.max_wire_version >= 3:
--> 450 return _authenticate_scram_sha1(credentials,
sock_info)
451 else:
452 return _authenticate_mongo_cr(credentials, sock_info)
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in _authenticate_scram_sha1(credentials,
sock_info)
227 ('conversationId', res['conversationId']),
228 ('payload', Binary(client_final))])
--> 229 res = sock_info.command(source, cmd)
230
231 parsed = _parse_scram_response(res['payload'])
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in command(self, dbname, spec, slave_ok,
read_preference, codec_options, check, allowable_errors, check_keys,
read_concern, write_concern, parse_write_concern_error, collation)
422 # Catch socket.error, KeyboardInterrupt, etc. and close
ourselves.
423 except BaseException as error:
--> 424 self._raise_connection_failure(error)
425
426 def send_message(self, message, max_doc_size):
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in _raise_connection_failure(self, error)
550 _raise_connection_failure(self.address, error)
551 else:
--> 552 raise error
553
554 def __eq__(self, other):
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in command(self, dbname, spec, slave_ok,
read_preference, codec_options, check, allowable_errors, check_keys,
read_concern, write_concern, parse_write_concern_error, collation)
417 read_concern,
418
parse_write_concern_error=parse_write_concern_error,
--> 419 collation=collation)
420 except OperationFailure:
421 raise
/home/tim/.virtualenvs/main/lib/python3.6/site-p
ackages/pymongo/network.py in command(sock, dbname, spec, slave_ok,
is_mongos, read_preference, codec_options, check, allowable_errors,
address, check_keys, listeners, max_bson_size, read_concern,
parse_write_concern_error, collation)
108 response = receive_message(sock, 1, request_id)
109 unpacked = helpers._unpack_response(
--> 110 response, codec_options=codec_options)
111
112 response_doc = unpacked['data'][0]
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/helpers.py in _unpack_response(response, cursor_id,
codec_options)
126 # Fake the ok field if it doesn't exist.
127 error_object.setdefault("ok", 0)
--> 128 if error_object["$err"].startswith("not master"):
129 raise NotMasterError(error_object["$err"],
error_object)
130 elif error_object.get("code") == 50:
KeyError: '$err'
I believe this is an Atlas bug, I've reported it to the team. The bug is, if you fail to log in to Atlas because your username or password are incorrect, it replies in a way that makes PyMongo throw a KeyError instead of the proper OperationFailure("auth failed").
PyMongo does work with Atlas, however, if you properly format your connection string with your username and password. Make sure your username and password are URL-quoted. Substitute your username and password into this Python code:
from urllib.parse import quote_plus
print(quote_plus('MY USERNAME'))
print(quote_plus('MY PASSWORD'))
Take the output and put it into the connection string Atlas gave you, e.g. if your username is jesse@example.com and your password is "foo:bar", put that in the first part of the string, and get the rest of the string from the Atlas control panel for your account:
mongodb://jesse%40example.com:foo%3Abar@cluster0-shard-00-00-abc.mongodb.net:27017,cluster0-shard-00-01-abc.mongodb.net:27017,cluster0-shard-00-02-abc.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin
Note how "jesse@example.com" has become "jesse%40example.com", and "foo:bar" has become "foo%3Abar".