pymysql, Access Denied error when connecting to MySQL server - python

I am trying to load an Excel file into a database, for which I first have to connect to my MySQL server using Python. The MySQL server is already running in the background. Now when I try to run this code:
import xlrd
import pymysql
xl_data = xlrd.open_workbook('C:/Users/xxx/Desktop/xyz.xlsx')
mydb = pymysql.connect( host = 'localhost' , user ="x" , passwd = "x" , db = "")
cursor = mydb.cursor()
I get the following error:
---------------------------------------------------------------------------
OperationalError Traceback (most recent call last)
<ipython-input-2-d43a74bc83ce> in <module>
3
4 xl_data = xlrd.open_workbook('C:/Users/Sankalp/Desktop/Problem_Sample Linkedin Data.xlsx')
----> 5 mydb = pymysql.connect( host = 'localhost' , user ="x" , passwd = "x" , db = "")
6 cursor = mydb.cursor()
D:\Softwares\Anaconda\lib\site-packages\pymysql\__init__.py in Connect(*args, **kwargs)
92 """
93 from .connections import Connection
---> 94 return Connection(*args, **kwargs)
95
96 from . import connections as _orig_conn
D:\Softwares\Anaconda\lib\site-packages\pymysql\connections.py in __init__(self, host, user, password, database, port, unix_socket, charset, sql_mode, read_default_file, conv, use_unicode, client_flag, cursorclass, init_command, connect_timeout, ssl, read_default_group, compress, named_pipe, autocommit, db, passwd, local_infile, max_allowed_packet, defer_connect, auth_plugin_map, read_timeout, write_timeout, bind_address, binary_prefix, program_name, server_public_key)
323 self._sock = None
324 else:
--> 325 self.connect()
326
327 def _create_ssl_ctx(self, sslp):
D:\Softwares\Anaconda\lib\site-packages\pymysql\connections.py in connect(self, sock)
597
598 self._get_server_information()
--> 599 self._request_authentication()
600
601 if self.sql_mode is not None:
D:\Softwares\Anaconda\lib\site-packages\pymysql\connections.py in _request_authentication(self)
869 plugin_name = auth_packet.read_string()
870 if self.server_capabilities & CLIENT.PLUGIN_AUTH and plugin_name is not None:
--> 871 auth_packet = self._process_auth(plugin_name, auth_packet)
872 else:
873 # send legacy handshake
D:\Softwares\Anaconda\lib\site-packages\pymysql\connections.py in _process_auth(self, plugin_name, auth_packet)
900 return _auth.caching_sha2_password_auth(self, auth_packet)
901 elif plugin_name == b"sha256_password":
--> 902 return _auth.sha256_password_auth(self, auth_packet)
903 elif plugin_name == b"mysql_native_password":
904 data = _auth.scramble_native_password(self.password, auth_packet.read_all())
D:\Softwares\Anaconda\lib\site-packages\pymysql\_auth.py in sha256_password_auth(conn, pkt)
181 data = b''
182
--> 183 return _roundtrip(conn, data)
184
185
D:\Softwares\Anaconda\lib\site-packages\pymysql\_auth.py in _roundtrip(conn, send_data)
120 def _roundtrip(conn, send_data):
121 conn.write_packet(send_data)
--> 122 pkt = conn._read_packet()
123 pkt.check_error()
124 return pkt
D:\Softwares\Anaconda\lib\site-packages\pymysql\connections.py in _read_packet(self, packet_type)
682
683 packet = packet_type(buff, self.encoding)
--> 684 packet.check_error()
685 return packet
686
D:\Softwares\Anaconda\lib\site-packages\pymysql\protocol.py in check_error(self)
218 errno = self.read_uint16()
219 if DEBUG: print("errno =", errno)
--> 220 err.raise_mysql_exception(self._data)
221
222 def dump(self):
D:\Softwares\Anaconda\lib\site-packages\pymysql\err.py in raise_mysql_exception(data)
107 errval = data[3:].decode('utf-8', 'replace')
108 errorclass = error_map.get(errno, InternalError)
--> 109 raise errorclass(errno, errval)
OperationalError: (1045, "Access denied for user 'x'@'localhost' (using password: YES)")
How can I rectify this error? Is it a permissions problem, or are my connection details incorrect? Any pointers would be appreciated.
Thanks!
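For reference, a minimal sketch (hypothetical credentials): error 1045 means the server rejected the username/password pair, or that account has no grant for connections from this host, so it can help to isolate the connect call and inspect the error code directly.
import pymysql

try:
    conn = pymysql.connect(
        host='localhost',
        user='x',          # must match an existing account, e.g. 'x'@'localhost'
        password='x',      # that account's actual password
        database='mysql',  # any database the account is allowed to use
    )
except pymysql.err.OperationalError as exc:
    code, msg = exc.args
    if code == 1045:
        print('Wrong credentials or missing grant:', msg)
        # An admin can fix a missing account/grant in the mysql shell, e.g.:
        #   CREATE USER 'x'@'localhost' IDENTIFIED BY 'x';
        #   GRANT ALL PRIVILEGES ON mydb.* TO 'x'@'localhost';
    else:
        raise
else:
    with conn.cursor() as cursor:
        cursor.execute('SELECT CURRENT_USER()')
        print(cursor.fetchone())
    conn.close()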

Related

py2neo Issue: ConnectionUnavailable: Cannot open connection to ConnectionProfile('bolt://localhost:7687')

I am trying to replicate this example on Neo4j Desktop:
https://stellargraph.readthedocs.io/en/stable/demos/connector/neo4j/load-cora-into-neo4j.html
I am able to reproduce everything until I get to the following lines:
import py2neo
default_host = os.environ.get("STELLARGRAPH_NEO4J_HOST")
# Create the Neo4j Graph database object; the arguments can be edited to specify location and authentication
graph = py2neo.Graph(host=default_host, port=None, user=None, password=None)
I have made the following attempts to create the Neo4j graph database object:
#1
default_host = os.environ.get("StellarGraph")
graph = py2neo.Graph(host=default_host, port=None, user=None, password=None)
#2
uri = 'bolt://localhost:7687'
graph = Graph(uri, auth=("neo4j", "password"), port= 7687, secure=True)
#3
uri = 'bolt://localhost:7687'
graph = Graph(uri, auth=("neo4j", "password"), port= 7687, secure=True, name= "StellarGraph")
However, each time I attempt this, it results in some variation of this error:
IndexError Traceback (most recent call last)
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:806, in ConnectionPool.acquire(self, force_reset, can_overfill)
804 try:
805 # Plan A: select a free connection from the pool
--> 806 cx = self._free_list.popleft()
807 except IndexError:
IndexError: pop from an empty deque
During handling of the above exception, another exception occurred:
ConnectionRefusedError Traceback (most recent call last)
File ~/.local/lib/python3.8/site-packages/py2neo/wiring.py:62, in Wire.open(cls, address, timeout, keep_alive, on_broken)
61 try:
---> 62 s.connect(address)
63 except (IOError, OSError) as error:
ConnectionRefusedError: [Errno 111] Connection refused
The above exception was the direct cause of the following exception:
WireError Traceback (most recent call last)
File ~/.local/lib/python3.8/site-packages/py2neo/client/bolt.py:355, in Bolt.open(cls, profile, user_agent, on_release, on_broken)
354 try:
--> 355 wire = cls._connect(profile, on_broken=on_broken)
356 protocol_version = cls._handshake(wire)
File ~/.local/lib/python3.8/site-packages/py2neo/client/bolt.py:369, in Bolt._connect(cls, profile, on_broken)
368 log.debug("[#%04X] C: (Dialing <%s>)", 0, profile.address)
--> 369 wire = Wire.open(profile.address, keep_alive=True, on_broken=on_broken)
370 local_port = wire.local_address.port_number
File ~/.local/lib/python3.8/site-packages/py2neo/wiring.py:64, in Wire.open(cls, address, timeout, keep_alive, on_broken)
63 except (IOError, OSError) as error:
---> 64 raise_from(WireError("Cannot connect to %r" % (address,)), error)
65 return cls(s, on_broken=on_broken)
File <string>:3, in raise_from(value, from_value)
WireError: Cannot connect to IPv4Address(('localhost', 7687))
The above exception was the direct cause of the following exception:
ConnectionUnavailable Traceback (most recent call last)
/home/myname/Project1/graph_import.ipynb Cell 13' in <cell line: 2>()
1 uri = 'bolt://localhost:7687'
----> 2 graph = Graph(uri, auth=("neo4j", "mypass"), port= 7687, secure=True, name= "StellarGraph")
File ~/.local/lib/python3.8/site-packages/py2neo/database.py:288, in Graph.__init__(self, profile, name, **settings)
287 def __init__(self, profile=None, name=None, **settings):
--> 288 self.service = GraphService(profile, **settings)
289 self.__name__ = name
290 self.schema = Schema(self)
File ~/.local/lib/python3.8/site-packages/py2neo/database.py:119, in GraphService.__init__(self, profile, **settings)
116 if connector_settings["init_size"] is None and not profile.routing:
117 # Ensures credentials are checked on construction
118 connector_settings["init_size"] = 1
--> 119 self._connector = Connector(profile, **connector_settings)
120 self._graphs = {}
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:960, in Connector.__init__(self, profile, user_agent, init_size, max_size, max_age, routing_refresh_ttl)
958 else:
959 self._router = None
--> 960 self._add_pools(*self._initial_routers)
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:982, in Connector._add_pools(self, *profiles)
980 continue
981 log.debug("Adding connection pool for profile %r", profile)
--> 982 pool = ConnectionPool.open(
983 profile,
984 user_agent=self._user_agent,
985 init_size=self._init_size,
986 max_size=self._max_size,
987 max_age=self._max_age,
988 on_broken=self._on_broken)
989 self._pools[profile] = pool
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:649, in ConnectionPool.open(cls, profile, user_agent, init_size, max_size, max_age, on_broken)
627 """ Create a new connection pool, with an option to seed one
628 or more initial connections.
629
(...)
646 scheme
647 """
648 pool = cls(profile, user_agent, max_size, max_age, on_broken)
--> 649 seeds = [pool.acquire() for _ in range(init_size or cls.default_init_size)]
650 for seed in seeds:
651 seed.release()
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:649, in <listcomp>(.0)
627 """ Create a new connection pool, with an option to seed one
628 or more initial connections.
629
(...)
646 scheme
647 """
648 pool = cls(profile, user_agent, max_size, max_age, on_broken)
--> 649 seeds = [pool.acquire() for _ in range(init_size or cls.default_init_size)]
650 for seed in seeds:
651 seed.release()
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:813, in ConnectionPool.acquire(self, force_reset, can_overfill)
807 except IndexError:
808 if self._has_capacity() or can_overfill:
809 # Plan B: if the pool isn't full, open
810 # a new connection. This may raise a
811 # ConnectionUnavailable exception, which
812 # should bubble up to the caller.
--> 813 cx = self._connect()
814 if cx.supports_multi():
815 self._supports_multi = True
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:764, in ConnectionPool._connect(self)
761 def _connect(self):
762 """ Open and return a new connection.
763 """
--> 764 cx = Connection.open(self.profile, user_agent=self.user_agent,
765 on_release=lambda c: self.release(c),
766 on_broken=lambda msg: self.__on_broken(msg))
767 self._server_agent = cx.server_agent
768 return cx
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:174, in Connection.open(cls, profile, user_agent, on_release, on_broken)
172 if profile.protocol == "bolt":
173 from py2neo.client.bolt import Bolt
--> 174 return Bolt.open(profile, user_agent=user_agent,
175 on_release=on_release, on_broken=on_broken)
176 elif profile.protocol == "http":
177 from py2neo.client.http import HTTP
File ~/.local/lib/python3.8/site-packages/py2neo/client/bolt.py:364, in Bolt.open(cls, profile, user_agent, on_release, on_broken)
362 return bolt
363 except (TypeError, WireError) as error:
--> 364 raise_from(ConnectionUnavailable("Cannot open connection to %r" % profile), error)
File <string>:3, in raise_from(value, from_value)
ConnectionUnavailable: Cannot open connection to ConnectionProfile('bolt+s://localhost:7687')
I have also tried variations on this fix, but got the same error:
ISSUE IN CONNECTING py2neo v4 to my neo4j server
I appreciate any help resolving this issue. Thanks!
I was able to resolve this with the following syntax:
graph = Graph('neo4j://localhost:7687', user="neo4j", password="999")
However, I am now having an issue with the following block:
empty_db_query = """
MATCH(n) DETACH
DELETE(n)
"""
tx = graph.begin(autocommit=True)
tx.evaluate(empty_db_query)
For newer versions of py2neo, graph.begin takes readonly=False instead of autocommit=True, but in any case I now get this error:
ServiceUnavailable Traceback (most recent call last)
/home/myname/Project1/graph_import.ipynb Cell 13' in <cell line: 6>()
1 empty_db_query = """
2 MATCH(n) DETACH
3 DELETE(n)
4 """
----> 6 tx = graph.begin(readonly=False)
7 tx.evaluate(empty_db_query)
File ~/.local/lib/python3.8/site-packages/py2neo/database.py:351, in Graph.begin(self, readonly)
340 def begin(self, readonly=False,
341 # after=None, metadata=None, timeout=None
342 ):
343 """ Begin a new :class:`~py2neo.Transaction`.
344
345 :param readonly: if :py:const:`True`, will begin a readonly
(...)
349 removed. Use the 'auto' method instead.*
350 """
--> 351 return Transaction(self, autocommit=False, readonly=readonly,
352 # after, metadata, timeout
353 )
File ~/.local/lib/python3.8/site-packages/py2neo/database.py:915, in Transaction.__init__(self, graph, autocommit, readonly)
913 self._ref = None
914 else:
--> 915 self._ref = self._connector.begin(self.graph.name, readonly=readonly,
916 # after, metadata, timeout
917 )
918 self._readonly = readonly
919 self._closed = False
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:1357, in Connector.begin(self, graph_name, readonly)
1345 def begin(self, graph_name, readonly=False,
1346 # after=None, metadata=None, timeout=None
1347 ):
1348 """ Begin a new explicit transaction.
1349
1350 :param graph_name:
(...)
1355 :raises Failure: if the server signals a failure condition
1356 """
-> 1357 cx = self._acquire(graph_name)
1358 try:
1359 return cx.begin(graph_name, readonly=readonly,
1360 # after=after, metadata=metadata, timeout=timeout
1361 )
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:1111, in Connector._acquire(self, graph_name, readonly)
1109 return self._acquire_ro(graph_name)
1110 else:
-> 1111 return self._acquire_rw(graph_name)
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:1203, in Connector._acquire_rw(self, graph_name)
1199 # TODO: exit immediately if the server/cluster is in readonly mode
1201 while True:
-> 1203 ro_profiles, rw_profiles = self._get_profiles(graph_name, readonly=False)
1204 if rw_profiles:
1205 # There is at least one writer, so collect the pools
1206 # for those writers. In all implementations to date,
1207 # a Neo4j cluster will only ever contain at most one
1208 # writer (per database). But this algorithm should
1209 # still survive if that changes.
1210 pools = [pool for profile, pool in list(self._pools.items())
1211 if profile in rw_profiles]
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:1016, in Connector._get_profiles(self, graph_name, readonly)
1014 rt.wait_until_updated()
1015 else:
-> 1016 self.refresh_routing_table(graph_name)
File ~/.local/lib/python3.8/site-packages/py2neo/client/__init__.py:1064, in Connector.refresh_routing_table(self, graph_name)
1062 cx.release()
1063 else:
-> 1064 raise ServiceUnavailable("Cannot connect to any known routers")
1065 finally:
1066 rt.set_not_updating()
ServiceUnavailable: Cannot connect to any known routers
Appreciate any help in resolving this. Thank you!
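For what it's worth, a hedged sketch (assuming a single local Neo4j Desktop instance rather than a cluster): the neo4j:// scheme makes the driver request a routing table, which a standalone server cannot provide (hence "Cannot connect to any known routers"), while plain bolt:// connects directly; graph.run() is the auto-commit alternative to begin()/evaluate() for a one-off statement.
from py2neo import Graph

# Assumes a standalone local instance and the password used above.
graph = Graph("bolt://localhost:7687", auth=("neo4j", "999"))

# Auto-commit execution of a single statement.
graph.run("MATCH (n) DETACH DELETE n")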

Python SQLAlchemy create_engine error "Connection timed out"

I have read quite a few posts about this but couldn't find a resolution. I am able to read from Postgres, but I get a connection timeout error when I connect via a create_engine string. I know the credentials are correct because I can read from Postgres.
My aim is to write a DataFrame (df) to Postgres directly from Python and, going forward, keep appending rows to that table. I have changed the password and the IP address in the example below. Not sure if this is relevant, but I am using a tunnel in PuTTY to connect to the remote server and launch a Jupyter notebook, and then trying to write to the Postgres DB.
My code
from sqlalchemy import create_engine
import psycopg2
import io
password = 'Pas#$tk$#a' #This is just an example. MY password has special characters
engine = create_engine('postgresql+psycopg2://admin:password@1.12.11.1:5432/DEV_Sach_D', connect_args={'sslmode':'require'}, echo=True).connect() # I have changed the IP here for security reasons
conn = engine.connect()
df.to_sql('py_stg_test', con=conn, if_exists='replace',index=False)
Error I get
---------------------------------------------------------------------------
OperationalError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
3211 try:
-> 3212 return fn()
3213 except dialect.dbapi.Error as e:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in connect(self)
306 """
--> 307 return _ConnectionFairy._checkout(self)
308
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in _checkout(cls, pool, threadconns, fairy)
766 if not fairy:
--> 767 fairy = _ConnectionRecord.checkout(pool)
768
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in checkout(cls, pool)
424 def checkout(cls, pool):
--> 425 rec = pool._do_get()
426 try:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/impl.py in _do_get(self)
145 with util.safe_reraise():
--> 146 self._dec_overflow()
147 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
71 exc_value,
---> 72 with_traceback=exc_tb,
73 )
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
206 try:
--> 207 raise exception
208 finally:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/impl.py in _do_get(self)
142 try:
--> 143 return self._create_connection()
144 except:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in _create_connection(self)
252
--> 253 return _ConnectionRecord(self)
254
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __init__(self, pool, connect)
367 if connect:
--> 368 self.__connect()
369 self.finalize_callback = deque()
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __connect(self)
610 with util.safe_reraise():
--> 611 pool.logger.debug("Error on connect(): %s", e)
612 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
71 exc_value,
---> 72 with_traceback=exc_tb,
73 )
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
206 try:
--> 207 raise exception
208 finally:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __connect(self)
604 self.starttime = time.time()
--> 605 connection = pool._invoke_creator(self)
606 pool.logger.debug("Created new connection %r", connection)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/create.py in connect(connection_record)
577 return connection
--> 578 return dialect.connect(*cargs, **cparams)
579
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py in connect(self, *cargs, **cparams)
583 # inherits the docstring from interfaces.Dialect.connect
--> 584 return self.dbapi.connect(*cargs, **cparams)
585
/usr/local/lib/python3.6/dist-packages/psycopg2/__init__.py in connect(dsn, connection_factory, cursor_factory, **kwargs)
121 dsn = _ext.make_dsn(dsn, **kwargs)
--> 122 conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
123 if cursor_factory is not None:
OperationalError: could not connect to server: Connection timed out
Is the server running on host "1.12.11.1" and accepting
TCP/IP connections on port 5432?
The above exception was the direct cause of the following exception:
OperationalError Traceback (most recent call last)
<ipython-input-5-28cdb6a0c387> in <module>()
4 password = Pas#$tk$#a'
5
----> 6 engine = create_engine('postgresql+psycopg2://libertypassageadminuser#libertypassage:password#1.12.11.1:5432/Dev_Sach_D, connect_args={'sslmode':'require'}, echo=True).connect()
7 conn = engine.connect()
8 df.to_sql('py_stg_test', con=conn, if_exists='replace',index=False)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in connect(self, close_with_result)
3164 """
3165
-> 3166 return self._connection_cls(self, close_with_result=close_with_result)
3167
3168 @util.deprecated(
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in __init__(self, engine, connection, close_with_result, _branch_from, _execution_options, _dispatch, _has_events, _allow_revalidate)
94 connection
95 if connection is not None
---> 96 else engine.raw_connection()
97 )
98
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in raw_connection(self, _connection)
3243
3244 """
-> 3245 return self._wrap_pool_connect(self.pool.connect, _connection)
3246
3247
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
3214 if connection is None:
3215 Connection._handle_dbapi_exception_noconnection(
-> 3216 e, dialect, self
3217 )
3218 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _handle_dbapi_exception_noconnection(cls, e, dialect, engine)
2068 elif should_wrap:
2069 util.raise_(
-> 2070 sqlalchemy_exception, with_traceback=exc_info[2], from_=e
2071 )
2072 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
205
206 try:
--> 207 raise exception
208 finally:
209 # credit to
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
3210 dialect = self.dialect
3211 try:
-> 3212 return fn()
3213 except dialect.dbapi.Error as e:
3214 if connection is None:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in connect(self)
305
306 """
--> 307 return _ConnectionFairy._checkout(self)
308
309 def _return_conn(self, record):
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in _checkout(cls, pool, threadconns, fairy)
765 def _checkout(cls, pool, threadconns=None, fairy=None):
766 if not fairy:
--> 767 fairy = _ConnectionRecord.checkout(pool)
768
769 fairy._pool = pool
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in checkout(cls, pool)
423 @classmethod
424 def checkout(cls, pool):
--> 425 rec = pool._do_get()
426 try:
427 dbapi_connection = rec.get_connection()
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/impl.py in _do_get(self)
144 except:
145 with util.safe_reraise():
--> 146 self._dec_overflow()
147 else:
148 return self._do_get()
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
70 compat.raise_(
71 exc_value,
---> 72 with_traceback=exc_tb,
73 )
74 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
205
206 try:
--> 207 raise exception
208 finally:
209 # credit to
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/impl.py in _do_get(self)
141 if self._inc_overflow():
142 try:
--> 143 return self._create_connection()
144 except:
145 with util.safe_reraise():
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in _create_connection(self)
251 """Called by subclasses to create a new ConnectionRecord."""
252
--> 253 return _ConnectionRecord(self)
254
255 def _invalidate(self, connection, exception=None, _checkin=True):
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __init__(self, pool, connect)
366 self.__pool = pool
367 if connect:
--> 368 self.__connect()
369 self.finalize_callback = deque()
370
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __connect(self)
609 except Exception as e:
610 with util.safe_reraise():
--> 611 pool.logger.debug("Error on connect(): %s", e)
612 else:
613 # in SQLAlchemy 1.4 the first_connect event is not used by
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
70 compat.raise_(
71 exc_value,
---> 72 with_traceback=exc_tb,
73 )
74 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
205
206 try:
--> 207 raise exception
208 finally:
209 # credit to
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __connect(self)
603 try:
604 self.starttime = time.time()
--> 605 connection = pool._invoke_creator(self)
606 pool.logger.debug("Created new connection %r", connection)
607 self.connection = connection
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/create.py in connect(connection_record)
576 if connection is not None:
577 return connection
--> 578 return dialect.connect(*cargs, **cparams)
579
580 creator = pop_kwarg("creator", connect)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py in connect(self, *cargs, **cparams)
582 def connect(self, *cargs, **cparams):
583 # inherits the docstring from interfaces.Dialect.connect
--> 584 return self.dbapi.connect(*cargs, **cparams)
585
586 def create_connect_args(self, url):
/usr/local/lib/python3.6/dist-packages/psycopg2/__init__.py in connect(dsn, connection_factory, cursor_factory, **kwargs)
120
121 dsn = _ext.make_dsn(dsn, **kwargs)
--> 122 conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
123 if cursor_factory is not None:
124 conn.cursor_factory = cursor_factory
OperationalError: (psycopg2.OperationalError) could not connect to server: Connection timed out
Is the server running on host "1.12.11.1" and accepting
TCP/IP connections on port 5432?
(Background on this error at: https://sqlalche.me/e/14/e3q8)
Very silly of me. I was using the wrong address in the string: instead of the Postgres DB server's host name, I was using the IP of the machine itself.
Thanks to all who answered.
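For reference, a minimal sketch (the host and database names below are placeholders): building the URL with sqlalchemy.engine.URL.create passes each field separately, so a password full of special characters such as Pas#$tk$#a needs no manual %-escaping.
from sqlalchemy import create_engine
from sqlalchemy.engine import URL
import pandas as pd

# Placeholder host/database; URL.create escapes each field for you.
url = URL.create(
    "postgresql+psycopg2",
    username="admin",
    password="Pas#$tk$#a",
    host="db.example.com",   # the Postgres server's host name, not this machine's IP
    port=5432,
    database="DEV_Sach_D",
)
engine = create_engine(url, connect_args={"sslmode": "require"}, echo=True)

df = pd.DataFrame({"col1": [1, 2, 3]})  # stand-in for the DataFrame from the question
with engine.connect() as conn:
    df.to_sql("py_stg_test", con=conn, if_exists="replace", index=False)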

smtplib/exchangelib error connecting to the email server

I used to use autodiscover to retrieve some emails from my inbox using exchangelib. However, last week they updated my email and autodiscover is not working anymore. Now I am testing the connection with smtplib using SMTP_SSL:
server = smtplib.SMTP_SSL('xxxxxxxx.com',port=25)
but I get
ConnectionRefusedError: [WinError 10061]
c:\python37\lib\smtplib.py in __init__(self, host, port, local_hostname, keyfile, certfile, timeout, source_address, context)
1029 self.context = context
1030 SMTP.__init__(self, host, port, local_hostname, timeout,
-> 1031 source_address)
1032
1033 def _get_socket(self, host, port, timeout):
c:\python37\lib\smtplib.py in __init__(self, host, port, local_hostname, timeout, source_address)
249
250 if host:
--> 251 (code, msg) = self.connect(host, port)
252 if code != 220:
253 self.close()
c:\python37\lib\smtplib.py in connect(self, host, port, source_address)
334 if self.debuglevel > 0:
335 self._print_debug('connect:', (host, port))
--> 336 self.sock = self._get_socket(host, port, self.timeout)
337 self.file = None
338 (code, msg) = self.getreply()
c:\python37\lib\smtplib.py in _get_socket(self, host, port, timeout)
1035 self._print_debug('connect:', (host, port))
1036 new_socket = socket.create_connection((host, port), timeout,
-> 1037 self.source_address)
1038 new_socket = self.context.wrap_socket(new_socket,
1039 server_hostname=self._host)
c:\python37\lib\socket.py in create_connection(address, timeout, source_address)
725
726 if err is not None:
--> 727 raise err
728 else:
729 raise error("getaddrinfo returns an empty list")
c:\python37\lib\socket.py in create_connection(address, timeout, source_address)
714 if source_address:
715 sock.bind(source_address)
--> 716 sock.connect(sa)
717 # Break explicitly a reference cycle
718 err = None
ConnectionRefusedError: [WinError 10061] No connection could be made because the target machine actively refused it
When I used autodiscover in exchangelib, I got:
AutoDiscoverFailed Traceback (most recent call last)
<ipython-input-48-c03f25ef8bb7> in <module>
5 autodiscover=True,
6 # config=self.config,
----> 7 access_type=DELEGATE
8 )
c:\python37\lib\site-packages\exchangelib\account.py in __init__(self, primary_smtp_address, fullname, access_type, autodiscover, credentials, config, locale, default_timezone)
78 raise AttributeError('config is ignored when autodiscover is active')
79 self.primary_smtp_address, self.protocol = discover(email=self.primary_smtp_address,
---> 80 credentials=credentials)
81 else:
82 if not config:
c:\python37\lib\site-packages\exchangelib\autodiscover.py in discover(email, credentials)
221 # We fell out of the with statement, so either cache was filled by someone else, or autodiscover redirected us to
222 # another email address. Start over after releasing the lock.
--> 223 return discover(email=email, credentials=credentials)
224
225
c:\python37\lib\site-packages\exchangelib\autodiscover.py in discover(email, credentials)
211 try:
212 # This eventually fills the cache in _autodiscover_hostname
--> 213 return _try_autodiscover(hostname=domain, credentials=credentials, email=email)
214 except AutoDiscoverRedirect as e:
215 if email.lower() == e.redirect_email.lower():
c:\python37\lib\site-packages\exchangelib\autodiscover.py in _try_autodiscover(hostname, credentials, email)
259 ), None)
260 log.info('autodiscover.%s redirected us to %s', hostname, e.server)
--> 261 return _try_autodiscover(e.server, credentials, email)
262 except AutoDiscoverFailed as e:
263 log.info('Autodiscover on autodiscover.%s (no TLS) failed (%s). Trying DNS records', hostname, e)
c:\python37\lib\site-packages\exchangelib\autodiscover.py in _try_autodiscover(hostname, credentials, email)
277 return _try_autodiscover(hostname=hostname_from_dns, credentials=credentials, email=email)
278 except AutoDiscoverFailed:
--> 279 raise_from(AutoDiscoverFailed('All steps in the autodiscover protocol failed'), None)
280
281
c:\python37\lib\site-packages\future\utils\__init__.py in raise_from(exc, cause)
398 myglobals['__python_future_raise_from_cause'] = cause
399 execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause"
--> 400 exec(execstr, myglobals, mylocals)
401
402 def raise_(tp, value=None, tb=None):
c:\python37\lib\site-packages\exchangelib\autodiscover.py in <module>
AutoDiscoverFailed: All steps in the autodiscover protocol failed
I don't know how to deal with these exceptions.
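Two hedged things that may be worth checking (the server name below is a placeholder): implicit-TLS SMTP normally listens on port 465 rather than 25, which by itself can produce WinError 10061; and exchangelib can bypass autodiscover entirely if you point it at the Exchange server yourself.
import smtplib
from exchangelib import Credentials, Configuration, Account, DELEGATE

# Implicit-TLS SMTP usually listens on 465; port 25 is plain SMTP and is often blocked.
server = smtplib.SMTP_SSL('mail.example.com', port=465)

# Skip autodiscover by naming the Exchange server explicitly (placeholder values).
credentials = Credentials(username='DOMAIN\\myuser', password='mypassword')
config = Configuration(server='mail.example.com', credentials=credentials)
account = Account(
    primary_smtp_address='me@example.com',
    config=config,
    autodiscover=False,
    access_type=DELEGATE,
)
print(account.inbox.total_count)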

Running Hive query through impala.dbapi fails when embedding "ADD JAR"

I'm launching a Hive query through Python's impala.dbapi, which works nicely, as follows:
import os
import pandas as pd
from impala.dbapi import connect
from impala.util import as_pandas
from datetime import datetime
user=os.environ['HIVE_USER']
password=os.environ['HIVE_PASSWORD']
up_to_date_query = '''
select * from dejavu.tracking_events limit 1
'''
conn = connect(host='ecprdbhdp02-clientgw.kenshooprd.local', port=10000,
user=user,
password=password,
auth_mechanism='PLAIN')
cursor = conn.cursor()
cursor.execute(up_to_date_query)
df = as_pandas(cursor)
df.head()
But when I add the following ADD JAR clause:
up_to_date_query = '''
ADD JAR hdfs://BICluster/user/yossis/udfs/hive-udf-0.1-SNAPSHOT.jar;
select * from dejavu.tracking_events limit 1
'''
I'm getting the following error:
---------------------------------------------------------------------------
HiveServer2Error Traceback (most recent call last)
<ipython-input-10-1e512abcc69e> in <module>()
4 auth_mechanism='PLAIN')
5 cursor = conn.cursor()
----> 6 cursor.execute(up_to_date_query)
7 df = as_pandas(cursor)
8 df.head()
/home/yehoshaphats/anaconda/lib/python2.7/site-packages/impala/hiveserver2.pyc in execute(self, operation, parameters, configuration)
300 # PEP 249
301 self.execute_async(operation, parameters=parameters,
--> 302 configuration=configuration)
303 log.debug('Waiting for query to finish')
304 self._wait_to_finish() # make execute synchronous
/home/yehoshaphats/anaconda/lib/python2.7/site-packages/impala/hiveserver2.pyc in execute_async(self, operation, parameters, configuration)
341 self._last_operation = op
342
--> 343 self._execute_async(op)
344
345 def _debug_log_state(self):
/home/yehoshaphats/anaconda/lib/python2.7/site-packages/impala/hiveserver2.pyc in _execute_async(self, operation_fn)
360 self._reset_state()
361 self._debug_log_state()
--> 362 operation_fn()
363 self._last_operation_active = True
364 self._debug_log_state()
/home/yehoshaphats/anaconda/lib/python2.7/site-packages/impala/hiveserver2.pyc in op()
338 op = self.session.execute(self._last_operation_string,
339 configuration,
--> 340 async=True)
341 self._last_operation = op
342
/home/yehoshaphats/anaconda/lib/python2.7/site-packages/impala/hiveserver2.pyc in execute(self, statement, configuration, async)
1025 confOverlay=configuration,
1026 runAsync=async)
-> 1027 return self._operation('ExecuteStatement', req)
1028
1029 def get_databases(self, schema='.*'):
/home/yehoshaphats/anaconda/lib/python2.7/site-packages/impala/hiveserver2.pyc in _operation(self, kind, request)
955
956 def _operation(self, kind, request):
--> 957 resp = self._rpc(kind, request)
958 return self._get_operation(resp.operationHandle)
959
/home/yehoshaphats/anaconda/lib/python2.7/site-packages/impala/hiveserver2.pyc in _rpc(self, func_name, request)
923 response = self._execute(func_name, request)
924 self._log_response(func_name, response)
--> 925 err_if_rpc_not_ok(response)
926 return response
927
/home/yehoshaphats/anaconda/lib/python2.7/site-packages/impala/hiveserver2.pyc in err_if_rpc_not_ok(resp)
702 resp.status.statusCode != TStatusCode.SUCCESS_WITH_INFO_STATUS and
703 resp.status.statusCode != TStatusCode.STILL_EXECUTING_STATUS):
--> 704 raise HiveServer2Error(resp.status.errorMessage)
705
706
HiveServer2Error: Error while processing statement: null
Note that the query works properly when run directly in Hive (through the Hue console).
After searching for similar questions, it seems no one has asked about exactly this problem :(
Thanks in advance!
It seems the ; separator causes trouble; I just executed the statements separately, as follows:
cursor.execute('ADD JAR hdfs://BICluster/user/yossis/udfs/hive-udf-0.1-SNAPSHOT.jar')
up_to_date_query = '''
select * from dejavu.tracking_events limit 1
'''
cursor.execute(up_to_date_query)
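A hedged generalisation of the same idea (assuming every statement is valid on its own and the script contains no quoted semicolons): split the script on ; and feed the pieces to the cursor one at a time.
# cursor is the impala.dbapi cursor created earlier in the question.
multi_statement_script = '''
ADD JAR hdfs://BICluster/user/yossis/udfs/hive-udf-0.1-SNAPSHOT.jar;
select * from dejavu.tracking_events limit 1
'''

for statement in multi_statement_script.split(';'):
    statement = statement.strip()
    if statement:          # skip empty pieces left by leading/trailing newlines
        cursor.execute(statement)
df = as_pandas(cursor)     # result set of the last statement, as before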

Pymongo KeyError: '$err' MongoDB Atlas

I'm following this introductory tutorial: https://www.mongodb.com/blog/post/getting-started-with-python-and-mongodb. I can connect to the cluster fine with the mongo shell, but not with PyMongo (Python 3.6.1, PyMongo 3.4.0). PyMongo works fine with a local MongoDB. What is the problem? Below is the exception I get:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-22-1c9d47341338> in <module>()
----> 1 server_status_result = db.command('serverStatus')
2 pprint(server_status_result)
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/database.py in command(self, command, value, check,
allowable_errors, read_preference, codec_options, **kwargs)
489 """
490 client = self.__client
--> 491 with client._socket_for_reads(read_preference) as
(sock_info, slave_ok):
492 return self._command(sock_info, command, slave_ok,
value,
493 check, allowable_errors,
read_preference,
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/mongo_client.py in _socket_for_reads(self,
read_preference)
857 topology = self._get_topology()
858 single = topology.description.topology_type ==
TOPOLOGY_TYPE.Single
--> 859 with self._get_socket(read_preference) as sock_info:
860 slave_ok = (single and not sock_info.is_mongos) or (
861 preference != ReadPreference.PRIMARY)
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/mongo_client.py in _get_socket(self, selector)
823 server = self._get_topology().select_server(selector)
824 try:
--> 825 with server.get_socket(self.__all_credentials) as
sock_info:
826 yield sock_info
827 except NetworkTimeout:
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/server.py in get_socket(self, all_credentials,
checkout)
166 @contextlib.contextmanager
167 def get_socket(self, all_credentials, checkout=False):
--> 168 with self.pool.get_socket(all_credentials, checkout)
as sock_info:
169 yield sock_info
170
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in get_socket(self, all_credentials,
checkout)
790 sock_info = self._get_socket_no_auth()
791 try:
--> 792 sock_info.check_auth(all_credentials)
793 yield sock_info
794 except:
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in check_auth(self, all_credentials)
510
511 for credentials in cached - authset:
--> 512 auth.authenticate(credentials, self)
513 self.authset.add(credentials)
514
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in authenticate(credentials, sock_info)
468 mechanism = credentials.mechanism
469 auth_func = _AUTH_MAP.get(mechanism)
--> 470 auth_func(credentials, sock_info)
471
472
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in _authenticate_default(credentials,
sock_info)
448 def _authenticate_default(credentials, sock_info):
449 if sock_info.max_wire_version >= 3:
--> 450 return _authenticate_scram_sha1(credentials,
sock_info)
451 else:
452 return _authenticate_mongo_cr(credentials, sock_info)
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in _authenticate_scram_sha1(credentials,
sock_info)
227 ('conversationId', res['conversationId']),
228 ('payload', Binary(client_final))])
--> 229 res = sock_info.command(source, cmd)
230
231 parsed = _parse_scram_response(res['payload'])
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in command(self, dbname, spec, slave_ok,
read_preference, codec_options, check, allowable_errors, check_keys,
read_concern, write_concern, parse_write_concern_error, collation)
422 # Catch socket.error, KeyboardInterrupt, etc. and close
ourselves.
423 except BaseException as error:
--> 424 self._raise_connection_failure(error)
425
426 def send_message(self, message, max_doc_size):
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in _raise_connection_failure(self, error)
550 _raise_connection_failure(self.address, error)
551 else:
--> 552 raise error
553
554 def __eq__(self, other):
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in command(self, dbname, spec, slave_ok,
read_preference, codec_options, check, allowable_errors, check_keys,
read_concern, write_concern, parse_write_concern_error, collation)
417 read_concern,
418
parse_write_concern_error=parse_write_concern_error,
--> 419 collation=collation)
420 except OperationFailure:
421 raise
/home/tim/.virtualenvs/main/lib/python3.6/site-p
ackages/pymongo/network.py in command(sock, dbname, spec, slave_ok,
is_mongos, read_preference, codec_options, check, allowable_errors,
address, check_keys, listeners, max_bson_size, read_concern,
parse_write_concern_error, collation)
108 response = receive_message(sock, 1, request_id)
109 unpacked = helpers._unpack_response(
--> 110 response, codec_options=codec_options)
111
112 response_doc = unpacked['data'][0]
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/helpers.py in _unpack_response(response, cursor_id,
codec_options)
126 # Fake the ok field if it doesn't exist.
127 error_object.setdefault("ok", 0)
--> 128 if error_object["$err"].startswith("not master"):
129 raise NotMasterError(error_object["$err"],
error_object)
130 elif error_object.get("code") == 50:
KeyError: '$err'
I believe this is an Atlas bug, I've reported it to the team. The bug is, if you fail to log in to Atlas because your username or password are incorrect, it replies in a way that makes PyMongo throw a KeyError instead of the proper OperationFailure("auth failed").
PyMongo does work with Atlas, however, if you properly format your connection string with your username and password. Make sure your username and password are URL-quoted. Substitute your username and password into this Python code:
from urllib.parse import quote_plus  # Python 3; on Python 2 use: from urllib import quote_plus
print(quote_plus('MY USERNAME'))
print(quote_plus('MY PASSWORD'))
Take the output and put it into the connection string Atlas gave you, e.g. if your username is jesse@example.com and your password is "foo:bar", put that in the first part of the string, and get the rest of the string from the Atlas control panel for your account:
mongodb://jesse%40example.com:foo%3Abar@cluster0-shard-00-00-abc.mongodb.net:27017,cluster0-shard-00-01-abc.mongodb.net:27017,cluster0-shard-00-02-abc.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin
Note how "jesse@example.com" has become "jesse%40example.com", and "foo:bar" has become "foo%3Abar".
