I'm following this introductory tutorial: https://www.mongodb.com/blog/post/getting-started-with-python-and-mongodb. I can connect to the cluster fine with the mongo shell, but not with PyMongo (Python 3.6.1, PyMongo 3.4.0). PyMongo works okay with a local MongoDB. What is the problem? Below is the exception I get:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-22-1c9d47341338> in <module>()
----> 1 server_status_result = db.command('serverStatus')
2 pprint(server_status_result)
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/database.py in command(self, command, value, check,
allowable_errors, read_preference, codec_options, **kwargs)
489 """
490 client = self.__client
--> 491 with client._socket_for_reads(read_preference) as
(sock_info, slave_ok):
492 return self._command(sock_info, command, slave_ok,
value,
493 check, allowable_errors,
read_preference,
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/mongo_client.py in _socket_for_reads(self,
read_preference)
857 topology = self._get_topology()
858 single = topology.description.topology_type ==
TOPOLOGY_TYPE.Single
--> 859 with self._get_socket(read_preference) as sock_info:
860 slave_ok = (single and not sock_info.is_mongos) or (
861 preference != ReadPreference.PRIMARY)
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/mongo_client.py in _get_socket(self, selector)
823 server = self._get_topology().select_server(selector)
824 try:
--> 825 with server.get_socket(self.__all_credentials) as
sock_info:
826 yield sock_info
827 except NetworkTimeout:
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/server.py in get_socket(self, all_credentials,
checkout)
166 @contextlib.contextmanager
167 def get_socket(self, all_credentials, checkout=False):
--> 168 with self.pool.get_socket(all_credentials, checkout)
as sock_info:
169 yield sock_info
170
/usr/lib/python3.6/contextlib.py in __enter__(self)
80 def __enter__(self):
81 try:
---> 82 return next(self.gen)
83 except StopIteration:
84 raise RuntimeError("generator didn't yield") from None
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in get_socket(self, all_credentials,
checkout)
790 sock_info = self._get_socket_no_auth()
791 try:
--> 792 sock_info.check_auth(all_credentials)
793 yield sock_info
794 except:
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in check_auth(self, all_credentials)
510
511 for credentials in cached - authset:
--> 512 auth.authenticate(credentials, self)
513 self.authset.add(credentials)
514
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in authenticate(credentials, sock_info)
468 mechanism = credentials.mechanism
469 auth_func = _AUTH_MAP.get(mechanism)
--> 470 auth_func(credentials, sock_info)
471
472
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in _authenticate_default(credentials,
sock_info)
448 def _authenticate_default(credentials, sock_info):
449 if sock_info.max_wire_version >= 3:
--> 450 return _authenticate_scram_sha1(credentials,
sock_info)
451 else:
452 return _authenticate_mongo_cr(credentials, sock_info)
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/auth.py in _authenticate_scram_sha1(credentials,
sock_info)
227 ('conversationId', res['conversationId']),
228 ('payload', Binary(client_final))])
--> 229 res = sock_info.command(source, cmd)
230
231 parsed = _parse_scram_response(res['payload'])
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in command(self, dbname, spec, slave_ok,
read_preference, codec_options, check, allowable_errors, check_keys,
read_concern, write_concern, parse_write_concern_error, collation)
422 # Catch socket.error, KeyboardInterrupt, etc. and close
ourselves.
423 except BaseException as error:
--> 424 self._raise_connection_failure(error)
425
426 def send_message(self, message, max_doc_size):
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in _raise_connection_failure(self, error)
550 _raise_connection_failure(self.address, error)
551 else:
--> 552 raise error
553
554 def __eq__(self, other):
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/pool.py in command(self, dbname, spec, slave_ok,
read_preference, codec_options, check, allowable_errors, check_keys,
read_concern, write_concern, parse_write_concern_error, collation)
417 read_concern,
418
parse_write_concern_error=parse_write_concern_error,
--> 419 collation=collation)
420 except OperationFailure:
421 raise
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/network.py in command(sock, dbname, spec, slave_ok,
is_mongos, read_preference, codec_options, check, allowable_errors,
address, check_keys, listeners, max_bson_size, read_concern,
parse_write_concern_error, collation)
108 response = receive_message(sock, 1, request_id)
109 unpacked = helpers._unpack_response(
--> 110 response, codec_options=codec_options)
111
112 response_doc = unpacked['data'][0]
/home/tim/.virtualenvs/main/lib/python3.6/site-
packages/pymongo/helpers.py in _unpack_response(response, cursor_id,
codec_options)
126 # Fake the ok field if it doesn't exist.
127 error_object.setdefault("ok", 0)
--> 128 if error_object["$err"].startswith("not master"):
129 raise NotMasterError(error_object["$err"],
error_object)
130 elif error_object.get("code") == 50:
KeyError: '$err'
I believe this is an Atlas bug; I've reported it to the team. The bug is that if you fail to log in to Atlas because your username or password is incorrect, it replies in a way that makes PyMongo throw a KeyError instead of the proper OperationFailure("auth failed").
PyMongo does work with Atlas, however, if you format your connection string properly with your username and password. Make sure your username and password are URL-quoted. Substitute your username and password into this Python code:
from urllib.parse import quote_plus  # on Python 2: from urllib import quote_plus
print(quote_plus('MY USERNAME'))
print(quote_plus('MY PASSWORD'))
Take the output and put it into the connection string Atlas gave you. E.g., if your username is jesse@example.com and your password is "foo:bar", put those in the first part of the string, and get the rest of the string from the Atlas control panel for your account:
mongodb://jesse%40example.com:foo%3Abar@cluster0-shard-00-00-abc.mongodb.net:27017,cluster0-shard-00-01-abc.mongodb.net:27017,cluster0-shard-00-02-abc.mongodb.net:27017/test?ssl=true&replicaSet=Cluster0-shard-0&authSource=admin
Note how "jesse@example.com" has become "jesse%40example.com", and "foo:bar" has become "foo%3Abar".
I am trying to get TerraClimate data from the Microsoft Planetary Computer and am facing a timeout error. Is there a possibility of increasing the timeout? Please find the code and the error I am facing below. I am using fsspec and xarray for downloading spatial data from the MS Planetary portal.
import fsspec
import xarray as xr
store = fsspec.get_mapper(asset.href)
data = xr.open_zarr(store, **asset.extra_fields["xarray:open_kwargs"])
clipped_data = data.sel(time=slice('2015-01-01','2019-12-31'),lon=slice(min_lon,max_lon),lat=slice(max_lat,min_lat))
parsed_data = clipped_data[['tmax', 'tmin', 'ppt', 'soil']]
lat_list = parsed_data['lat'].values.tolist()
lon_list = parsed_data['lon'].values.tolist()
filename = "Soil_Moisture_sample.csv"
for (i, j) in zip(lat_list, lon_list):
    parsed_data[["soil", "tmax", "tmin", "ppt"]].sel(lon=i, lat=j, method="nearest").to_dataframe().to_csv(filename, mode='a', index=False, header=False)
I am getting the following error
TimeoutError Traceback (most recent call last)
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\asyn.py:53, in _runner(event, coro, result, timeout)
52 try:
---> 53 result[0] = await coro
54 except Exception as ex:
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\asyn.py:423, in AsyncFileSystem._cat(self, path, recursive, on_error, batch_size, **kwargs)
422 if ex:
--> 423 raise ex
424 if (
425 len(paths) > 1
426 or isinstance(path, list)
427 or paths[0] != self._strip_protocol(path)
428 ):
File ~\Anaconda3\envs\satellite\lib\asyncio\tasks.py:455, in wait_for(fut, timeout, loop)
454 if timeout is None:
--> 455 return await fut
457 if timeout <= 0:
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\implementations\http.py:221, in HTTPFileSystem._cat_file(self, url, start, end, **kwargs)
220 async with session.get(url, **kw) as r:
--> 221 out = await r.read()
222 self._raise_not_found_for_status(r, url)
File ~\Anaconda3\envs\satellite\lib\site-packages\aiohttp\client_reqrep.py:1036, in ClientResponse.read(self)
1035 try:
-> 1036 self._body = await self.content.read()
1037 for trace in self._traces:
File ~\Anaconda3\envs\satellite\lib\site-packages\aiohttp\streams.py:375, in StreamReader.read(self, n)
374 while True:
--> 375 block = await self.readany()
376 if not block:
File ~\Anaconda3\envs\satellite\lib\site-packages\aiohttp\streams.py:397, in StreamReader.readany(self)
396 while not self._buffer and not self._eof:
--> 397 await self._wait("readany")
399 return self._read_nowait(-1)
File ~\Anaconda3\envs\satellite\lib\site-packages\aiohttp\streams.py:304, in StreamReader._wait(self, func_name)
303 with self._timer:
--> 304 await waiter
305 else:
File ~\Anaconda3\envs\satellite\lib\site-packages\aiohttp\helpers.py:721, in TimerContext.__exit__(self, exc_type, exc_val, exc_tb)
720 if exc_type is asyncio.CancelledError and self._cancelled:
--> 721 raise asyncio.TimeoutError from None
722 return None
TimeoutError:
The above exception was the direct cause of the following exception:
FSTimeoutError Traceback (most recent call last)
Input In [62], in <cell line: 3>()
1 # Flood Region Point - Thiruvanthpuram
2 filename = "Soil_Moisture_sample.csv"
----> 3 parsed_data[["soil","tmax","tmin","ppt"]].sel(lon=8.520833, lat=76.4375, method="nearest").to_dataframe().to_csv(filename,mode='a',index=False, header=False)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\dataset.py:5898, in Dataset.to_dataframe(self, dim_order)
5870 """Convert this dataset into a pandas.DataFrame.
5871
5872 Non-index variables in this dataset form the columns of the
(...)
5893
5894 """
5896 ordered_dims = self._normalize_dim_order(dim_order=dim_order)
-> 5898 return self._to_dataframe(ordered_dims=ordered_dims)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\dataset.py:5862, in Dataset._to_dataframe(self, ordered_dims)
5860 def _to_dataframe(self, ordered_dims: Mapping[Any, int]):
5861 columns = [k for k in self.variables if k not in self.dims]
-> 5862 data = [
5863 self._variables[k].set_dims(ordered_dims).values.reshape(-1)
5864 for k in columns
5865 ]
5866 index = self.coords.to_index([*ordered_dims])
5867 return pd.DataFrame(dict(zip(columns, data)), index=index)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\dataset.py:5863, in <listcomp>(.0)
5860 def _to_dataframe(self, ordered_dims: Mapping[Any, int]):
5861 columns = [k for k in self.variables if k not in self.dims]
5862 data = [
-> 5863 self._variables[k].set_dims(ordered_dims).values.reshape(-1)
5864 for k in columns
5865 ]
5866 index = self.coords.to_index([*ordered_dims])
5867 return pd.DataFrame(dict(zip(columns, data)), index=index)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\variable.py:527, in Variable.values(self)
524 @property
525 def values(self):
526 """The variable's data as a numpy.ndarray"""
--> 527 return _as_array_or_item(self._data)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\variable.py:267, in _as_array_or_item(data)
253 def _as_array_or_item(data):
254 """Return the given values as a numpy array, or as an individual item if
255 it's a 0d datetime64 or timedelta64 array.
256
(...)
265 TODO: remove this (replace with np.asarray) once these issues are fixed
266 """
--> 267 data = np.asarray(data)
268 if data.ndim == 0:
269 if data.dtype.kind == "M":
File ~\AppData\Roaming\Python\Python38\site-packages\dask\array\core.py:1696, in Array.__array__(self, dtype, **kwargs)
1695 def __array__(self, dtype=None, **kwargs):
-> 1696 x = self.compute()
1697 if dtype and x.dtype != dtype:
1698 x = x.astype(dtype)
File ~\AppData\Roaming\Python\Python38\site-packages\dask\base.py:315, in DaskMethodsMixin.compute(self, **kwargs)
291 def compute(self, **kwargs):
292 """Compute this dask collection
293
294 This turns a lazy Dask collection into its in-memory equivalent.
(...)
313 dask.base.compute
314 """
--> 315 (result,) = compute(self, traverse=False, **kwargs)
316 return result
File ~\AppData\Roaming\Python\Python38\site-packages\dask\base.py:600, in compute(traverse, optimize_graph, scheduler, get, *args, **kwargs)
597 keys.append(x.__dask_keys__())
598 postcomputes.append(x.__dask_postcompute__())
--> 600 results = schedule(dsk, keys, **kwargs)
601 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
File ~\AppData\Roaming\Python\Python38\site-packages\dask\threaded.py:89, in get(dsk, keys, cache, num_workers, pool, **kwargs)
86 elif isinstance(pool, multiprocessing.pool.Pool):
87 pool = MultiprocessingPoolExecutor(pool)
---> 89 results = get_async(
90 pool.submit,
91 pool._max_workers,
92 dsk,
93 keys,
94 cache=cache,
95 get_id=_thread_get_id,
96 pack_exception=pack_exception,
97 **kwargs,
98 )
100 # Cleanup pools associated to dead threads
101 with pools_lock:
File ~\AppData\Roaming\Python\Python38\site-packages\dask\local.py:511, in get_async(submit, num_workers, dsk, result, cache, get_id, rerun_exceptions_locally, pack_exception, raise_exception, callbacks, dumps, loads, chunksize, **kwargs)
509 _execute_task(task, data) # Re-execute locally
510 else:
--> 511 raise_exception(exc, tb)
512 res, worker_id = loads(res_info)
513 state["cache"][key] = res
File ~\AppData\Roaming\Python\Python38\site-packages\dask\local.py:319, in reraise(exc, tb)
317 if exc.__traceback__ is not tb:
318 raise exc.with_traceback(tb)
--> 319 raise exc
File ~\AppData\Roaming\Python\Python38\site-packages\dask\local.py:224, in execute_task(key, task_info, dumps, loads, get_id, pack_exception)
222 try:
223 task, data = loads(task_info)
--> 224 result = _execute_task(task, data)
225 id = get_id()
226 result = dumps((result, id))
File ~\AppData\Roaming\Python\Python38\site-packages\dask\core.py:119, in _execute_task(arg, cache, dsk)
115 func, args = arg[0], arg[1:]
116 # Note: Don't assign the subtask results to a variable. numpy detects
117 # temporaries by their reference count and can execute certain
118 # operations in-place.
--> 119 return func(*(_execute_task(a, cache) for a in args))
120 elif not ishashable(arg):
121 return arg
File ~\AppData\Roaming\Python\Python38\site-packages\dask\array\core.py:128, in getter(a, b, asarray, lock)
123 # Below we special-case `np.matrix` to force a conversion to
124 # `np.ndarray` and preserve original Dask behavior for `getter`,
125 # as for all purposes `np.matrix` is array-like and thus
126 # `is_arraylike` evaluates to `True` in that case.
127 if asarray and (not is_arraylike(c) or isinstance(c, np.matrix)):
--> 128 c = np.asarray(c)
129 finally:
130 if lock:
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\indexing.py:459, in ImplicitToExplicitIndexingAdapter.__array__(self, dtype)
458 def __array__(self, dtype=None):
--> 459 return np.asarray(self.array, dtype=dtype)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\indexing.py:623, in CopyOnWriteArray.__array__(self, dtype)
622 def __array__(self, dtype=None):
--> 623 return np.asarray(self.array, dtype=dtype)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\core\indexing.py:524, in LazilyIndexedArray.__array__(self, dtype)
522 def __array__(self, dtype=None):
523 array = as_indexable(self.array)
--> 524 return np.asarray(array[self.key], dtype=None)
File ~\Anaconda3\envs\satellite\lib\site-packages\xarray\backends\zarr.py:76, in ZarrArrayWrapper.__getitem__(self, key)
74 array = self.get_array()
75 if isinstance(key, indexing.BasicIndexer):
---> 76 return array[key.tuple]
77 elif isinstance(key, indexing.VectorizedIndexer):
78 return array.vindex[
79 indexing._arrayize_vectorized_indexer(key, self.shape).tuple
80 ]
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\core.py:788, in Array.__getitem__(self, selection)
786 result = self.vindex[selection]
787 else:
--> 788 result = self.get_basic_selection(pure_selection, fields=fields)
789 return result
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\core.py:914, in Array.get_basic_selection(self, selection, out, fields)
911 return self._get_basic_selection_zd(selection=selection, out=out,
912 fields=fields)
913 else:
--> 914 return self._get_basic_selection_nd(selection=selection, out=out,
915 fields=fields)
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\core.py:957, in Array._get_basic_selection_nd(self, selection, out, fields)
951 def _get_basic_selection_nd(self, selection, out=None, fields=None):
952 # implementation of basic selection for array with at least one dimension
953
954 # setup indexer
955 indexer = BasicIndexer(selection, self)
--> 957 return self._get_selection(indexer=indexer, out=out, fields=fields)
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\core.py:1247, in Array._get_selection(self, indexer, out, fields)
1241 if not hasattr(self.chunk_store, "getitems") or \
1242 any(map(lambda x: x == 0, self.shape)):
1243 # sequentially get one key at a time from storage
1244 for chunk_coords, chunk_selection, out_selection in indexer:
1245
1246 # load chunk selection into output array
-> 1247 self._chunk_getitem(chunk_coords, chunk_selection, out, out_selection,
1248 drop_axes=indexer.drop_axes, fields=fields)
1249 else:
1250 # allow storage to get multiple items at once
1251 lchunk_coords, lchunk_selection, lout_selection = zip(*indexer)
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\core.py:1939, in Array._chunk_getitem(self, chunk_coords, chunk_selection, out, out_selection, drop_axes, fields)
1935 ckey = self._chunk_key(chunk_coords)
1937 try:
1938 # obtain compressed data for chunk
-> 1939 cdata = self.chunk_store[ckey]
1941 except KeyError:
1942 # chunk not initialized
1943 if self._fill_value is not None:
File ~\Anaconda3\envs\satellite\lib\site-packages\zarr\storage.py:717, in KVStore.__getitem__(self, key)
716 def __getitem__(self, key):
--> 717 return self._mutable_mapping[key]
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\mapping.py:137, in FSMap.__getitem__(self, key, default)
135 k = self._key_to_str(key)
136 try:
--> 137 result = self.fs.cat(k)
138 except self.missing_exceptions:
139 if default is not None:
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\asyn.py:111, in sync_wrapper.<locals>.wrapper(*args, **kwargs)
108 @functools.wraps(func)
109 def wrapper(*args, **kwargs):
110 self = obj or args[0]
--> 111 return sync(self.loop, func, *args, **kwargs)
File ~\Anaconda3\envs\satellite\lib\site-packages\fsspec\asyn.py:94, in sync(loop, func, timeout, *args, **kwargs)
91 return_result = result[0]
92 if isinstance(return_result, asyncio.TimeoutError):
93 # suppress asyncio.TimeoutError, raise FSTimeoutError
---> 94 raise FSTimeoutError from return_result
95 elif isinstance(return_result, BaseException):
96 raise return_result
FSTimeoutError:
In the line:
store = fsspec.get_mapper(asset.href)
You can pass extra arguments to the fsspec backend, in this case HTTP; see fsspec.implementations.http.HTTPFileSystem. In this case, client_kwargs gets passed to aiohttp.ClientSession and can include an optional timeout argument. Your call may look something like:
import fsspec
from aiohttp import ClientTimeout

store = fsspec.get_mapper(asset.href, client_kwargs={"timeout": ClientTimeout(total=5000, connect=1000)})
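With that store, the rest of the original code can stay as it was; reads through it simply use the longer aiohttp timeout. A brief usage sketch (assuming asset is the same Planetary Computer asset as in the question):
import xarray as xr

# Opening the dataset is unchanged; only the HTTP timeout behind the store differs.
data = xr.open_zarr(store, **asset.extra_fields["xarray:open_kwargs"])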
I have read quite a few posts around this but couldn't find a resolution. I am able to read from Postgres, but I get a connection timeout error when I use the create_engine string. I know the credentials are correct because I can read from Postgres.
My aim is to write a dataframe (df) to Postgres directly from Python and, going forward, keep appending rows to that table. I have changed the password and the IP address in the example below. Not sure if this is relevant, but I am using a tunnel in PuTTY to connect to the remote server and launch a Jupyter notebook, and then I try to write to the Postgres DB.
My code:
from sqlalchemy import create_engine
import psycopg2
import io
password = 'Pas#$tk$#a'  # This is just an example. My password has special characters.
engine = create_engine('postgresql+psycopg2://admin:password@1.12.11.1:5432/DEV_Sach_D', connect_args={'sslmode':'require'}, echo=True).connect()  # I have changed the IP here for security reasons
conn = engine.connect()
df.to_sql('py_stg_test', con=conn, if_exists='replace',index=False)
The error I get:
---------------------------------------------------------------------------
OperationalError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
3211 try:
-> 3212 return fn()
3213 except dialect.dbapi.Error as e:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in connect(self)
306 """
--> 307 return _ConnectionFairy._checkout(self)
308
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in _checkout(cls, pool, threadconns, fairy)
766 if not fairy:
--> 767 fairy = _ConnectionRecord.checkout(pool)
768
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in checkout(cls, pool)
424 def checkout(cls, pool):
--> 425 rec = pool._do_get()
426 try:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/impl.py in _do_get(self)
145 with util.safe_reraise():
--> 146 self._dec_overflow()
147 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
71 exc_value,
---> 72 with_traceback=exc_tb,
73 )
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
206 try:
--> 207 raise exception
208 finally:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/impl.py in _do_get(self)
142 try:
--> 143 return self._create_connection()
144 except:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in _create_connection(self)
252
--> 253 return _ConnectionRecord(self)
254
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __init__(self, pool, connect)
367 if connect:
--> 368 self.__connect()
369 self.finalize_callback = deque()
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __connect(self)
610 with util.safe_reraise():
--> 611 pool.logger.debug("Error on connect(): %s", e)
612 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
71 exc_value,
---> 72 with_traceback=exc_tb,
73 )
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
206 try:
--> 207 raise exception
208 finally:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __connect(self)
604 self.starttime = time.time()
--> 605 connection = pool._invoke_creator(self)
606 pool.logger.debug("Created new connection %r", connection)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/create.py in connect(connection_record)
577 return connection
--> 578 return dialect.connect(*cargs, **cparams)
579
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py in connect(self, *cargs, **cparams)
583 # inherits the docstring from interfaces.Dialect.connect
--> 584 return self.dbapi.connect(*cargs, **cparams)
585
/usr/local/lib/python3.6/dist-packages/psycopg2/__init__.py in connect(dsn, connection_factory, cursor_factory, **kwargs)
121 dsn = _ext.make_dsn(dsn, **kwargs)
--> 122 conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
123 if cursor_factory is not None:
OperationalError: could not connect to server: Connection timed out
Is the server running on host "1.12.11.1" and accepting
TCP/IP connections on port 5432?
The above exception was the direct cause of the following exception:
OperationalError Traceback (most recent call last)
<ipython-input-5-28cdb6a0c387> in <module>()
4 password = 'Pas#$tk$#a'
5
----> 6 engine = create_engine('postgresql+psycopg2://libertypassageadminuser@libertypassage:password@1.12.11.1:5432/Dev_Sach_D', connect_args={'sslmode':'require'}, echo=True).connect()
7 conn = engine.connect()
8 df.to_sql('py_stg_test', con=conn, if_exists='replace',index=False)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in connect(self, close_with_result)
3164 """
3165
-> 3166 return self._connection_cls(self, close_with_result=close_with_result)
3167
3168 @util.deprecated(
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in __init__(self, engine, connection, close_with_result, _branch_from, _execution_options, _dispatch, _has_events, _allow_revalidate)
94 connection
95 if connection is not None
---> 96 else engine.raw_connection()
97 )
98
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in raw_connection(self, _connection)
3243
3244 """
-> 3245 return self._wrap_pool_connect(self.pool.connect, _connection)
3246
3247
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
3214 if connection is None:
3215 Connection._handle_dbapi_exception_noconnection(
-> 3216 e, dialect, self
3217 )
3218 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _handle_dbapi_exception_noconnection(cls, e, dialect, engine)
2068 elif should_wrap:
2069 util.raise_(
-> 2070 sqlalchemy_exception, with_traceback=exc_info[2], from_=e
2071 )
2072 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
205
206 try:
--> 207 raise exception
208 finally:
209 # credit to
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/base.py in _wrap_pool_connect(self, fn, connection)
3210 dialect = self.dialect
3211 try:
-> 3212 return fn()
3213 except dialect.dbapi.Error as e:
3214 if connection is None:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in connect(self)
305
306 """
--> 307 return _ConnectionFairy._checkout(self)
308
309 def _return_conn(self, record):
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in _checkout(cls, pool, threadconns, fairy)
765 def _checkout(cls, pool, threadconns=None, fairy=None):
766 if not fairy:
--> 767 fairy = _ConnectionRecord.checkout(pool)
768
769 fairy._pool = pool
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in checkout(cls, pool)
423 @classmethod
424 def checkout(cls, pool):
--> 425 rec = pool._do_get()
426 try:
427 dbapi_connection = rec.get_connection()
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/impl.py in _do_get(self)
144 except:
145 with util.safe_reraise():
--> 146 self._dec_overflow()
147 else:
148 return self._do_get()
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
70 compat.raise_(
71 exc_value,
---> 72 with_traceback=exc_tb,
73 )
74 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
205
206 try:
--> 207 raise exception
208 finally:
209 # credit to
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/impl.py in _do_get(self)
141 if self._inc_overflow():
142 try:
--> 143 return self._create_connection()
144 except:
145 with util.safe_reraise():
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in _create_connection(self)
251 """Called by subclasses to create a new ConnectionRecord."""
252
--> 253 return _ConnectionRecord(self)
254
255 def _invalidate(self, connection, exception=None, _checkin=True):
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __init__(self, pool, connect)
366 self.__pool = pool
367 if connect:
--> 368 self.__connect()
369 self.finalize_callback = deque()
370
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __connect(self)
609 except Exception as e:
610 with util.safe_reraise():
--> 611 pool.logger.debug("Error on connect(): %s", e)
612 else:
613 # in SQLAlchemy 1.4 the first_connect event is not used by
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/langhelpers.py in __exit__(self, type_, value, traceback)
70 compat.raise_(
71 exc_value,
---> 72 with_traceback=exc_tb,
73 )
74 else:
/usr/local/lib/python3.6/dist-packages/sqlalchemy/util/compat.py in raise_(***failed resolving arguments***)
205
206 try:
--> 207 raise exception
208 finally:
209 # credit to
/usr/local/lib/python3.6/dist-packages/sqlalchemy/pool/base.py in __connect(self)
603 try:
604 self.starttime = time.time()
--> 605 connection = pool._invoke_creator(self)
606 pool.logger.debug("Created new connection %r", connection)
607 self.connection = connection
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/create.py in connect(connection_record)
576 if connection is not None:
577 return connection
--> 578 return dialect.connect(*cargs, **cparams)
579
580 creator = pop_kwarg("creator", connect)
/usr/local/lib/python3.6/dist-packages/sqlalchemy/engine/default.py in connect(self, *cargs, **cparams)
582 def connect(self, *cargs, **cparams):
583 # inherits the docstring from interfaces.Dialect.connect
--> 584 return self.dbapi.connect(*cargs, **cparams)
585
586 def create_connect_args(self, url):
/usr/local/lib/python3.6/dist-packages/psycopg2/__init__.py in connect(dsn, connection_factory, cursor_factory, **kwargs)
120
121 dsn = _ext.make_dsn(dsn, **kwargs)
--> 122 conn = _connect(dsn, connection_factory=connection_factory, **kwasync)
123 if cursor_factory is not None:
124 conn.cursor_factory = cursor_factory
OperationalError: (psycopg2.OperationalError) could not connect to server: Connection timed out
Is the server running on host "1.12.11.1" and accepting
TCP/IP connections on port 5432?
(Background on this error at: https://sqlalche.me/e/14/e3q8)
Very silly of me. I was using the wrong address in the string: instead of the Postgres server's host name, I was using the IP of the machine itself.
Thanks to all who answered.
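For anyone else landing here, a minimal sketch of the pattern that avoids both pitfalls (the host name below is a placeholder for your actual Postgres server): URL-encode the password so its special characters survive the connection URL, point the URL at the database server rather than your own machine, and use if_exists='append' for the follow-up loads.
from urllib.parse import quote_plus
import pandas as pd
from sqlalchemy import create_engine

password = quote_plus('Pas#$tk$#a')  # URL-encode the special characters
engine = create_engine(
    'postgresql+psycopg2://admin:%s@my-postgres-host.example.com:5432/DEV_Sach_D' % password,
    connect_args={'sslmode': 'require'},
)

df = pd.DataFrame({'col': [1, 2, 3]})
df.to_sql('py_stg_test', con=engine, if_exists='replace', index=False)  # first load
df.to_sql('py_stg_test', con=engine, if_exists='append', index=False)   # subsequent loads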
I have a chain of tasks and want to terminate it conditionally. I am following the steps in https://stackoverflow.com/a/21106596/243031, but after that we are not getting the output.
My task is:
from __future__ import absolute_import, unicode_literals
from .celery import app

@app.task(bind=True)
def add(self, x, y):
    if (x + y) % 2 == 0:
        self.request.callbacks[:] = []
    return x + y
This means that when the sum is even and the task is part of a chain, that chain should stop.
But it gives an error:
In [13]: ~(add.s(2, 2) | add.s(3))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-0fe85c5d0e22> in <module>
----> 1 ~(add.s(2, 2) | add.s(3))
~/virtualenv/lib/python3.7/site-packages/celery/canvas.py in __invert__(self)
479
480 def __invert__(self):
--> 481 return self.apply_async().get()
482
483 def __reduce__(self):
~/virtualenv/lib/python3.7/site-packages/celery/result.py in get(self, timeout, propagate, interval, no_ack, follow_parents, callback, on_message, on_interval, disable_sync_subtasks, EXCEPTION_STATES, PROPAGATE_STATES)
228 propagate=propagate,
229 callback=callback,
--> 230 on_message=on_message,
231 )
232 wait = get # deprecated alias to :meth:`get`.
~/virtualenv/lib/python3.7/site-packages/celery/backends/asynchronous.py in wait_for_pending(self, result, callback, propagate, **kwargs)
197 callback=None, propagate=True, **kwargs):
198 self._ensure_not_eager()
--> 199 for _ in self._wait_for_pending(result, **kwargs):
200 pass
201 return result.maybe_throw(callback=callback, propagate=propagate)
~/virtualenv/lib/python3.7/site-packages/celery/backends/asynchronous.py in _wait_for_pending(self, result, timeout, on_interval, on_message, **kwargs)
265 for _ in self.drain_events_until(
266 result.on_ready, timeout=timeout,
--> 267 on_interval=on_interval):
268 yield
269 sleep(0)
~/virtualenv/lib/python3.7/site-packages/celery/backends/asynchronous.py in drain_events_until(self, p, timeout, interval, on_interval, wait)
56 pass
57 if on_interval:
---> 58 on_interval()
59 if p.ready: # got event on the wanted channel.
60 break
~/virtualenv/lib/python3.7/site-packages/vine/promises.py in __call__(self, *args, **kwargs)
158 self.value = (ca, ck) = (retval,), {}
159 except Exception:
--> 160 return self.throw()
161 else:
162 self.value = (ca, ck) = final_args, final_kwargs
~/virtualenv/lib/python3.7/site-packages/vine/promises.py in __call__(self, *args, **kwargs)
155 ck = {}
156 else:
--> 157 retval = fun(*final_args, **final_kwargs)
158 self.value = (ca, ck) = (retval,), {}
159 except Exception:
~/virtualenv/lib/python3.7/site-packages/celery/result.py in _maybe_reraise_parent_error(self)
234 def _maybe_reraise_parent_error(self):
235 for node in reversed(list(self._parents())):
--> 236 node.maybe_throw()
237
238 def _parents(self):
~/virtualenv/lib/python3.7/site-packages/celery/result.py in maybe_throw(self, propagate, callback)
333 cache['status'], cache['result'], cache.get('traceback'))
334 if state in states.PROPAGATE_STATES and propagate:
--> 335 self.throw(value, self._to_remote_traceback(tb))
336 if callback is not None:
337 callback(self.id, value)
~/virtualenv/lib/python3.7/site-packages/celery/result.py in throw(self, *args, **kwargs)
326
327 def throw(self, *args, **kwargs):
--> 328 self.on_ready.throw(*args, **kwargs)
329
330 def maybe_throw(self, propagate=True, callback=None):
~/virtualenv/lib/python3.7/site-packages/vine/promises.py in throw(self, exc, tb, propagate)
232 if tb is None and (exc is None or exc is current_exc):
233 raise
--> 234 reraise(type(exc), exc, tb)
235
236 @property
~/virtualenv/lib/python3.7/site-packages/vine/utils.py in reraise(tp, value, tb)
28 if value.__traceback__ is not tb:
29 raise value.with_traceback(tb)
---> 30 raise value
TypeError: 'NoneType' object does not support item assignment
I tried self.request.chain = None and self.request.chain[:] = []:
@app.task(bind=True)
def add(self, x, y):
    if self.request.chain and (x + y) % 2 == 0:
        self.request.chain = None
    return x + y
The logs show that the task returns the data:
[2021-03-24 22:11:48,795: WARNING/MainProcess] Substantial drift from scrpc_worker#458cd596aed3 may mean clocks are out of sync. Current drift is
14400 seconds. [orig: 2021-03-24 22:11:48.795402 recv: 2021-03-25 02:11:48.796579]
[2021-03-24 22:11:52,227: INFO/MainProcess] Events of group {task} enabled by remote.
[2021-03-24 22:11:57,853: INFO/MainProcess] Received task: myprj.tasks.add[8aaae68f-d5ca-4c0a-8f2e-f1c7b5916e29]
[2021-03-24 22:11:57,867: INFO/ForkPoolWorker-8] Task myprj.tasks.add[8aaae68f-d5ca-4c0a-8f2e-f1c7b5916e29] succeeded in 0.01066690299999884s: 4
but then it waits on some socket, and when we press Ctrl+C it gives the traceback below.
In [1]: from myprj.tasks import add
In [2]: ~(add.s(2, 2) | add.s(3))
^C---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
<ipython-input-2-0fe85c5d0e22> in <module>
----> 1 ~(add.s(2, 2) | add.s(3))
~/virtualenv/lib/python3.7/site-packages/celery/canvas.py in __invert__(self)
479
480 def __invert__(self):
--> 481 return self.apply_async().get()
482
483 def __reduce__(self):
~/virtualenv/lib/python3.7/site-packages/celery/result.py in get(self, timeout, propagate, interval, no_ack, follow_parents, callback, on_message, on_interval, disable_sync_subtasks, EXCEPTION_STATES, PROPAGATE_STATES)
228 propagate=propagate,
229 callback=callback,
--> 230 on_message=on_message,
231 )
232 wait = get # deprecated alias to :meth:`get`.
~/virtualenv/lib/python3.7/site-packages/celery/backends/asynchronous.py in wait_for_pending(self, result, callback, propagate, **kwargs)
197 callback=None, propagate=True, **kwargs):
198 self._ensure_not_eager()
--> 199 for _ in self._wait_for_pending(result, **kwargs):
200 pass
201 return result.maybe_throw(callback=callback, propagate=propagate)
~/virtualenv/lib/python3.7/site-packages/celery/backends/asynchronous.py in _wait_for_pending(self, result, timeout, on_interval, on_message, **kwargs)
265 for _ in self.drain_events_until(
266 result.on_ready, timeout=timeout,
--> 267 on_interval=on_interval):
268 yield
269 sleep(0)
~/virtualenv/lib/python3.7/site-packages/celery/backends/asynchronous.py in drain_events_until(self, p, timeout, interval, on_interval, wait)
52 raise socket.timeout()
53 try:
---> 54 yield self.wait_for(p, wait, timeout=interval)
55 except socket.timeout:
56 pass
~/virtualenv/lib/python3.7/site-packages/celery/backends/asynchronous.py in wait_for(self, p, wait, timeout)
61
62 def wait_for(self, p, wait, timeout=None):
---> 63 wait(timeout=timeout)
64
65
~/virtualenv/lib/python3.7/site-packages/celery/backends/redis.py in drain_events(self, timeout)
149 if self._pubsub:
150 with self.reconnect_on_error():
--> 151 message = self._pubsub.get_message(timeout=timeout)
152 if message and message['type'] == 'message':
153 self.on_state_change(self._decode_result(message['data']), message)
~/virtualenv/lib/python3.7/site-packages/redis/client.py in get_message(self, ignore_subscribe_messages, timeout)
3615 number.
3616 """
-> 3617 response = self.parse_response(block=False, timeout=timeout)
3618 if response:
3619 return self.handle_message(response, ignore_subscribe_messages)
~/virtualenv/lib/python3.7/site-packages/redis/client.py in parse_response(self, block, timeout)
3501 self.check_health()
3502
-> 3503 if not block and not conn.can_read(timeout=timeout):
3504 return None
3505 response = self._execute(conn, conn.read_response)
~/virtualenv/lib/python3.7/site-packages/redis/connection.py in can_read(self, timeout)
732 self.connect()
733 sock = self._sock
--> 734 return self._parser.can_read(timeout)
735
736 def read_response(self):
~/virtualenv/lib/python3.7/site-packages/redis/connection.py in can_read(self, timeout)
319
320 def can_read(self, timeout):
--> 321 return self._buffer and self._buffer.can_read(timeout)
322
323 def read_response(self):
~/virtualenv/lib/python3.7/site-packages/redis/connection.py in can_read(self, timeout)
229 return bool(self.length) or \
230 self._read_from_socket(timeout=timeout,
--> 231 raise_on_timeout=False)
232
233 def read(self, length):
~/virtualenv/lib/python3.7/site-packages/redis/connection.py in _read_from_socket(self, length, timeout, raise_on_timeout)
196 sock.settimeout(timeout)
197 while True:
--> 198 data = recv(self._sock, socket_read_size)
199 # an empty string indicates the server shutdown the socket
200 if isinstance(data, bytes) and len(data) == 0:
~/virtualenv/lib/python3.7/site-packages/redis/_compat.py in recv(sock, *args, **kwargs)
70 else: # Python 3.5 and above automatically retry EINTR
71 def recv(sock, *args, **kwargs):
---> 72 return sock.recv(*args, **kwargs)
73
74 def recv_into(sock, *args, **kwargs):
KeyboardInterrupt:
First of all, is terminating the chain a good option, or should we add a condition in the next chain task so that, under some condition, it does not process and just returns what it received?
If we have to terminate the chain, what is the best option, so that you still get the output of the partial chain?
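For reference, here is a rough sketch of the second option mentioned above, a pass-through guard in the downstream task instead of terminating the chain. The task name add_unless_even is made up for illustration, and this is not tested against your setup:
from .celery import app

@app.task(bind=True)
def add(self, x, y):
    return x + y

@app.task(bind=True)
def add_unless_even(self, x, y):
    # Guard: if the partial result received from the previous task is already
    # even, do no further work and just hand that partial result through.
    if x % 2 == 0:
        return x
    return x + y

With this, ~(add.s(2, 2) | add_unless_even.s(3)) returns 4: the second task sees the even partial result 4 and returns it unchanged instead of adding 3.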
I am trying to get the result of a Google BigQuery query into a pandas dataframe (in a Jupyter notebook).
But every time I try to run the query I get a DeadlineExceeded: 504 Deadline Exceeded.
This happens not only for queries in my own BQ project but also for other projects.
I have tried a lot of options to run the query, like the ones here: https://cloud.google.com/bigquery/docs/bigquery-storage-python-pandas
Does anyone have an idea how to fix this?
Query:
%load_ext google.cloud.bigquery
%%bigquery tax_forms --use_bqstorage_api
SELECT * FROM `bigquery-public-data.irs_990.irs_990_2012`
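(For reference, the cell magic above is roughly equivalent to the explicit client call below, following the linked BigQuery docs; this is only a sketch, with project and credentials assumed to come from the environment.)
from google.cloud import bigquery

client = bigquery.Client()
tax_forms = client.query(
    "SELECT * FROM `bigquery-public-data.irs_990.irs_990_2012`"
).to_dataframe(create_bqstorage_client=True)  # download via the BigQuery Storage API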
---------------------------------------------------------------------------
_MultiThreadedRendezvous Traceback (most recent call last)
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\grpc_helpers.py in error_remapped_callable(*args, **kwargs)
149 prefetch_first = getattr(callable_, "_prefetch_first_result_", True)
--> 150 return _StreamingResponseIterator(result, prefetch_first_result=prefetch_first)
151 except grpc.RpcError as exc:
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\grpc_helpers.py in __init__(self, wrapped, prefetch_first_result)
72 if prefetch_first_result:
---> 73 self._stored_first_result = six.next(self._wrapped)
74 except TypeError:
~\AppData\Local\Continuum\anaconda3\lib\site-packages\grpc\_channel.py in __next__(self)
415 def __next__(self):
--> 416 return self._next()
417
~\AppData\Local\Continuum\anaconda3\lib\site-packages\grpc\_channel.py in _next(self)
705 elif self._state.code is not None:
--> 706 raise self
707
_MultiThreadedRendezvous: <_MultiThreadedRendezvous of RPC that terminated with:
status = StatusCode.DEADLINE_EXCEEDED
details = "Deadline Exceeded"
debug_error_string = "{"created":"#1597838569.388000000","description":"Error received from peer ipv4:172.217.168.202:443","file":"src/core/lib/surface/call.cc","file_line":1062,"grpc_message":"Deadline Exceeded","grpc_status":4}"
>
The above exception was the direct cause of the following exception:
DeadlineExceeded Traceback (most recent call last)
<ipython-input-2-4fdaec7219df> in <module>
----> 1 get_ipython().run_cell_magic('bigquery', 'tax_forms --use_bqstorage_api', 'SELECT * FROM `bigquery-public-data.irs_990.irs_990_2012`\n')
~\AppData\Local\Continuum\anaconda3\lib\site-packages\IPython\core\interactiveshell.py in run_cell_magic(self, magic_name, line, cell)
2357 with self.builtin_trap:
2358 args = (magic_arg_s, cell)
-> 2359 result = fn(*args, **kwargs)
2360 return result
2361
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\cloud\bigquery\magics.py in _cell_magic(line, query)
589 )
590 else:
--> 591 result = query_job.to_dataframe(bqstorage_client=bqstorage_client)
592
593 if args.destination_var:
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\cloud\bigquery\job.py in to_dataframe(self, bqstorage_client, dtypes, progress_bar_type, create_bqstorage_client, date_as_object)
3381 progress_bar_type=progress_bar_type,
3382 create_bqstorage_client=create_bqstorage_client,
-> 3383 date_as_object=date_as_object,
3384 )
3385
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\cloud\bigquery\table.py in to_dataframe(self, bqstorage_client, dtypes, progress_bar_type, create_bqstorage_client, date_as_object)
1726 progress_bar_type=progress_bar_type,
1727 bqstorage_client=bqstorage_client,
-> 1728 create_bqstorage_client=create_bqstorage_client,
1729 )
1730
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\cloud\bigquery\table.py in to_arrow(self, progress_bar_type, bqstorage_client, create_bqstorage_client)
1544 record_batches = []
1545 for record_batch in self._to_arrow_iterable(
-> 1546 bqstorage_client=bqstorage_client
1547 ):
1548 record_batches.append(record_batch)
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\cloud\bigquery\table.py in _to_page_iterable(self, bqstorage_download, tabledata_list_download, bqstorage_client)
1433 ):
1434 if bqstorage_client is not None:
-> 1435 for item in bqstorage_download():
1436 yield item
1437 return
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\cloud\bigquery\_pandas_helpers.py in _download_table_bqstorage(project_id, table, bqstorage_client, preserve_order, selected_fields, page_to_item)
723 # Call result() on any finished threads to raise any
724 # exceptions encountered.
--> 725 future.result()
726
727 try:
~\AppData\Local\Continuum\anaconda3\lib\concurrent\futures\_base.py in result(self, timeout)
426 raise CancelledError()
427 elif self._state == FINISHED:
--> 428 return self.__get_result()
429
430 self._condition.wait(timeout)
~\AppData\Local\Continuum\anaconda3\lib\concurrent\futures\_base.py in __get_result(self)
382 def __get_result(self):
383 if self._exception:
--> 384 raise self._exception
385 else:
386 return self._result
~\AppData\Local\Continuum\anaconda3\lib\concurrent\futures\thread.py in run(self)
55
56 try:
---> 57 result = self.fn(*self.args, **self.kwargs)
58 except BaseException as exc:
59 self.future.set_exception(exc)
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\cloud\bigquery\_pandas_helpers.py in _download_table_bqstorage_stream(download_state, bqstorage_client, session, stream, worker_queue, page_to_item)
591 rowstream = bqstorage_client.read_rows(position).rows(session)
592 else:
--> 593 rowstream = bqstorage_client.read_rows(stream.name).rows(session)
594
595 for page in rowstream.pages:
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\cloud\bigquery_storage_v1\client.py in read_rows(self, name, offset, retry, timeout, metadata)
120 retry=retry,
121 timeout=timeout,
--> 122 metadata=metadata,
123 )
124 return reader.ReadRowsStream(
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\cloud\bigquery_storage_v1\gapic\big_query_read_client.py in read_rows(self, read_stream, offset, retry, timeout, metadata)
370
371 return self._inner_api_calls["read_rows"](
--> 372 request, retry=retry, timeout=timeout, metadata=metadata
373 )
374
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\gapic_v1\method.py in __call__(self, *args, **kwargs)
143 kwargs["metadata"] = metadata
144
--> 145 return wrapped_func(*args, **kwargs)
146
147
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\retry.py in retry_wrapped_func(*args, **kwargs)
284 sleep_generator,
285 self._deadline,
--> 286 on_error=on_error,
287 )
288
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\retry.py in retry_target(target, predicate, sleep_generator, deadline, on_error)
182 for sleep in sleep_generator:
183 try:
--> 184 return target()
185
186 # pylint: disable=broad-except
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\timeout.py in func_with_timeout(*args, **kwargs)
212 """Wrapped function that adds timeout."""
213 kwargs["timeout"] = next(timeouts)
--> 214 return func(*args, **kwargs)
215
216 return func_with_timeout
~\AppData\Local\Continuum\anaconda3\lib\site-packages\google\api_core\grpc_helpers.py in error_remapped_callable(*args, **kwargs)
150 return _StreamingResponseIterator(result, prefetch_first_result=prefetch_first)
151 except grpc.RpcError as exc:
--> 152 six.raise_from(exceptions.from_grpc_error(exc), exc)
153
154 return error_remapped_callable
~\AppData\Local\Continuum\anaconda3\lib\site-packages\six.py in raise_from(value, from_value)
DeadlineExceeded: 504 Deadline Exceeded
Let me know if you need to know more. Thanks in advance.
Rutger
It turned out to be a conflict between a Conda package and a pip package.
I resolved it by reinstalling all the packages.
When I try to authenticate a user with Django 1.8:
authenticate(username=<username>, password=<password>)
I am getting this error:
lookup_cast() takes exactly 2 arguments (3 given)
Traceback:
/lib/python2.7/site-packages/django/contrib/auth/__init__.pyc in authenticate(**credentials)
72
73 try:
---> 74 user = backend.authenticate(**credentials)
75 except PermissionDenied:
76 # This backend says to stop in our tracks - this user should not be allowed in at all.
/lib/python2.7/site-packages/django/contrib/auth/backends.pyc in authenticate(self, username, password, **kwargs)
15 username = kwargs.get(UserModel.USERNAME_FIELD)
16 try:
---> 17 user = UserModel._default_manager.get_by_natural_key(username)
18 if user.check_password(password):
19 return user
/lib/python2.7/site-packages/django/contrib/auth/models.pyc in get_by_natural_key(self, username)
160
161 def get_by_natural_key(self, username):
--> 162 return self.get(**{self.model.USERNAME_FIELD: username})
163
164
/lib/python2.7/site-packages/django/db/models/manager.pyc in manager_method(self, *args, **kwargs)
125 def create_method(name, method):
126 def manager_method(self, *args, **kwargs):
--> 127 return getattr(self.get_queryset(), name)(*args, **kwargs)
128 manager_method.__name__ = method.__name__
129 manager_method.__doc__ = method.__doc__
/lib/python2.7/site-packages/django/db/models/query.pyc in get(self, *args, **kwargs)
326 if self.query.can_filter():
327 clone = clone.order_by()
--> 328 num = len(clone)
329 if num == 1:
330 return clone._result_cache[0]
/lib/python2.7/site-packages/django/db/models/query.pyc in __len__(self)
142
143 def __len__(self):
--> 144 self._fetch_all()
145 return len(self._result_cache)
146
/lib/python2.7/site-packages/django/db/models/query.pyc in _fetch_all(self)
963 def _fetch_all(self):
964 if self._result_cache is None:
--> 965 self._result_cache = list(self.iterator())
966 if self._prefetch_related_lookups and not self._prefetch_done:
967 self._prefetch_related_objects()
/lib/python2.7/site-packages/django/db/models/query.pyc in iterator(self)
236 # Execute the query. This will also fill compiler.select, klass_info,
237 # and annotations.
--> 238 results = compiler.execute_sql()
239 select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
240 compiler.annotation_col_map)
/lib/python2.7/site-packages/django/db/models/sql/compiler.pyc in execute_sql(self, result_type)
816 result_type = NO_RESULTS
817 try:
--> 818 sql, params = self.as_sql()
819 if not sql:
820 raise EmptyResultSet
/lib/python2.7/site-packages/django/db/models/sql/compiler.pyc in as_sql(self, with_limits, with_col_aliases, subquery)
374 from_, f_params = self.get_from_clause()
375
--> 376 where, w_params = self.compile(self.query.where)
377 having, h_params = self.compile(self.query.having)
378 params = []
/lib/python2.7/site-packages/django/db/models/sql/compiler.pyc in compile(self, node, select_format)
344 sql, params = vendor_impl(self, self.connection)
345 else:
--> 346 sql, params = node.as_sql(self, self.connection)
347 if select_format and not self.subquery:
348 return node.output_field.select_format(self, sql, params)
/lib/python2.7/site-packages/django/db/models/sql/where.pyc in as_sql(self, compiler, connection)
102 try:
103 if hasattr(child, 'as_sql'):
--> 104 sql, params = compiler.compile(child)
105 else:
106 # A leaf node in the tree.
/lib/python2.7/site-packages/django/db/models/sql/compiler.pyc in compile(self, node, select_format)
344 sql, params = vendor_impl(self, self.connection)
345 else:
--> 346 sql, params = node.as_sql(self, self.connection)
347 if select_format and not self.subquery:
348 return node.output_field.select_format(self, sql, params)
/lib/python2.7/site-packages/django/db/models/lookups.pyc in as_sql(self, compiler, connection)
203
204 def as_sql(self, compiler, connection):
--> 205 lhs_sql, params = self.process_lhs(compiler, connection)
206 rhs_sql, rhs_params = self.process_rhs(compiler, connection)
207 params.extend(rhs_params)
/lib/python2.7/site-packages/django/db/models/lookups.pyc in process_lhs(self, compiler, connection, lhs)
199 lhs_sql = connection.ops.field_cast_sql(
200 db_type, field_internal_type) % lhs_sql
--> 201 lhs_sql = connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql
202 return lhs_sql, params
203
TypeError: lookup_cast() takes exactly 2 arguments (3 given)
Which authentication/database backend are you using?
The lookup_cast() method was changed in Django 1.8 to:
def lookup_cast(self, lookup_type, internal_type=None)
from:
def lookup_cast(self, lookup_type)
If you are using a database backend designed for Django 1.7 or earlier, Django 1.8 will call lookup_cast() with the new signature (3 arguments: self plus 2), while the backend's implementation still only accepts 2 (self plus 1), which produces exactly this TypeError.
Perhaps you need to check if there is an update available for your backend.
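If the backend is one you maintain, the fix is to accept the new optional argument. A minimal sketch (the class here stands in for the backend's own DatabaseOperations; the body just mirrors Django's default behaviour of returning "%s"):
from django.db.backends.base.operations import BaseDatabaseOperations

class DatabaseOperations(BaseDatabaseOperations):
    # Django 1.8 passes an extra internal_type argument; making it optional
    # keeps the method compatible with both the old and new call signatures.
    def lookup_cast(self, lookup_type, internal_type=None):
        return "%s"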