I'm currently using Python 3.5.2 and Celery 3.1.23.
@celery.task(bind=True)
def test(self, player, team, region):
    print('test', player, team, region)

def update_player_accounts(self):
    try:
        teams = client.get_pro_teams()
        for team, team_data in teams.items():
            for player, player_data in team_data['members'].items():
                print(player, team, team_data['region'])
                self.test.delay(
                    player,
                    team,
                    team_data['region'],
                )
I'm iterating through a nested dictionary of strings, teams. My understanding of this code is that the nested for loop creates Celery tasks that print my parameters. The code above throws a maximum recursion depth exceeded error, yet it works fine if I remove any one of the parameters of test(); it doesn't matter which one I remove.
The code also works if I concatenate an empty string onto an argument, like so:
self.test.delay(
    player + '',
    team,
    team_data['region'],
)
Why does this happen? I'm only passing strings to the Celery task, which should be straightforward.
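One way to narrow this down (a diagnostic sketch on my part; nothing here is from the original code) is to check whether the values being passed are plain built-in str at all. A str subclass can carry references to a much larger object graph that pickle then tries to walk, while concatenating '' always produces a plain str, which would explain why player + '' serializes fine:

for team, team_data in teams.items():
    # If these print a subclass of str rather than str itself, pickle
    # will serialize everything that subclass instance references.
    print(type(team), type(team_data['region']))
    # str concatenation returns a plain built-in str even on subclasses:
    assert type(team + '') is str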
Edit 1:
Supplying an example teams dictionary:
{
    'Last Kings': {
        'members': {
            'Primoo': {
                'url': '/professional/resume/primoo/',
                'lane': 'jng'
            },
            'Badmilk': {
                'url': '/professional/resume/badmilk/',
                'lane': 'sup'
            },
            'Nipphu': {
                'url': '/professional/resume/nipphu/',
                'lane': 'top'
            },
            'SryNotSry': {
                'url': None,
                'lane': 'adc'
            },
            'Rakyz': {
                'url': '/professional/resume/rakyz/',
                'lane': 'mid'
            }
        },
        'region': 'las'
    },
    'Dire Wolves': {
        ....
    }
}
Edit 2:
Traceback (most recent call last):
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 55, in _reraise_errors
yield
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 164, in dumps
payload = encoder(data)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 356, in pickle_dumps
return dumper(obj, protocol=pickle_protocol)
RecursionError: maximum recursion depth exceeded
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\Jung\Documents\Projects\hq\services\player_service.py", line 84, in update_player_accounts
team_data['region']
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\celery\app\task.py", line 453, in delay
return self.apply_async(args, kwargs)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\celery\app\task.py", line 560, in apply_async
**dict(self._get_exec_options(), **options)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\celery\app\base.py", line 354, in send_task
reply_to=reply_to or self.oid, **options
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\celery\app\amqp.py", line 305, in publish_task
**kwargs
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\messaging.py", line 165, in publish
compression, headers)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\messaging.py", line 241, in _prepare
body) = dumps(body, serializer=serializer)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 164, in dumps
payload = encoder(data)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\contextlib.py", line 77, in __exit__
self.gen.throw(type, value, traceback)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 59, in _reraise_errors
reraise(wrapper, wrapper(exc), sys.exc_info()[2])
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\five.py", line 131, in reraise
raise value.with_traceback(tb)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 55, in _reraise_errors
yield
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 164, in dumps
payload = encoder(data)
File "c:\users\jung\appdata\local\programs\python\python35-32\lib\site-packages\kombu\serialization.py", line 356, in pickle_dumps
return dumper(obj, protocol=pickle_protocol)
kombu.exceptions.EncodeError: maximum recursion depth exceeded
I've decided to post the entire stack trace as it is not that long.
I have a model written with SQLAlchemy's declarative base.
class Roles(Base):
    __tablename__ = "roles"
    __table_args__ = (
        Index("roles_name", "name", unique=True),
    )
    # Pass the callable itself (no parentheses) so SQLAlchemy generates
    # a fresh id per row instead of reusing one value computed at import.
    id = Column(Integer, primary_key=True, default=get_uuid)
    name = Column(String(10), nullable=False)
As you may have noticed, I have set the default value of the primary key column id to get_uuid().
import uuid

def get_uuid():
    pk = uuid.uuid4().int >> 64
    return pk
The above method returns the UUID as an integer of bit size 64 or less, because the id column of this table is an integer and Spanner can hold up to 64 bits.
So now, to insert a row into this table:
>>> role = Roles()
>>> role.name = "Admin"
>>> session.add(role)
>>> session.commit()
This resulted in the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.10/site-packages/google/api_core/grpc_helpers.py", line 72, in error_remapped_callable
return callable_(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/grpc/_channel.py", line 946, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/usr/local/lib/python3.10/site-packages/grpc/_channel.py", line 849, in _end_unary_response_blocking
raise _InactiveRpcError(state)
grpc._channel._InactiveRpcError: <_InactiveRpcError of RPC that terminated with:
status = StatusCode.FAILED_PRECONDITION
details = "Could not parse 18011687921562567628 as an integer"
debug_error_string = "UNKNOWN:Error received from peer ipv4:172.19.0.3:9010 {grpc_message:"Could not parse 18011687921562567628 as an integer", grpc_status:9, created_time:"2022-11-12T06:48:36.468914625+00:00"}"
>
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.10/site-packages/google/cloud/spanner_dbapi/cursor.py", line 269, in execute
) = self.connection.run_statement(statement)
File "/usr/local/lib/python3.10/site-packages/google/cloud/spanner_dbapi/connection.py", line 454, in run_statement
_execute_insert_heterogenous(
File "/usr/local/lib/python3.10/site-packages/google/cloud/spanner_dbapi/_helpers.py", line 57, in _execute_insert_heterogenous
transaction.execute_update(
File "/usr/local/lib/python3.10/site-packages/google/cloud/spanner_v1/transaction.py", line 302, in execute_update
response = api.execute_sql(
File "/usr/local/lib/python3.10/site-packages/google/cloud/spanner_v1/services/spanner/client.py", line 1096, in execute_sql
response = rpc(
File "/usr/local/lib/python3.10/site-packages/google/api_core/gapic_v1/method.py", line 154, in __call__
return wrapped_func(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/google/api_core/retry.py", line 283, in retry_wrapped_func
return retry_target(
File "/usr/local/lib/python3.10/site-packages/google/api_core/retry.py", line 190, in retry_target
return target()
File "/usr/local/lib/python3.10/site-packages/google/api_core/timeout.py", line 99, in func_with_timeout
return func(*args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/google/api_core/grpc_helpers.py", line 74, in error_remapped_callable
raise exceptions.from_grpc_error(exc) from exc
google.api_core.exceptions.FailedPrecondition: 400 Could not parse 18011687921562567628 as an integer
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/engine/base.py", line 1900, in _execute_context
self.dialect.do_execute(
File "/usr/local/lib/python3.10/site-packages/google/cloud/sqlalchemy_spanner/sqlalchemy_spanner.py", line 1013, in do_execute
cursor.execute(statement, parameters)
File "/usr/local/lib/python3.10/site-packages/google/cloud/spanner_dbapi/cursor.py", line 70, in wrapper
return function(cursor, *args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/google/cloud/spanner_dbapi/cursor.py", line 289, in execute
raise IntegrityError(getattr(e, "details", e)) from e
google.cloud.spanner_dbapi.exceptions.IntegrityError: []
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/orm/session.py", line 1451, in commit
self._transaction.commit(_to_root=self.future)
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/orm/session.py", line 829, in commit
self._prepare_impl()
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/orm/session.py", line 808, in _prepare_impl
self.session.flush()
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/orm/session.py", line 3386, in flush
self._flush(objects)
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/orm/session.py", line 3525, in _flush
with util.safe_reraise():
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/util/langhelpers.py", line 70, in __exit__
compat.raise_(
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/util/compat.py", line 208, in raise_
raise exception
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/orm/session.py", line 3486, in _flush
flush_context.execute()
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/orm/unitofwork.py", line 456, in execute
rec.execute(self)
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/orm/unitofwork.py", line 630, in execute
util.preloaded.orm_persistence.save_obj(
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/orm/persistence.py", line 245, in save_obj
_emit_insert_statements(
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/orm/persistence.py", line 1238, in _emit_insert_statements
result = connection._execute_20(
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/engine/base.py", line 1705, in _execute_20
return meth(self, args_10style, kwargs_10style, execution_options)
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/sql/elements.py", line 333, in _execute_on_connection
return connection._execute_clauseelement(
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/engine/base.py", line 1572, in _execute_clauseelement
ret = self._execute_context(
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/engine/base.py", line 1943, in _execute_context
self._handle_dbapi_exception(
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/engine/base.py", line 2124, in _handle_dbapi_exception
util.raise_(
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/util/compat.py", line 208, in raise_
raise exception
File "/usr/local/lib/python3.10/site-packages/sqlalchemy/engine/base.py", line 1900, in _execute_context
self.dialect.do_execute(
File "/usr/local/lib/python3.10/site-packages/google/cloud/sqlalchemy_spanner/sqlalchemy_spanner.py", line 1013, in do_execute
cursor.execute(statement, parameters)
File "/usr/local/lib/python3.10/site-packages/google/cloud/spanner_dbapi/cursor.py", line 70, in wrapper
return function(cursor, *args, **kwargs)
File "/usr/local/lib/python3.10/site-packages/google/cloud/spanner_dbapi/cursor.py", line 289, in execute
raise IntegrityError(getattr(e, "details", e)) from e
sqlalchemy.exc.IntegrityError: (google.cloud.spanner_dbapi.exceptions.IntegrityError) []
[SQL: INSERT INTO roles (id, name) VALUES (%s, %s)]
[parameters: [18011687921562567628, 'Admin']]
(Background on this error at: https://sqlalche.me/e/14/gkpj)
What I understand from this is that Spanner is not willing to accept the generated UUID.
status = StatusCode.FAILED_PRECONDITION
details = "Could not parse 18011687921562567628 as an integer"
I have checked the get_uuid() method; it does return an int value of bit size 64 or less.
The README of this repo suggests declaring the table's primary key as an Integer and, when inserting a row into the database, generating the primary key value in hex. I did exactly that, but it didn't work.
The generated int value is larger than the maximum INT64 value that is allowed in Cloud Spanner:
Max allowed: 9223372036854775807
Your value:  18011687921562567628
See https://cloud.google.com/spanner/docs/reference/standard-sql/data-types#integer_types for more information on the INT64 type.
I'm no Python expert, but my guess is that the int value that you are generating is interpreted as an unsigned int, while the INT64 data type in Cloud Spanner is signed.
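To make that concrete, here is a quick check (my addition, using only the standard library) showing that the generated value fits in 64 bits yet still overflows the signed range about half the time:

import uuid

pk = uuid.uuid4().int >> 64
print(pk.bit_length())   # 64 or less, just as the question says
print(pk > 2**63 - 1)    # True roughly half the time: pk is unsigned,
                         # while Cloud Spanner's INT64 is signed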
EDIT: Added an example of how to get a signed value.
My understanding is that you can do the following to get a signed 64-bit integer value from a UUID in Python:
import ctypes
import uuid

# Use c_int64 explicitly: c_long is only 32 bits wide on some
# platforms (e.g. 64-bit Windows), which would truncate the value.
ctypes.c_int64(uuid.uuid4().int >> 64).value
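Putting it together, a minimal sketch of how this could plug back into the model from the question (same Roles table; untested against Spanner itself, so treat it as an illustration):

import ctypes
import uuid

from sqlalchemy import Column, Index, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

def get_uuid():
    # Take the top 64 bits of the UUID and reinterpret them as a
    # signed 64-bit integer so the value always fits Spanner's INT64.
    return ctypes.c_int64(uuid.uuid4().int >> 64).value

class Roles(Base):
    __tablename__ = "roles"
    __table_args__ = (
        Index("roles_name", "name", unique=True),
    )
    # The callable is passed without parentheses so SQLAlchemy calls
    # it once per insert rather than once at class-definition time.
    id = Column(Integer, primary_key=True, default=get_uuid)
    name = Column(String(10), nullable=False)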
I created a small program that uses Numba on a method containing a 2D array (a list of lists). This program runs just fine in Spyder, but when I try to run the exact same program on a remote Ubuntu server, I get a long error saying "unsupported nested memory-managed object" (listed below the program code).
The code:
from numba import jit

@jit(nopython=True)
def test():
    num1 = 10
    num2 = 5
    array = [ [ 1 for i in range(num1) ] for j in range(num2) ]
    # array = [ 2 for i in range(num1) ]
    sum = 0
    for i in range(0, num2):      # num2 rows ...
        for j in range(0, num1):  # ... of num1 columns each
            sum = sum + array[i][j]
    print(sum)

test()
The error:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 186, in _call_incref_decref
meminfo = data_model.get_nrt_meminfo(builder, value)
File "/usr/lib/python3/dist-packages/numba/datamodel/models.py", line 329, in get_nrt_meminfo
"unsupported nested memory-managed object")
NotImplementedError: unsupported nested memory-managed object
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/numba/errors.py", line 243, in new_error_context
yield
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 263, in lower_inst
self.storevar(val, inst.target.name)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 928, in storevar
self.decref(fetype, old)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 982, in decref
self.context.nrt.decref(self.builder, typ, val)
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 208, in decref
self._call_incref_decref(builder, typ, typ, value, "NRT_decref")
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 179, in _call_incref_decref
funcname, getters + (getter,))
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 188, in _call_incref_decref
raise NotImplementedError("%s: %s" % (root_type, str(e)))
NotImplementedError: list(list(int64)): unsupported nested memory-managed object
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "2DArrayTest.py", line 15, in <module>
test()
File "/usr/lib/python3/dist-packages/numba/dispatcher.py", line 307, in _compile_for_args
return self.compile(tuple(argtypes))
File "/usr/lib/python3/dist-packages/numba/dispatcher.py", line 579, in compile
cres = self._compiler.compile(args, return_type)
File "/usr/lib/python3/dist-packages/numba/dispatcher.py", line 80, in compile
flags=flags, locals=self.locals)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 740, in compile_extra
return pipeline.compile_extra(func)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 360, in compile_extra
return self._compile_bytecode()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 699, in _compile_bytecode
return self._compile_core()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 686, in _compile_core
res = pm.run(self.status)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 246, in run
raise patched_exception
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 238, in run
stage()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 621, in stage_nopython_backend
self._backend(lowerfn, objectmode=False)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 576, in _backend
lowered = lowerfn()
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 563, in backend_nopython_mode
self.flags)
File "/usr/lib/python3/dist-packages/numba/compiler.py", line 858, in native_lowering_stage
lower.lower()
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 135, in lower
self.lower_normal_function(self.fndesc)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 176, in lower_normal_function
entry_block_tail = self.lower_function_body()
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 201, in lower_function_body
self.lower_block(block)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "/usr/lib/python3.6/contextlib.py", line 99, in __exit__
self.gen.throw(type, value, traceback)
File "/usr/lib/python3/dist-packages/numba/errors.py", line 249, in new_error_context
six.reraise(type(newerr), newerr, sys.exc_info()[2])
File "/usr/lib/python3/dist-packages/numba/six.py", line 658, in reraise
raise value.with_traceback(tb)
File "/usr/lib/python3/dist-packages/numba/errors.py", line 243, in new_error_context
yield
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 216, in lower_block
self.lower_inst(inst)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 263, in lower_inst
self.storevar(val, inst.target.name)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 928, in storevar
self.decref(fetype, old)
File "/usr/lib/python3/dist-packages/numba/lowering.py", line 982, in decref
self.context.nrt.decref(self.builder, typ, val)
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 208, in decref
self._call_incref_decref(builder, typ, typ, value, "NRT_decref")
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 179, in _call_incref_decref
funcname, getters + (getter,))
File "/usr/lib/python3/dist-packages/numba/runtime/context.py", line 188, in _call_incref_decref
raise NotImplementedError("%s: %s" % (root_type, str(e)))
numba.errors.LoweringError: Failed at nopython (nopython mode backend)
list(list(int64)): unsupported nested memory-managed object
File "2DArrayTest.py", line 7
[1] During: lowering "$56 = build_list(items=[])" at 2DArrayTest.py (7)
I have seen elsewhere that Numba doesn't like 2D arrays and lists of lists because of how it manages memory. Is there a way to make this work on the remote server, since it works in Spyder? What does Spyder do differently to make it work?
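For what it's worth, the usual workaround for this class of error is to replace the list of lists with a real NumPy array, which nopython mode supports natively. A minimal sketch of the same computation (my rewrite, not the original program):

import numpy as np
from numba import jit

@jit(nopython=True)
def test():
    num1 = 10
    num2 = 5
    # A 2D NumPy array instead of a reflected list of lists: Numba's
    # nopython mode can allocate and index these natively.
    array = np.ones((num2, num1), dtype=np.int64)
    total = 0
    for i in range(num2):
        for j in range(num1):
            total += array[i, j]
    return total

print(test())  # 50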
Can anyone suggest a Python client for AWS Redis with cluster mode enabled?
I'm using redis-py-cluster, but it fails:
Sample code:
from rediscluster import StrictRedisCluster
startup_nodes = [{"host": "xxxx.clustercfg.apn2.cache.amazonaws.com", "port": "6379"}]
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=False)
r.set('foo', 'bar')
value = r.get('foo')
======
Exception:
Traceback (most recent call last):
File "testRedisCluster.py", line 11, in
r = StrictRedisCluster(startup_nodes=startup_nodes, decode_responses=True, skip_full_coverage_check=False)
File "/Library/Python/2.7/site-packages/rediscluster/client.py", line 181, in init
**kwargs
File "/Library/Python/2.7/site-packages/rediscluster/connection.py", line 141, in init
self.nodes.initialize()
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 228, in initialize
need_full_slots_coverage = self.cluster_require_full_coverage(nodes_cache)
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 270, in cluster_require_full_coverage
return any(node_require_full_coverage(node) for node in nodes.values())
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 270, in
return any(node_require_full_coverage(node) for node in nodes.values())
File "/Library/Python/2.7/site-packages/rediscluster/nodemanager.py", line 267, in node_require_full_coverage
return "yes" in r_node.config_get("cluster-require-full-coverage").values()
File "/Library/Python/2.7/site-packages/redis/client.py", line 715, in config_get
return self.execute_command('CONFIG GET', pattern)
File "/Library/Python/2.7/site-packages/redis/client.py", line 668, in execute_command
return self.parse_response(connection, command_name, **options)
File "/Library/Python/2.7/site-packages/redis/client.py", line 680, in parse_response
response = connection.read_response()
File "/Library/Python/2.7/site-packages/redis/connection.py", line 629, in read_response
raise response
redis.exceptions.ResponseError: unknown command 'CONFIG'
I'm using redis-py-cluster 1.3.4.
Any idea?
Change the parameter skip_full_coverage_check=False to skip_full_coverage_check=True. AWS ElastiCache disables the CONFIG command, which is exactly what the full-coverage check tries to run, hence the unknown command 'CONFIG' error in your traceback.
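Applied to the sample code from the question, the working call would look like this (endpoint placeholder kept as in the question):

from rediscluster import StrictRedisCluster

startup_nodes = [{"host": "xxxx.clustercfg.apn2.cache.amazonaws.com", "port": "6379"}]

# skip_full_coverage_check=True avoids calling CONFIG GET, which
# ElastiCache does not allow.
r = StrictRedisCluster(
    startup_nodes=startup_nodes,
    decode_responses=True,
    skip_full_coverage_check=True,
)
r.set('foo', 'bar')
print(r.get('foo'))  # 'bar'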
I am currently developing a game. This game stores data in an SQLite database. I'm using dataset to manage the database, so I don't have to worry about SQL queries. I have a method that accesses the database to update player info:
def updatePlayerInfo(channel, info):  # Context at https://github.com/DuckHunt-discord/DuckHunt-Discord/blob/master/database.py#L33
    table = getChannelTable(channel)
    table.upsert(info, ["id_"])
    # An UPSERT is a smart combination of insert and update.
    # If rows with matching keys exist they will be updated,
    # otherwise a new row is inserted in the table.
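For context, here is the same upsert pattern in a minimal standalone form (in-memory database; table name and values are hypothetical):

import dataset

db = dataset.connect('sqlite:///:memory:')
table = db['players']

# The first call inserts, since no row with id_ == 1 exists yet ...
table.upsert({'id_': 1, 'munAP_': 1470000000}, ['id_'])
# ... the second call updates that row instead of inserting a new one.
table.upsert({'id_': 1, 'munAP_': 1470000500}, ['id_'])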
This updatePlayerInfo function works fine for almost everything. Only one thing creates an error: using munAP_ as a column name (it stores only integer timestamps). Other columns built the same way aren't affected by this bug at all!
The exception raised is the following:
Ignoring exception in on_message
Traceback (most recent call last):
File "/usr/local/lib/python3.4/dist-packages/discord/client.py", line 245, in _run_event
yield from getattr(self, event)(*args, **kwargs)
File "./main.py", line 1022, in on_message
if database.getStat(message.channel, message.author, "chargeurs",
File "/home/cloudbot/discord-bot/database.py", line 45, in setStat
updatePlayerInfo(channel, dict_)
File "/home/cloudbot/discord-bot/database.py", line 35, in updatePlayerInfo
table.upsert(info, ["id_"])
File "/usr/local/lib/python3.4/dist-packages/dataset/persistence/table.py", line 185, in upsert
row_count = self.update(row, keys, ensure=ensure, types=types)
File "/usr/local/lib/python3.4/dist-packages/dataset/persistence/table.py", line 154, in update
rp = self.database.executable.execute(stmt)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/engine/base.py", line 1991, in execute
return connection.execute(statement, *multiparams, **params)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/engine/base.py", line 914, in execute
return meth(self, multiparams, params)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/elements.py", line 323, in _execute_on_connection
return connection._execute_clauseelement(self, multiparams, params)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/engine/base.py", line 1003, in _execute_clauseelement
inline=len(distilled_params) > 1)
File "<string>", line 1, in <lambda>
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/elements.py", line 494, in compile
return self._compiler(dialect, bind=bind, **kw)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/elements.py", line 500, in _compiler
return dialect.statement_compiler(dialect, self, **kw)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/compiler.py", line 395, in __init__
Compiled.__init__(self, dialect, statement, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/compiler.py", line 190, in __init__
self.string = self.process(self.statement, **compile_kwargs)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/compiler.py", line 213, in process
return obj._compiler_dispatch(self, **kwargs)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/visitors.py", line 81, in _compiler_dispatch
return meth(self, **kw)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/compiler.py", line 1958, in visit_update
crud_params = crud._get_crud_params(self, update_stmt, **kw)
File "/usr/local/lib/python3.4/dist-packages/sqlalchemy/sql/crud.py", line 109, in _get_crud_params
(", ".join("%s" % c for c in check))
sqlalchemy.exc.CompileError: Unconsumed column names: munAP_
https://github.com/DuckHunt-discord/DuckHunt-Discord/issues/8
I already tried changing the column name (it was munAP before), but that changed nothing!
What else can I try? I suspect the problem is in my code, but maybe it's dataset's fault?
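One diagnostic worth trying (a sketch; the connection URL and table name below are placeholders): SQLAlchemy raises "Unconsumed column names" when the statement's parameters include a key that the compiled table metadata has no column for, so it would be useful to print what columns dataset actually sees:

import dataset

db = dataset.connect('sqlite:///duckhunt.db')  # hypothetical path
table = db['some_channel_table']               # hypothetical table name

# If 'munAP_' is missing from this list, the UPDATE is being compiled
# against metadata that doesn't know about the column.
print(table.columns)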
I am trying to check whether a certain dataset exists in BigQuery, and I get this very strange error that I had never seen until yesterday:
ERROR:dsUtils.bq_utils:Could not check if dataset tmp exists.
Traceback (most recent call last):
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\dsUtils\bq_utils.py", line 113, in _get
resp = bq_service.datasets().get(projectId=self.project_id, datasetId=self.id).execute(num_retries=2)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\oauth2client\util.py", line 137, in positional_wrapper
return wrapped(*args, **kwargs)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\googleapiclient\http.py", line 755, in execute
method=str(self.method), body=self.body, headers=self.headers)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\googleapiclient\http.py", line 93, in _retry_request
resp, content = http.request(uri, method, *args, **kwargs)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\oauth2client\client.py", line 598, in new_request
self._refresh(request_orig)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\oauth2client\client.py", line 864, in _refresh
self._do_refresh_request(http_request)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\oauth2client\client.py", line 891, in _do_refresh_request
body = self._generate_refresh_request_body()
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\oauth2client\client.py", line 1597, in _generate_refresh_request_body
assertion = self._generate_assertion()
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\oauth2client\service_account.py", line 318, in _generate_assertion
key_id=self._private_key_id)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\oauth2client\crypt.py", line 97, in make_signed_jwt
signature = signer.sign(signing_input)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\oauth2client\_pycrypto_crypt.py", line 101, in sign
return PKCS1_v1_5.new(self._key).sign(SHA256.new(message))
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\Crypto\Signature\PKCS1_v1_5.py", line 112, in sign
m = self._key.decrypt(em)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\Crypto\PublicKey\RSA.py", line 174, in decrypt
return pubkey.pubkey.decrypt(self, ciphertext)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\Crypto\PublicKey\pubkey.py", line 93, in decrypt
plaintext=self._decrypt(ciphertext)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\Crypto\PublicKey\RSA.py", line 235, in _decrypt
r = getRandomRange(1, self.key.n-1, randfunc=self._randfunc)
File "C:\Users\paco\Anaconda3\envs\visitForecastEnv\lib\site-packages\Crypto\PublicKey\RSA.py", line 126, in __getattr__
raise AttributeError("%s object has no %r attribute" % (self.__class__.__name__, attrname,))
AttributeError: _RSAobj object has no '_randfunc' attribute
Does anyone have an idea why I am suddenly getting these errors?