Tornado authenticating to MongoDB using Motor - python

I made a Tornado app that is supposed to run some queries against a MongoDB instance running on another machine. For this purpose I have set up MongoDB with authentication and users. I have checked that everything works and that I can authenticate to MongoDB using the Robo 3T app and a small synchronous script that uses pymongo.
This is how I initialize my tornado application:
class API(tornado.web.Application):
    def __init__(self):
        settings = dict(
            autoreload=True,
            compiled_template_cache=False,
            static_hash_cache=False,
            serve_traceback=True,
            cookie_secret="secret",
            xsrf_cookies=True,
            static_path=os.path.join(os.path.dirname(__file__), "media"),
            template_loader=tornado.template.Loader('./templates')
        )
        mongohost = os.environ.get('MONGOHOST', 'localhost')
        mongoport = os.environ.get('MONGOPORT', 27017)
        mongouser = os.environ.get('MONGOUSER')
        mongopass = os.environ.get('MONGOPASS')
        mongodb = os.environ.get('MONGODB')
        mongouri = f'mongodb://{mongouser}:{mongopass}@{mongohost}:{mongoport}/{mongodb}'
        self.client = motor.motor_tornado.MotorClient(mongouri)
        logging.info('connected to mongodb')
        self.db = self.client.get_default_database()
        logging.info('got mongodb database')
        tornado.web.Application.__init__(self, url_patterns, **settings)
def main():
    port = 8888
    FORMAT = ('%(asctime)s %(levelname) -10s %(name) -30s %(funcName) -35s %(lineno) -5d: %(message)s')
    logging.basicConfig(level=logging.DEBUG, format=FORMAT)
    tornado.ioloop.IOLoop.configure(TornadoUvloop)
    app = API()
    app.listen(port)
    signal.signal(signal.SIGINT, sig_exit)
    logging.info('Tornado server started on port %s' % port)
    tornado.ioloop.IOLoop.instance().start()

if __name__ == "__main__":
    main()
Everything appears to run until one of my handlers that actually performs queries against the database is hit. Code from the handler looks like this:
cursor = self.application.db['events'].find(
    find,
    projection
).limit(perpage).skip(page*perpage)
buffsize = 0
try:
    while (yield cursor.fetch_next):
        message = cursor.next_object()
        self.write(json.dumps(message, default=json_util.default))
        buffsize += 1
        if buffsize >= 10:
            buffsize = 0
            yield self.flush()
    yield self.flush()
except Exception:
    logging.error('Could not connect to mongodb', exc_info=True)
This code worked just fine before I tried to use authentication, but now it raises exceptions and has stopped working:
2017-12-12 13:00:20,718 INFO root __init__ 37 : connected to mongodb
2017-12-12 13:00:20,718 INFO root __init__ 39 : got mongodb database
2017-12-12 13:00:20,723 INFO root main 67 : Tornado server started on port 8888
2017-12-12 13:00:25,226 INFO tornado.general _check_file 198 : /Users/liviu/Documents/Work/api_v2/src/api_tmp/handlers/event_list.py modified; restarting server
2017-12-12 13:00:25,469 INFO root __init__ 37 : connected to mongodb
2017-12-12 13:00:25,469 INFO root __init__ 39 : got mongodb database
2017-12-12 13:00:25,472 INFO root main 67 : Tornado server started on port 8888
2017-12-12 13:00:28,152 INFO root get 266 : now querying database
2017-12-12 13:00:28,214 ERROR root get 355 : Could not connect to mongodb
Traceback (most recent call last):
File "/Users/liviu/Documents/Work/api_v2/src/api_tmp/handlers/event_list.py", line 346, in get
while (yield cursor.fetch_next):
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/tornado/gen.py", line 1055, in run
value = future.result()
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/tornado/concurrent.py", line 238, in result
raise_exc_info(self._exc_info)
File "<string>", line 4, in raise_exc_info
File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/concurrent/futures/thread.py", line 56, in run
result = self.fn(*self.args, **self.kwargs)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/cursor.py", line 1055, in _refresh
self.__collation))
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/cursor.py", line 892, in __send_message
**kwargs)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/mongo_client.py", line 950, in _send_message_with_response
exhaust)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/mongo_client.py", line 961, in _reset_on_error
return func(*args, **kwargs)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/server.py", line 99, in send_message_with_response
with self.get_socket(all_credentials, exhaust) as sock_info:
File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/contextlib.py", line 81, in __enter__
return next(self.gen)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/server.py", line 168, in get_socket
with self.pool.get_socket(all_credentials, checkout) as sock_info:
File "/usr/local/Cellar/python3/3.6.3/Frameworks/Python.framework/Versions/3.6/lib/python3.6/contextlib.py", line 81, in __enter__
return next(self.gen)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/pool.py", line 852, in get_socket
sock_info.check_auth(all_credentials)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/pool.py", line 570, in check_auth
auth.authenticate(credentials, self)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/auth.py", line 486, in authenticate
auth_func(credentials, sock_info)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/auth.py", line 466, in _authenticate_default
return _authenticate_scram_sha1(credentials, sock_info)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/auth.py", line 209, in _authenticate_scram_sha1
res = sock_info.command(source, cmd)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/pool.py", line 477, in command
collation=collation)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/network.py", line 116, in command
parse_write_concern_error=parse_write_concern_error)
File "/Users/liviu/.venv/api/lib/python3.6/site-packages/pymongo/helpers.py", line 210, in _check_command_response
raise OperationFailure(msg % errmsg, code, response)
pymongo.errors.OperationFailure: Authentication failed.
2017-12-12 13:00:28,220 INFO tornado.access log_request 2063: 200 GET /event/list/web?starttime=2017-01-01&endtime=2017-02-03T14:00:00&minlatitude=10&maxlatitude=20&minlongitude=10&maxlongitude=20&minmagnitude=2&maxmagnitude=5&mindepth=10&maxdepth=100 (::1) 67.88ms
I was able to find some simple examples of how to authenticate to MongoDB with Motor, and as far as I can tell that is exactly what I am doing (https://github.com/mongodb/motor/blob/master/doc/examples/authentication.rst).
Can anybody shed some light on what is going on and how to properly authenticate to MongoDB from an actual working tornado application using Motor?
P.S. I am using Python 3.6, tornado 4.5.2 and motor 1.1.
P.P.S. In the meantime I have discovered that using this as the URI makes it work properly:
mongouri = f'mongodb://{mongouser}:{mongopass}@{mongohost}:{mongoport}/admin'
In the above, I replaced {mongodb} with the "admin" db. After the client connects and authenticates against the admin database, I can proceed to get_database(mongodb) and it works properly. If anyone cares to articulate more clearly what is going on, I will accept the answer.
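For reference, this is a minimal sketch of that workaround, reusing the variables from the snippet above: authenticate against admin first, then select the application database.
# Sketch of the workaround described above: authenticate against "admin",
# then switch to the application database afterwards.
mongouri = f'mongodb://{mongouser}:{mongopass}@{mongohost}:{mongoport}/admin'
client = motor.motor_tornado.MotorClient(mongouri)
db = client.get_database(mongodb)  # the user itself lives in "admin"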

From the MongoDB docs:
authSource
Specify the database name associated with the user’s credentials.
authSource defaults to the database specified in the connection
string.
That explains why it works when you specify "admin" as the target database.
So in your case you should use:
mongouri = f'mongodb://{mongouser}:{mongopass}@{mongohost}:{mongoport}/{mongodb}?authSource=admin'
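If you would rather not put it in the query string, authSource can also be passed as a keyword argument; a small sketch, under the assumption that Motor forwards keyword options to PyMongo's MongoClient, which merges them with the URI options:
# Keep the credentials and default database in the URI, but authenticate
# against "admin" via the authSource keyword option.
client = motor.motor_tornado.MotorClient(
    f'mongodb://{mongouser}:{mongopass}@{mongohost}:{mongoport}/{mongodb}',
    authSource='admin',
)
db = client.get_default_database()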

Related

Why am I getting pymongo.errors.AutoReconnect: connection pool paused

I'm getting this AutoReconnect error, and there are around 100 connections in the logs during this call to .objects. Here is the document:
class NotificationDoc(Document):
    patient_id = StringField(max_length=32)
    type = StringField(max_length=32)
    sms_sent = BooleanField(default=False)
    email_sent = BooleanField(default=False)
    sending_time = DateTimeField(default=datetime.utcnow)

    def __unicode__(self):
        return "{}_{}_{}".format(self.patient_id, self.type, self.sending_time.strftime('%Y-%m-%d'))
and this is the query call that causes the issue:
docs = NotificationDoc.objects(sending_time__lte=from_date)
This is the complete stacktrace. How can I understand what is going on?
pymongo.errors.AutoReconnect: connection pool paused
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/celery/app/trace.py", line 451, in trace_task
R = retval = fun(*args, **kwargs)
File "/usr/local/lib/python3.8/site-packages/celery/app/trace.py", line 734, in __protected_call__
return self.run(*args, **kwargs)
File "/app/src/reminder/configurations/app/worker.py", line 106, in schedule_recall
schedule_recall_use_case.execute()
File "/app/src/reminder/domain/notification/recall/use_cases/schedule_recall.py", line 24, in execute
notifications_sent = self.notifications_provider.find_recalled_notifications_for_date(no_recalls_before_date)
File "/app/src/reminder/data_providers/database/odm/repositories.py", line 42, in find_recalled_notifications_for_date
if NotificationDoc.objects(sending_time__lte=from_date).count() > 0:
File "/usr/local/lib/python3.8/site-packages/mongoengine/queryset/queryset.py", line 144, in count
return super().count(with_limit_and_skip)
File "/usr/local/lib/python3.8/site-packages/mongoengine/queryset/base.py", line 423, in count
count = count_documents(
File "/usr/local/lib/python3.8/site-packages/mongoengine/pymongo_support.py", line 38, in count_documents
return collection.count_documents(filter=filter, **kwargs)
File "/usr/local/lib/python3.8/site-packages/pymongo/collection.py", line 1502, in count_documents
return self.__database.client._retryable_read(
File "/usr/local/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1307, in _retryable_read
with self._secondaryok_for_server(read_pref, server, session) as (
File "/usr/local/lib/python3.8/contextlib.py", line 113, in __enter__
return next(self.gen)
File "/usr/local/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1162, in _secondaryok_for_server
with self._get_socket(server, session) as sock_info:
File "/usr/local/lib/python3.8/contextlib.py", line 113, in __enter__
return next(self.gen)
File "/usr/local/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1099, in _get_socket
with server.get_socket(
File "/usr/local/lib/python3.8/contextlib.py", line 113, in __enter__
return next(self.gen)
File "/usr/local/lib/python3.8/site-packages/pymongo/pool.py", line 1371, in get_socket
sock_info = self._get_socket(all_credentials)
File "/usr/local/lib/python3.8/site-packages/pymongo/pool.py", line 1436, in _get_socket
self._raise_if_not_ready(emit_event=True)
File "/usr/local/lib/python3.8/site-packages/pymongo/pool.py", line 1407, in _raise_if_not_ready
_raise_connection_failure(
File "/usr/local/lib/python3.8/site-packages/pymongo/pool.py", line 250, in _raise_connection_failure
raise AutoReconnect(msg) from error
pymongo.errors.AutoReconnect: mongo:27017: connection pool paused
It turned out that Celery was leaving connections open, so here is the solution:
from mongoengine import connect, disconnect
from celery.signals import task_prerun

@task_prerun.connect
def on_task_init(*args, **kwargs):
    # Close the stale default connection and open a fresh one before each task.
    disconnect(alias='default')
    connect(db, host=host, port=port, maxPoolSize=400, minPoolSize=200, alias='default')
This could be related to the fact that PyMongo is not fork-safe, which matters if you're using a process pool in any way; that includes server software like uWSGI and even some configurations of popular ASGI servers.
Once a MongoClient object has run a query, it is no longer fork-safe; a MongoClient that has never run any queries still is. Database and Collection objects created from it reference back to their parent MongoClient without any further checks.
I see you are using MongoEngine, which uses PyMongo underneath. I don't know the exact semantics of that particular library, but I would assume they are the same.
In short: you must re-create your connections as part of your forking process.
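For example, with Celery's prefork pool and MongoEngine, one way to do that is to re-connect in each worker child process. A minimal sketch, assuming your connection parameters db, host and port are defined elsewhere in your settings (the names here are placeholders):
from celery.signals import worker_process_init
from mongoengine import connect, disconnect

@worker_process_init.connect
def reconnect_mongo(**kwargs):
    # Drop the connection inherited from the parent process and open a fresh
    # MongoClient in this child, so PyMongo is never used across a fork.
    disconnect(alias='default')
    connect(db, host=host, port=port, alias='default')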
pip install -U pymongo; this is already fixed upstream, see https://github.com/mongodb/mongo-python-driver/pull/944

Random NoTransaction in Pyramid

I'm having trouble identifying the source of transaction.interfaces.NoTransaction errors within my Pyramid App. I don't see any patterns to when the error happens, so to me it's quite random.
This app is a (semi-) RESTful API and uses SQLAlchemy and MySQL. I'm currently running within a docker container that connects to an external (bare metal) MySQL instance on the same host OS.
Here's the stack trace for a login attempt within the App. This error happened right after another login attempt that was actually successful.
2020-06-15 03:57:18,982 DEBUG [txn.140501728405248:108][waitress-1] new transaction
2020-06-15 03:57:18,984 INFO [sqlalchemy.engine.base.Engine:730][waitress-1] BEGIN (implicit)
2020-06-15 03:57:18,984 DEBUG [txn.140501728405248:576][waitress-1] abort
2020-06-15 03:57:18,985 ERROR [waitress:357][waitress-1] Exception while serving /auth
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/waitress/channel.py", line 350, in service
task.service()
File "/usr/local/lib/python3.8/site-packages/waitress/task.py", line 171, in service
self.execute()
File "/usr/local/lib/python3.8/site-packages/waitress/task.py", line 441, in execute
app_iter = self.channel.server.application(environ, start_response)
File "/usr/local/lib/python3.8/site-packages/pyramid/router.py", line 270, in __call__
response = self.execution_policy(environ, self)
File "/usr/local/lib/python3.8/site-packages/pyramid_retry/__init__.py", line 127, in retry_policy
response = router.invoke_request(request)
File "/usr/local/lib/python3.8/site-packages/pyramid/router.py", line 249, in invoke_request
response = handle_request(request)
File "/usr/local/lib/python3.8/site-packages/pyramid_tm/__init__.py", line 178, in tm_tween
reraise(*exc_info)
File "/usr/local/lib/python3.8/site-packages/pyramid_tm/compat.py", line 36, in reraise
raise value
File "/usr/local/lib/python3.8/site-packages/pyramid_tm/__init__.py", line 135, in tm_tween
userid = request.authenticated_userid
File "/usr/local/lib/python3.8/site-packages/pyramid/security.py", line 381, in authenticated_userid
return policy.authenticated_userid(self)
File "/opt/REDACTED-api/REDACTED_api/auth/policy.py", line 208, in authenticated_userid
result = self._authenticate(request)
File "/opt/REDACTED-api/REDACTED_api/auth/policy.py", line 199, in _authenticate
session = self._get_session_from_token(token)
File "/opt/REDACTED-api/REDACTED_api/auth/policy.py", line 320, in _get_session_from_token
session = service.get(session_id)
File "/opt/REDACTED-api/REDACTED_api/service/__init__.py", line 122, in get
entity = self.queryset.filter(self.Meta.model.id == entity_id).first()
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3375, in first
ret = list(self[0:1])
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3149, in __getitem__
return list(res)
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3481, in __iter__
return self._execute_and_instances(context)
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3502, in _execute_and_instances
conn = self._get_bind_args(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3517, in _get_bind_args
return fn(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/query.py", line 3496, in _connection_from_session
conn = self.session.connection(**kw)
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 1138, in connection
return self._connection_for_bind(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 1146, in _connection_for_bind
return self.transaction._connection_for_bind(
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/orm/session.py", line 458, in _connection_for_bind
self.session.dispatch.after_begin(self.session, self, conn)
File "/usr/local/lib/python3.8/site-packages/sqlalchemy/event/attr.py", line 322, in __call__
fn(*args, **kw)
File "/usr/local/lib/python3.8/site-packages/zope/sqlalchemy/datamanager.py", line 268, in after_begin
join_transaction(
File "/usr/local/lib/python3.8/site-packages/zope/sqlalchemy/datamanager.py", line 233, in join_transaction
DataManager(
File "/usr/local/lib/python3.8/site-packages/zope/sqlalchemy/datamanager.py", line 89, in __init__
transaction_manager.get().join(self)
File "/usr/local/lib/python3.8/site-packages/transaction/_manager.py", line 91, in get
raise NoTransaction()
transaction.interfaces.NoTransaction
The trace shows that execution eventually reaches my project, but only my custom authentication policy, and it fails right where the database should be queried for the user.
What intrigues me here is the third line on the stack trace. It seems Waitress somehow aborted the transaction it created? Any clue why?
EDIT: Here's the code where that happens: policy.py:320
def _get_session_from_token(self, token) -> UserSession:
    try:
        session_id, session_secret = self.parse_token(token)
    except InvalidToken as e:
        raise SessionNotFound(e)
    service = AuthService(self.dbsession, None)
    try:
        session = service.get(session_id)  # <---- Service class called here
    except NoResultsFound:
        raise SessionNotFound("Invalid session found in request headers. "
                              "Session id: {}".format(session_id))
    if not service.check_session(session, session_secret):
        raise SessionNotFound("Session signature does not match")
    now = datetime.now(tz=pytz.UTC)
    if session.validity < now:
        raise SessionNotFound(
            "Current session ID {session_id} is expired".format(
                session_id=session.id
            )
        )
    return session
And here is a view of that service class method:
class AuthService(ModelService):
    class Meta:
        model = UserSession
        queryset = Query(UserSession)
        search_fields = []
        order_fields = [UserSession.created_at.desc()]

    # These below are from the generic ModelService parent class
    def __init__(self, dbsession: Session, user_id: str):
        self.user_id = user_id
        self.dbsession = dbsession
        self.Meta.queryset = self.Meta.queryset.with_session(dbsession)
        self.logger = logging.getLogger("REDACTED")

    @property
    def queryset(self):
        return self.Meta.queryset

    def get(self, entity_id) -> Base:
        entity = self.queryset.filter(self.Meta.model.id == entity_id).first()
        if not entity:
            raise NoResultsFound(f"Could not find requested ID {entity_id}")
        return entity
As you can see, there's already some exception handling. I really don't see what other exception I could try to catch in AuthService.get.
I found the solution to be much simpler than tinkering inside Pyramid or SQLAlchemy.
Debugging my authentication policy closely, I found out that it was keeping a sticky reference to the dbsession. It was stored on the first request that ever used it and never released.
The first request works as expected, but the following one fails. My understanding is that the policy object stays in memory while the app is running, after the initial transaction is closed. The second request has a new connection and a new transaction, but the object in memory still points to the previous one, which ultimately causes this error when it is used.
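To illustrate the pattern (the class and method names here are hypothetical, not my actual policy code): the broken version captures one dbsession when the policy is constructed, while the fix resolves request.dbsession every time it is needed.
class StickySessionPolicy:
    # Broken: the dbsession captured here is bound to the first request's
    # transaction and gets reused forever.
    def __init__(self, dbsession):
        self.dbsession = dbsession

    def _authenticate(self, request, session_id):
        return AuthService(self.dbsession, None).get(session_id)  # stale session -> NoTransaction


class PerRequestSessionPolicy:
    # Fixed: resolve the per-request session on every call.
    def _authenticate(self, request, session_id):
        return AuthService(request.dbsession, None).get(session_id)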
What I don't understand is why the exception only happened sometimes. As I mentioned initially, it was seemingly random.
Another thing I struggled with was writing a test case to expose the issue. In my tests the issue never happens, because I have (and I've never seen it done differently) a single connection and a single transaction throughout the entire testing session, as opposed to a new connection/transaction per request, so I have found no way to actually reproduce it.
Please let me know if that makes sense, and if you can shed a light on how to expose the bug on a test case.

Python package APScheduler throws an error when starting the scheduler if MongoDB is used as a jobstore

I'm trying to run some code with APScheduler with MongoDB as a jobstore. I've downloaded pymongo and I tested it, so it does work. APScheduler will work when I instantiate the scheduler, but when I run scheduler.start() it throws the following error:
Traceback (most recent call last):
File "aps_ro.py", line 56, in <module>
scheduler.start()
File "/usr/lib/python3.8/site-packages/apscheduler/schedulers/background.py", line 33, in start
BaseScheduler.start(self, *args, **kwargs)
File "/usr/lib/python3.8/site-packages/apscheduler/schedulers/base.py", line 158, in start
store.start(self, alias)
File "/usr/lib/python3.8/site-packages/apscheduler/jobstores/mongodb.py", line 57, in start
self.collection.ensure_index('next_run_time', sparse=True)
File "/usr/lib/python3.8/site-packages/pymongo/collection.py", line 2028, in ensure_index
self.__create_index(keys, kwargs, session=None)
File "/usr/lib/python3.8/site-packages/pymongo/collection.py", line 1881, in __create_index
with self._socket_for_writes(session) as sock_info:
File "/usr/lib/python3.8/site-packages/pymongo/collection.py", line 195, in _socket_for_writes
return self.__database.client._socket_for_writes(session)
File "/usr/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1266, in _socket_for_writes
server = self._select_server(writable_server_selector, session)
File "/usr/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1253, in _select_server
server = topology.select_server(server_selector)
File "/usr/lib/python3.8/site-packages/pymongo/topology.py", line 233, in select_server
return random.choice(self.select_servers(selector,
File "/usr/lib/python3.8/site-packages/pymongo/topology.py", line 192, in select_servers
server_descriptions = self._select_servers_loop(
File "/usr/lib/python3.8/site-packages/pymongo/topology.py", line 208, in _select_servers_loop
raise ServerSelectionTimeoutError(
pymongo.errors.ServerSelectionTimeoutError: localhost:27017: [Errno 111] Connection refused
My actual code sample is as follows:
jobstores = {
    'mongo': {'type': 'mongodb'},
    'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
}
executors = {...}
job_defaults = {...}

scheduler = BackgroundScheduler()
scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

@scheduler.scheduled_job(trigger='cron', day_of_week=4, hour=22, minute=21, second=0)
def tester():
    print("hello")

scheduler.start()
By default, the MongoDB Python driver connects to a database on host localhost and port 27017.
So you will need to either:
make sure the mongodb server is running on the same machine as the Python code, on the default port (you can test this easily by typing mongo at your favourite shell and checking that you get a > prompt and not an error), or
supply a connection string or connection arguments if mongodb is running on a different server or a different port (see the sketch below).
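For the second option, a sketch of what that might look like with APScheduler's MongoDBJobStore, under the assumption that extra keyword arguments are forwarded to pymongo's MongoClient (the host, port and credentials below are placeholders):
from apscheduler.jobstores.mongodb import MongoDBJobStore
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore

jobstores = {
    # Point the mongo jobstore at an explicit host instead of localhost:27017.
    'mongo': MongoDBJobStore(
        database='apscheduler',
        collection='jobs',
        host='mongodb://user:pass@db.example.com:27017/',
    ),
    'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite'),
}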

InterfaceError: connection already closed (using django + celery + Scrapy)

I am getting this error when using a Scrapy parsing function (which can sometimes take up to 10 minutes) inside a Celery task.
I use:
- Django==1.6.5
- django-celery==3.1.16
- celery==3.1.16
- psycopg2==2.5.5 (I used also psycopg2==2.5.4)
[2015-07-19 11:27:49,488: CRITICAL/MainProcess] Task myapp.parse_items[63fc40eb-c0d6-46f4-a64e-acce8301d29a] INTERNAL ERROR: InterfaceError('connection already closed',)
Traceback (most recent call last):
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/celery/app/trace.py", line 284, in trace_task
uuid, retval, SUCCESS, request=task_request,
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/celery/backends/base.py", line 248, in store_result
request=request, **kwargs)
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/djcelery/backends/database.py", line 29, in _store_result
traceback=traceback, children=self.current_task_children(request),
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/djcelery/managers.py", line 42, in _inner
return fun(*args, **kwargs)
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/djcelery/managers.py", line 181, in store_result
'meta': {'children': children}})
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/djcelery/managers.py", line 87, in update_or_create
return get_queryset(self).update_or_create(**kwargs)
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/djcelery/managers.py", line 70, in update_or_create
obj, created = self.get_or_create(**kwargs)
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/models/query.py", line 376, in get_or_create
return self.get(**lookup), False
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/models/query.py", line 304, in get
num = len(clone)
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/models/query.py", line 77, in __len__
self._fetch_all()
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/models/query.py", line 857, in _fetch_all
self._result_cache = list(self.iterator())
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/models/query.py", line 220, in iterator
for row in compiler.results_iter():
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/models/sql/compiler.py", line 713, in results_iter
for rows in self.execute_sql(MULTI):
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/models/sql/compiler.py", line 785, in execute_sql
cursor = self.connection.cursor()
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/backends/__init__.py", line 160, in cursor
cursor = self.make_debug_cursor(self._cursor())
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/backends/__init__.py", line 134, in _cursor
return self.create_cursor()
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/utils.py", line 99, in __exit__
six.reraise(dj_exc_type, dj_exc_value, traceback)
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/backends/__init__.py", line 134, in _cursor
return self.create_cursor()
File "/home/mo/Work/python/pb-env/local/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/base.py", line 137, in create_cursor
cursor = self.connection.cursor()
InterfaceError: connection already closed
Unfortunately this is a problem with the django + psycopg2 + celery combo.
It's an old and unsolved problem.
Take a look on this thread to understand:
https://github.com/celery/django-celery/issues/121
Basically, when celery starts a worker, it forks a database connection
from the django.db framework. If this connection drops for some reason, it
doesn't create a new one. Celery has nothing to do with this problem,
since there is no way to detect when the database connection is dropped
using the django.db libraries. Django doesn't notify you when it happens,
because it just starts a connection when it receives a WSGI call (there is no
connection pool). I had the same problem in a huge production
environment with a lot of machine workers, and sometimes these
machines lost connectivity with the postgres server.
I solved it by putting each celery master process under a linux
supervisord handler with a watcher, and implemented a decorator that
handles psycopg2.InterfaceError; when it happens, this function
dispatches a syscall to force supervisord to gracefully restart the
celery process with SIGINT.
Edit:
Found a better solution. I implemented a celery task baseclass like this:
from django.db import connection
import celery

class FaultTolerantTask(celery.Task):
    """ Implements after return hook to close the invalid connection.
    This way, django is forced to serve a new connection for the next
    task.
    """
    abstract = True

    def after_return(self, *args, **kwargs):
        connection.close()

@celery.task(base=FaultTolerantTask)
def my_task():
    # my database dependent code here
    pass
I believe it will fix your problem too.
Guys and emanuelcds,
I had the same problem; I have now updated my code and created a new loader for celery:
from djcelery.loaders import DjangoLoader
from django import db

class CustomDjangoLoader(DjangoLoader):
    def on_task_init(self, task_id, task):
        """Called before every task."""
        for conn in db.connections.all():
            conn.close_if_unusable_or_obsolete()
        super(CustomDjangoLoader, self).on_task_init(task_id, task)
This of course applies if you are using djcelery; it will also require something like this in the settings:
CELERY_LOADER = 'myproject.loaders.CustomDjangoLoader'
os.environ['CELERY_LOADER'] = CELERY_LOADER
I still have to test it, I will update.
If you are running into this when running tests, then you can either change the test to use the TransactionTestCase class instead of TestCase, or add the mark pytest.mark.django_db(transaction=True). This kept my db connection alive from the creation of the pytest-celery fixtures to the database calls.
Github issue - https://github.com/Koed00/django-q/issues/167
For context, I am using pytest-celery with celery_app and celery_worker as fixtures in my tests. I am also trying to hit the test db in the tasks referenced in these tests.
If someone could explain why switching to transaction=True keeps it open, that would be great!
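For context, this is roughly how the second option looks in a test; my_db_task is a hypothetical task that hits the database, and celery_app/celery_worker are the pytest-celery fixtures mentioned above:
import pytest
from myapp.tasks import my_db_task  # hypothetical task that touches the test db

@pytest.mark.django_db(transaction=True)
def test_my_db_task(celery_app, celery_worker):
    # transaction=True uses real transactions, so the connection created for
    # the celery fixtures stays usable when the task queries the database.
    result = my_db_task.delay()
    assert result.get(timeout=10) is not None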

Flask_MongoEngine doesn't work on Webfaction?

I'm currently trying to deploy a website built with the Python Flask framework to Webfaction. I've deployed a MongoDB instance to which I can successfully connect from the MongoDB interactive interpreter:
[celli#web498 ~]$ mongo localhost:18209/admin
MongoDB shell version: 3.0.2
connecting to: localhost:18209/admin
> db.auth("awesomeusername", "anawesomepassword")
1
When starting the website, it can successfully connect using these credentials in my flask config.py file:
MONGODB_SETTINGS = {
    'db': 'tc',
    'port': 18209,
    'username': 'awesomeusername',
    'password': 'anawesomepassword',
}
But when I visit a page that does a query I get the error below. This code works perfectly well on another server, so the mistake lies somewhere in either the setup of my MongoDB instance or in the connection to it. I also find it strange that setting up the connection works fine, but querying it produces this Authentication failed error.
Any idea how I can fix this? All tips are welcome!
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/flask_login.py", line 758, in decorated_view
return func(*args, **kwargs)
File "/home/celli/webapps/tc/tc/app/views/webviews.py", line 220, in myTickets
userDocs = UserDocument.objects(id__in=[ticket.doc_id for ticket in userTickets])
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/mongoengine/queryset/manager.py", line 37, in __get__
queryset = queryset_class(owner, owner._get_collection())
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/mongoengine/document.py", line 168, in _get_collection
db = cls._get_db()
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/mongoengine/document.py", line 162, in _get_db
return get_db(cls._meta.get("db_alias", DEFAULT_CONNECTION_NAME))
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/mongoengine/connection.py", line 143, in get_db
source=conn_settings['authentication_source'])
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/pymongo/database.py", line 978, in authenticate
self.connection._cache_credentials(self.name, credentials)
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/pymongo/mongo_client.py", line 467, in _cache_credentials
auth.authenticate(credentials, sock_info, self.__simple_command)
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/pymongo/auth.py", line 475, in authenticate
auth_func(credentials[1:], sock_info, cmd_func)
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/pymongo/auth.py", line 450, in _authenticate_default
return _authenticate_scram_sha1(credentials, sock_info, cmd_func)
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/pymongo/auth.py", line 238, in _authenticate_scram_sha1
sasl_start, sasl_continue)
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/pymongo/auth.py", line 185, in _scram_sha1_conversation
res, _ = cmd_func(sock_info, source, sasl_start)
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/pymongo/mongo_client.py", line 703, in __simple_command
helpers._check_command_response(response, None, msg)
File "/home/celli/webapps/tc/venv/lib/python2.7/site-packages/pymongo/helpers.py", line 182, in _check_command_response
raise OperationFailure(msg % errmsg, code, response)
OperationFailure: command SON([('saslStart', 1), ('mechanism', 'SCRAM-SHA-1'), ('autoAuthorize', 1), ('payload', Binary('n,,n=awesomeusername,r=NjAxNTEwMjc5MzAy', 0))]) on namespace tc.$cmd failed: Authentication failed.
MongoDB's users are per-database. In your example you show that you connect to the admin database, but the error message is telling you that your user is trying to authenticate against the tc database. Probably your user doesn't have a role on tc.
(The other possibility is that tc is not the database you want to connect to; you need to specify the correct database name in your connection params/string).
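If the user really was created in admin, one option (a sketch; judging from the traceback, mongoengine reads an authentication_source setting, and Flask-MongoEngine passes MONGODB_SETTINGS through to mongoengine.connect) is to keep tc as the database but authenticate against admin:
MONGODB_SETTINGS = {
    'db': 'tc',
    'port': 18209,
    'username': 'awesomeusername',
    'password': 'anawesomepassword',
    # Authenticate against the database where the user was actually created.
    'authentication_source': 'admin',
}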
