logs inside django channels consumers not showing up - python

Environment:
OS: Red Hat
Python version: 3.6
Django: 2.1
Django Channels: 2.1.3
The following is my logging.conf file (same level as settings.py):
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S',
        },
    },
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': 'debug.log',
            'formatter': 'standard'
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
}
import logging

django_logger = logging.getLogger('django')
and the helper function used for logging is as follows:
def log_it(*args):
    try:
        django_logger.info(str(args).encode('utf-8'))
        return True
    except:
        django_logger.info('#### Exception in LOGGING!!!!!!')
        return False
I am using this to log inside Django Channels consumers as follows:
class EchoConsumer(SyncConsumer):
    def websocket_connect(self, event):
        try:
            log_it('inside EchoConsumer connect()', event)
            self.send({
                "type": "websocket.accept",
            })
            return True
        except:
            error = traceback.format_exc()
            write_error_log(error)
            return False

    def websocket_receive(self, event):
        try:
            log_it('inside EchoConsumer receive()', event)
            self.send({
                "type": "websocket.send",
                "text": event["text"],
            })
            return True
        except:
            error = traceback.format_exc()
            write_error_log(error)
            return False
The log_it() calls inside the consumers aren't producing any output.
(Assumption: integrating Channels with Django must have overridden the Django logging settings.)
I checked the Daphne access logs to see whether the requests were reaching the consumers at all, and also checked my nginx conf; the connection requests were recorded as follows:
127.0.0.1:45100 - - [08/Oct/2018:19:00:58] "WSCONNECTING /ws/event/" - -
127.0.0.1:45100 - - [08/Oct/2018:19:00:58] "WSCONNECT /ws/event/" - -
Note: I have linked /ws/event to EchoConsumer in routing.py
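For reference, the routing is wired roughly like this (a sketch, assuming the standard Channels 2 ProtocolTypeRouter/URLRouter layout; the module path myapp.consumers is a placeholder):

# routing.py (sketch)
from django.conf.urls import url
from channels.routing import ProtocolTypeRouter, URLRouter
from myapp.consumers import EchoConsumer  # hypothetical module path

application = ProtocolTypeRouter({
    'websocket': URLRouter([
        url(r'^ws/event/$', EchoConsumer),
    ]),
})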
My expectation is:
127.0.0.1:45100 - - [08/Oct/2018:19:00:58] "WSCONNECTING /ws/event/" - -
127.0.0.1:45100 - - [08/Oct/2018:19:00:58] "WSCONNECT /ws/event/" - -
inside EchoConsumer connect() ....
Any idea why the log statements are not being displayed?
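One way to narrow this down (a diagnostic sketch, not a confirmed fix): from inside the consumer process, inspect the 'django' logger directly to see whether the file handler and level survived startup:

# diagnostic sketch: call from websocket_connect() to inspect logger state
import logging

def dump_logger_state():
    logger = logging.getLogger('django')
    # if LOGGING was applied in this process, the FileHandler should be listed
    print('handlers:', logger.handlers)
    print('effective level:', logging.getLevelName(logger.getEffectiveLevel()))
    logger.info('probe message from the consumer process')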
P.S. related issue on GitHub

Related

Can't add username to logging record using Middleware

I'm trying to log the username and project by default (both can be derived from the request object); I don't want to add that context to every log call manually.
The problem is that I can't make Django add the request, or the username and project directly, to the LogRecord. I've tried dozens of approaches.
This is my code:
middlewares.py
import threading

local = threading.local()


class LoggingRequestMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response
        # One-time configuration and initialization.

    def __call__(self, request):
        # Code to be executed for each request before
        # the view (and later middleware) are called.
        setattr(local, 'request', request)
        response = self.get_response(request)
        # Code to be executed for each request/response after
        # the view is called.
        return response
settings.py
def add_username_to_log(record):
    local = threading.local()
    record.username = '-'
    request = getattr(local, 'request', None)
    print(request)
    return True
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': LOGGING_VERBOSE_FORMAT,
            'style': '{',
        },
    },
    'filters': {
        'context_filter': {
            '()': 'django.utils.log.CallbackFilter',
            'callback': add_username_to_log,
        },
    },
    'handlers': {
        'console': {
            'level': DEFAULT_LOG_LEVEL,
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
            'filters': ['context_filter'],
        },
        'file_main': {
            'level': DEFAULT_LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOG_PATH, 'main.log'),
            'maxBytes': DEFAULT_LOG_SIZE,
            'formatter': 'verbose',
            'filters': ['context_filter'],
            'backupCount': 0,
        },
    },
    'loggers': {
        '': {
            'handlers': ['file_main'],
            'level': DEFAULT_LOG_LEVEL,
            'propagate': False,
        },
    },
}
But the request object is always None. Do you know why?
Each call to threading.local() returns a new object; you have to read and write to the same object.
locals_a = threading.local()
locals_a.foo = 1
hasattr(locals_a, 'foo')  # True

locals_b = threading.local()
hasattr(locals_b, 'foo')  # False
You need to define your locals object in one place, import it everywhere you need access to the request, and read and write to that same object every time. As a basic example, something like this should work:
def add_username_to_log(record):
    from middlewares import local  # the single shared threading.local instance
    request = getattr(local, 'request', None)
    # mirror the question's default of '-' when no request/user is available
    record.username = getattr(getattr(request, 'user', None), 'username', '-')
    return True
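With the filter returning True and setting record.username, the verbose formatter can reference the new field. LOGGING_VERBOSE_FORMAT itself is not shown in the question; a hypothetical value, given the config uses 'style': '{', might be:

# hypothetical format string; the question's actual LOGGING_VERBOSE_FORMAT is not shown
LOGGING_VERBOSE_FORMAT = '{asctime} [{levelname}] {username} {name}: {message}'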

python: How to start and stop a logger whenever I want

I am trying to log SQL statements from a specific piece of code in my Django application.
Currently I am using the following logger config in my settings.py:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'sql': {
            '()': SQLFormatter,
            'format': '[%(duration).3f] %(statement)s',
        },
        'verbose': {
            'format': '%(levelname)s %(funcName)s() %(pathname)s[:%(lineno)s] %(name)s \n%(message)s'
        }
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'formatter': 'verbose',
            'class': 'logging.StreamHandler',
        },
        'sql': {
            'class': 'logging.StreamHandler',
            'formatter': 'sql',
            'level': 'DEBUG',
        }
    }
}
In general, to log SQL in Django we can add django.db.backends to the loggers section of the config in settings.py:
'loggers': {
    'django.db.backends': {
        'handlers': ['sql'],
        'level': 'DEBUG',
        'propagate': False,
    },
},
But the problem is that it will log every SQL statement. So how can we start and stop logging for django.db.backends partway through the code?
I have the following code in my views.py
def someview(request):
    # start logging from here
    user_set = User.objects.all()
    for user in user_set:
        print(user.last_name)
    # stop logging from here
Also, I want to use the sql handler I defined in the logging config.
What code should go in the "start logging" and "stop logging" places in the view above?
Create a filter class and add an instance to the logger or handler.
class LoggerGate:
    def __init__(self, state='open'):
        self.state = state

    def open(self):
        self.state = 'open'

    def close(self):
        self.state = 'closed'

    def filter(self, record):
        return self.state == 'open'
Create a filter, initialized in the 'closed' state.
Get the 'django.db.backends' logger and add the filter.
gate = LoggerGate('closed')
sql_logger = logging.getLogger('django.db.backends')
sql_logger.addFilter(gate)
Then call the open or close method to limit logging to where you want it.
def someview(request):
    gate.open()  # start logging from here
    user_set = User.objects.all()
    for user in user_set:
        print(user.last_name)
    gate.close()  # stop logging here
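A small context manager on top of LoggerGate makes the open/close pairing exception-safe (a sketch built on the class and the module-level gate above):

from contextlib import contextmanager

@contextmanager
def sql_logging(gate):
    """Open the gate for the duration of the with-block."""
    gate.open()
    try:
        yield
    finally:
        gate.close()  # close even if the block raises

# usage:
# with sql_logging(gate):
#     list(User.objects.all())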
Summarizing the answer above together with the answers from Gabriel C and Sraw (which take the same approach):
My goal was to log SQL using Django's django.db.backends logger, but the problem with it is that it logs all SQL statements. I wanted to log only the SQL in a particular section of code, wherever I need to see the queries. The following approach makes that possible.
logging config inside settings.py:
# Filter class to stop or start logging for "django.db.backends"
class LoggerGate:
    def __init__(self, state='closed'):
        # settings.py runs twice, so this filter would be created twice;
        # remove any previously attached filters before adding the new one
        import logging
        logger_database = logging.getLogger("django.db.backends")
        try:
            # iterate over a copy: removing while iterating skips items
            for existing in list(logger_database.filters):
                logger_database.removeFilter(existing)
        except Exception:
            pass
        self.state = state

    def open(self):
        self.state = 'open'

    def close(self):
        self.state = 'closed'

    def filter(self, record):
        """
        Determine if the specified record is to be logged.

        Returns False for no, True for yes. If deemed appropriate,
        the record may be modified in-place.
        """
        return self.state == 'open'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'sql': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
        }
    },
    'filters': {
        'myfilter': {
            '()': LoggerGate,
        }
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['sql'],
            'level': 'DEBUG',
            'propagate': False,
            'filters': ['myfilter']
        }
    }
}
Then in views.py:
import datetime
import logging

from django.contrib.auth.models import User
from django.http import HttpResponse

logger = logging.getLogger(__name__)
logger_database = logging.getLogger("django.db.backends")


def test1(request):
    logger_database.filters[0].open()
    # SQL statements are printed to the console from here on
    user_set = User.objects.all()
    for user in user_set:  # the query executes here and is printed
        pass

    logger_database.filters[0].close()
    # SQL statements are no longer printed after this point
    user_set = User.objects.all()
    for user in user_set:  # the query executes here but is not printed
        pass

    now = datetime.datetime.now()
    html = "<html><body>Internal purpose</body></html>"
    return HttpResponse(html)
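As a side note, indexing logger_database.filters[0] assumes the gate is the only filter on the logger; a small helper that looks the gate up instead is more robust (a sketch, matching the LoggerGate class defined in settings.py above by name since settings classes are awkward to import directly):

import logging

def get_gate():
    """Find the LoggerGate attached to django.db.backends, if present."""
    for f in logging.getLogger("django.db.backends").filters:
        if type(f).__name__ == 'LoggerGate':
            return f
    return None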
If you want to print the SQL in a formatted and colorized way, use this in settings.py:
# SQL formatter to be used for the handler used in logging "django.db.backends"
class SQLFormatter(logging.Formatter):
    def format(self, record):
        # Check if Pygments is available for coloring
        try:
            import pygments
            from pygments.lexers import SqlLexer
            from pygments.formatters import TerminalTrueColorFormatter
        except ImportError:
            pygments = None
        # Check if sqlparse is available for indentation
        try:
            import sqlparse
        except ImportError:
            sqlparse = None
        # Remove leading and trailing whitespace
        sql = record.sql.strip()
        if sqlparse:
            # Indent the SQL query
            sql = sqlparse.format(sql, reindent=True)
        if pygments:
            # Highlight the SQL query
            sql = pygments.highlight(
                sql,
                SqlLexer(),
                # TerminalTrueColorFormatter(style='monokai')
                TerminalTrueColorFormatter()
            )
        # Set the record's statement to the formatted query
        record.statement = sql
        return super(SQLFormatter, self).format(record)
# Filter class to stop or start logging for "django.db.backends"
class LoggerGate:
    def __init__(self, state='closed'):
        # settings.py runs twice, so this filter would be created twice;
        # remove any previously attached filters before adding the new one
        import logging
        logger_database = logging.getLogger("django.db.backends")
        try:
            # iterate over a copy: removing while iterating skips items
            for existing in list(logger_database.filters):
                logger_database.removeFilter(existing)
        except Exception:
            pass
        self.state = state

    def open(self):
        self.state = 'open'

    def close(self):
        self.state = 'closed'

    def filter(self, record):
        """
        Determine if the specified record is to be logged.

        Returns False for no, True for yes. If deemed appropriate,
        the record may be modified in-place.
        """
        return self.state == 'open'
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'sql': {
            '()': SQLFormatter,
            # django.db.backends attaches 'duration' and 'sql' to each record;
            # SQLFormatter.format() above derives 'statement' from 'sql'
            'format': '[%(duration).3f] %(statement)s',
        }
    },
    'handlers': {
        'sql': {
            'class': 'logging.StreamHandler',
            'formatter': 'sql',
            'level': 'DEBUG',
        }
    },
    'filters': {
        'myfilter': {
            '()': LoggerGate,
        }
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['sql'],
            'level': 'DEBUG',
            'propagate': False,
            'filters': ['myfilter']
        }
    }
}

Airflow Google Cloud Logging

For Apache Airflow v1.10 running on Python 2.7, installed with `pip install airflow[gcp_api]`, I am trying to set up logging to Google Cloud. I have the following log_config.py file:
GCS_LOG_FOLDER = 'gs://GCSbucket/'
LOG_LEVEL = conf.get('core', 'LOGGING_LEVEL').upper()
FAB_LOG_LEVEL = conf.get('core', 'FAB_LOGGING_LEVEL').upper()
LOG_FORMAT = conf.get('core', 'LOG_FORMAT')
BASE_LOG_FOLDER = conf.get('core', 'BASE_LOG_FOLDER')
PROCESSOR_LOG_FOLDER = conf.get('scheduler', 'CHILD_PROCESS_LOG_DIRECTORY')
FILENAME_TEMPLATE = conf.get('core', 'LOG_FILENAME_TEMPLATE')
PROCESSOR_FILENAME_TEMPLATE = conf.get('core', 'LOG_PROCESSOR_FILENAME_TEMPLATE')
# Storage bucket url for remote logging
# s3 buckets should start with "s3://"
# gcs buckets should start with "gs://"
# wasb buckets should start with "wasb"
# just to help Airflow select correct handler
REMOTE_BASE_LOG_FOLDER = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'airflow': {
            'format': LOG_FORMAT,
        },
    },
    'handlers': {
        'console': {
            'class': 'airflow.utils.log.logging_mixin.RedirectStdHandler',
            'formatter': 'airflow',
            'stream': 'sys.stdout'
        },
        'task': {
            'class': 'airflow.utils.log.file_task_handler.FileTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
            'filename_template': FILENAME_TEMPLATE,
        },
        'processor': {
            'class': 'airflow.utils.log.file_processor_handler.FileProcessorHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
            'filename_template': PROCESSOR_FILENAME_TEMPLATE,
        },
        # Add a GCSTaskHandler to the 'handlers' block of the LOGGING_CONFIG variable
        'gcs.task': {
            'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
            'gcs_log_folder': GCS_LOG_FOLDER,
            'filename_template': FILENAME_TEMPLATE,
        },
    },
    'loggers': {
        'airflow.processor': {
            'handlers': ['processor'],
            'level': LOG_LEVEL,
            'propagate': False,
        },
        'airflow.task': {
            'handlers': ['gcs.task'],
            'level': LOG_LEVEL,
            'propagate': False,
        },
        'airflow.task_runner': {
            'handlers': ['gcs.task'],
            'level': LOG_LEVEL,
            'propagate': True,
        },
        'flask_appbuilder': {
            'handlers': ['console'],
            'level': FAB_LOG_LEVEL,
            'propagate': True,
        }
    },
    'root': {
        'handlers': ['console'],
        'level': LOG_LEVEL,
    }
}
REMOTE_HANDLERS = {
    's3': {
        'task': {
            'class': 'airflow.utils.log.s3_task_handler.S3TaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
            's3_log_folder': REMOTE_BASE_LOG_FOLDER,
            'filename_template': FILENAME_TEMPLATE,
        },
        'processor': {
            'class': 'airflow.utils.log.s3_task_handler.S3TaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
            's3_log_folder': REMOTE_BASE_LOG_FOLDER,
            'filename_template': PROCESSOR_FILENAME_TEMPLATE,
        },
    },
    'gcs': {
        'task': {
            'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
            'gcs_log_folder': REMOTE_BASE_LOG_FOLDER,
            'filename_template': FILENAME_TEMPLATE,
        },
        'processor': {
            'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
            'gcs_log_folder': REMOTE_BASE_LOG_FOLDER,
            'filename_template': PROCESSOR_FILENAME_TEMPLATE,
        },
    },
    'wasb': {
        'task': {
            'class': 'airflow.utils.log.wasb_task_handler.WasbTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
            'wasb_log_folder': REMOTE_BASE_LOG_FOLDER,
            'wasb_container': 'airflow-logs',
            'filename_template': FILENAME_TEMPLATE,
            'delete_local_copy': False,
        },
        'processor': {
            'class': 'airflow.utils.log.wasb_task_handler.WasbTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
            'wasb_log_folder': REMOTE_BASE_LOG_FOLDER,
            'wasb_container': 'airflow-logs',
            'filename_template': PROCESSOR_FILENAME_TEMPLATE,
            'delete_local_copy': False,
        },
    }
}
REMOTE_LOGGING = conf.get('core', 'remote_logging')

if REMOTE_LOGGING and REMOTE_BASE_LOG_FOLDER.startswith('s3://'):
    LOGGING_CONFIG['handlers'].update(REMOTE_HANDLERS['s3'])
elif REMOTE_LOGGING and REMOTE_BASE_LOG_FOLDER.startswith('gs://'):
    LOGGING_CONFIG['handlers'].update(REMOTE_HANDLERS['gcs'])
elif REMOTE_LOGGING and REMOTE_BASE_LOG_FOLDER.startswith('wasb'):
    LOGGING_CONFIG['handlers'].update(REMOTE_HANDLERS['wasb'])
My airflow.cfg settings are:
[core]
remote_logging = True
remote_base_log_folder = gs:/GCSbucket/logs
remote_log_conn_id = google_cloud_default
The error I get is the following:
Traceback (most recent call last):
  File "/usr/local/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
    func(*targs, **kargs)
  File "/usr/local/lib/python2.7/logging/__init__.py", line 1676, in shutdown
    h.close()
  File "/usr/local/lib/python2.7/site-packages/airflow/utils/log/gcs_task_handler.py", line 73, in close
    if self.closed:
AttributeError: 'GCSTaskHandler' object has no attribute 'closed'
Does anybody know what might have gone wrong?
The tutorial being followed is: https://airflow.readthedocs.io/en/1.10.0/howto/write-logs.html
Update: I did some more research in the source code; here I can see that the close method returns nothing, and I suspect this is why my application crashes:
https://github.com/apache/incubator-airflow/blob/v1-10-stable/airflow/utils/log/gcs_task_handler.py
Does somebody know why nothing is returned in

def close(self):
    if self.closed:
        return
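As an aside, a bare return in close() is ordinary early-exit style and returns None by design; the traceback above points at the guard itself, which fails because self.closed was never assigned on this handler instance. A minimal illustration (hypothetical class, not Airflow's code):

class Handler(object):
    def close(self):
        # reading an attribute that was never assigned raises AttributeError
        if self.closed:
            return

Handler().close()  # AttributeError: 'Handler' object has no attribute 'closed'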
The instructions might be outdated. Please try with the instructions from the following link:
https://airflow.readthedocs.io/en/latest/howto/write-logs.html#writing-logs-to-google-cloud-storage
Follow the steps below to enable Google Cloud Storage logging.
To enable this feature, airflow.cfg must be configured as in this example:
[core]
# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search.
# Users must supply an Airflow connection id that provides access to the storage
# location. If remote_logging is set to true, see UPDATING.md for additional
# configuration requirements.
remote_logging = True
remote_base_log_folder = gs://my-bucket/path/to/logs
remote_log_conn_id = MyGCSConn
Install the gcp_api package first, like so: pip install apache-airflow[gcp_api].
Make sure a Google Cloud Platform connection hook has been defined in Airflow. The hook should have read and write access to the Google Cloud Storage bucket defined above in remote_base_log_folder.
Restart the Airflow webserver and scheduler, and trigger (or wait for) a new task execution.
Verify that logs are showing up for newly executed tasks in the bucket you’ve defined.
Verify that the Google Cloud Storage viewer is working in the UI. Pull up a newly executed task, and verify that you see something like:
*** Reading remote log from gs://<bucket where logs should be persisted>/example_bash_operator/run_this_last/2017-10-03T00:00:00/16.log.
[2017-10-03 21:57:50,056] {cli.py:377} INFO - Running on host chrisr-00532
[2017-10-03 21:57:50,093] {base_task_runner.py:115} INFO - Running: ['bash', '-c', u'airflow run example_bash_operator run_this_last 2017-10-03T00:00:00 --job_id 47 --raw -sd DAGS_FOLDER/example_dags/example_bash_operator.py']
[2017-10-03 21:57:51,264] {base_task_runner.py:98} INFO - Subtask: [2017-10-03 21:57:51,263] {__init__.py:45} INFO - Using executor SequentialExecutor
[2017-10-03 21:57:51,306] {base_task_runner.py:98} INFO - Subtask: [2017-10-03 21:57:51,306] {models.py:186} INFO - Filling up the DagBag from /airflow/dags/example_dags/example_bash_operator.py
To resolve this, I added the following to the [core] section of airflow.cfg:
[core]
log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
log_processor_filename_template = {{ filename }}.log
# Log format
# we need to escape the curly braces by adding an additional curly brace
log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s
simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s
# Logging class
# Specify the class that will specify the logging configuration
# This class has to be on the python classpath
# logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
logging_config_class = log_config.LOGGING_CONFIG
task_log_reader = gcs.task
In log_config.LOGGING_CONFIG I added the following:
# Add a GCSTaskHandler to the 'handlers' block of the LOGGING_CONFIG variable
'gcs.task': {
    'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
    'formatter': 'airflow',
    'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
    'gcs_log_folder': GCS_LOG_FOLDER,
    'filename_template': FILENAME_TEMPLATE,
},
The tutorial to be followed is:
https://airflow.readthedocs.io/en/1.10.0/howto/write-logs.html

Writing custom log handler in python

I am trying to extend RotatingFileHandler into a subclass, say FooBar:
class FooBar(RotatingFileHandler):
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        RotatingHandler.__init__(self, filename, mode, maxBytes, backupCount, encoding, delay)
I configure it using
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(asctime)s %(message)s'
        },
    },
    'handlers': {
        'file': {
            'level': 'ERROR',
            'class': 'myhandler.FooBar',
            'formatter': 'simple',
            'filename': '/tmp/cattle.txt',
            'mode': 'a',
            'maxBytes': 16,
            'backupCount': 100,
        },
        # -- Remaining part truncated --

logging.config.dictConfig(LOGGING)  # === ERROR here
When I use it, I get this error:
File "/usr/lib/python2.7/logging/config.py", line 576, in configure
'%r: %s' % (name, e))
ValueError: Unable to configure handler 'file': global name 'RotatingHandler' is not defined
RotatingHandler is not in scope, so you would need something like this to bring it into scope:
from logging.handlers import RotatingFileHandler
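That said, the NameError is for RotatingHandler, while the parent class is actually called RotatingFileHandler, so the __init__ probably wanted something like this (a sketch of the likely intent):

from logging.handlers import RotatingFileHandler

class FooBar(RotatingFileHandler):
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0,
                 encoding=None, delay=0):
        # call the parent initializer by its real name
        RotatingFileHandler.__init__(self, filename, mode, maxBytes,
                                     backupCount, encoding, delay)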
However, have a look at this example:
How to log everything into a file using RotatingFileHandler by using logging.conf file?
You may not need to create your own class to accomplish what you want to do.

django 1.8 can't override AdminEmailHandler

I'm on Django 1.8 and trying to customize the class django.utils.log.AdminEmailHandler.
Here is my custom class:
class MyCustomAdminEmailHandler(AdminEmailHandler):
    # emit() copied and overridden from v1.8 so we can add request.user to the message
    def emit(self, record):
        current_user = None
        try:
            request = record.request
            subject = '%s (%s IP): %s' % (
                record.levelname,
                ('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
                 else 'EXTERNAL'),
                record.getMessage()
            )
            filter = get_exception_reporter_filter(request)
            request_repr = '\n{}'.format(force_text(filter.get_request_repr(request)))
            current_user = request.user
        except Exception:
            subject = '%s: %s' % (
                record.levelname,
                record.getMessage()
            )
            request = None
            request_repr = "unavailable"
        subject = self.format_subject(subject)

        if record.exc_info:
            exc_info = record.exc_info
        else:
            exc_info = (None, record.getMessage(), None)

        message = "User:%s\n\n%s\n\nRequest repr(): %s" % (current_user, self.format(record), request_repr)
        reporter = ExceptionReporter(request, is_email=True, *exc_info)
        html_message = reporter.get_traceback_html() if self.include_html else None
        self.send_mail(subject, message, fail_silently=True, html_message=html_message)
And here is the relevant part of my logging config in settings.py:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        # can't just override this so customize here
        'mail_admins': {
            'level': 'ERROR',
            'class': 'my_utils.logging_utils.MyCustomAdminEmailHandler',
            'include_html': True,
        },
    },
}
When I test this out, I don't get an HTML email and I don't get my customizations in the email.
Does anyone know what I'm missing?
------ edit ------
@pynchia's comment: django.utils.log already has this relevant logging config for loggers:
'loggers': {
    'django': {
        'handlers': ['console'],
    },
    'django.request': {
        'handlers': ['mail_admins'],
        'level': 'ERROR',
        'propagate': False,
    },
    'django.security': {
        'handlers': ['mail_admins'],
        'level': 'ERROR',
        'propagate': False,
    },
    'py.warnings': {
        'handlers': ['console'],
    },
}
----- 2nd edit ------
For the record, @pynchia's comment worked. I had to manually add the loggers section to my logging config.
Weird.
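For reference, a minimal sketch of the combined config after that fix, assuming the handler path from the question:

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'my_utils.logging_utils.MyCustomAdminEmailHandler',
            'include_html': True,
        },
    },
    'loggers': {
        # without these entries, Django's default config may keep routing
        # django.request errors to its own AdminEmailHandler
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
        'django.security': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
    },
}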
