I'm trying to log the username and project by default (both can be derived from the request object), without adding that context to every log call manually.
The problem is that I can't get Django to attach the request, or the username and project directly, to the LogRecord. I've tried dozens of approaches.
This is my code:
middlewares.py
import threading
local = threading.local()
class LoggingRequestMiddleware:
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
# Code to be executed for each request before
# the view (and later middleware) are called.
setattr(local, 'request', request)
response = self.get_response(request)
# Code to be executed for each request/response after
# the view is called.
return response
settings.py
def add_username_to_log(record):
local = threading.local()
record.username = '-'
request = getattr(local,'request',None)
print(request)
return True
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': LOGGING_VERBOSE_FORMAT,
'style': '{',
},
},
'filters': {
'context_filter': {
'()': 'django.utils.log.CallbackFilter',
'callback': add_username_to_log,
},
},
'handlers': {
'console': {
'level': DEFAULT_LOG_LEVEL,
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'filters': ['context_filter'],
},
'file_main': {
'level': DEFAULT_LOG_LEVEL,
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(LOG_PATH, 'main.log'),
'maxBytes': DEFAULT_LOG_SIZE,
'formatter': 'verbose',
'filters': ['context_filter'],
'backupCount': 0,
},
},
'loggers': {
'': {
'handlers': ['file_main'],
'level': DEFAULT_LOG_LEVEL,
'propagate': False,
},
},
}
But the request object is always None. Do you know why?
Each call to threading.local() returns a new, independent object; you have to read from and write to the same object everywhere.
locals_a = threading.local()
locals_a.foo = 1
hasattr(locals_a, 'foo') # True
locals_b = threading.local()
hasattr(locals_b, 'foo') # False
You need to define your local object in one place, import it everywhere you need access to the request, and read from and write to that same object every time. As a basic example, this should work:
def add_username_to_log(record):
    from middlewares import local  # the same object the middleware writes to
    request = getattr(local, 'request', None)
    record.username = '-'  # then populate it from the request as needed
    return True  # CallbackFilter drops the record unless the callback returns a truthy value
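For completeness, here is a minimal end-to-end sketch of that idea. It assumes the shared local object lives in middlewares.py as in the question; the request.project attribute is hypothetical, so replace it with however your project is actually derived.

# middlewares.py
import threading

local = threading.local()

class LoggingRequestMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        local.request = request  # store the current request for this thread
        try:
            return self.get_response(request)
        finally:
            local.request = None  # don't leak the request into the next one handled by this thread

# settings.py
def add_username_to_log(record):
    from middlewares import local  # the single shared thread-local object
    request = getattr(local, 'request', None)
    user = getattr(request, 'user', None)
    record.username = getattr(user, 'username', '-')
    record.project = getattr(request, 'project', '-')  # hypothetical attribute, adjust as needed
    return True  # never drop records, only annotate them

LOGGING_VERBOSE_FORMAT = '{asctime} {levelname} {username} {project} {name} {message}'

With both attributes guaranteed to exist on every record, the verbose formatter can safely reference {username} and {project}.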
I am trying to understand how multiple filters (one defined in the config and another added in code) work in Python logging.
I am working on a Django project; below is my logger config in settings.py.
My goal is to switch the logger on and off whenever I want, so I am using filters to switch it off by returning False (0).
1) Switch off the logger at the start
class StartFilter(object):
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
return 0
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(funcName)s() %(pathname)s[:%(lineno)s] %(name)s \n%(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'formatter': 'verbose',
'class': 'logging.StreamHandler',
},
},
'filters': {
'myfilter': {
'()': StartFilter,
}
},
'loggers': {
'log_testing': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
'filters': ['myfilter']
},
}
}
I have added the filter to the logger: 'filters': ['myfilter'].
2) Switch the logger on and off in the views.py file where I want the logging to appear
# to switch on logger
class LoggerGateStart(object):
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
return 1
# to switch off logger
class LoggerGateStop(object):
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
return 0
import logging
logger = logging.getLogger("log_testing")
...
logging.debug("Some text Before) # i dont want this to be logged
...
gatestart = LoggerGateStart()
logger_database.addFilter(gatestart)
...
logging.debug("Some text) # i want this to be logged
...
gatestop = LoggerGateStop()
logger_database.addFilter(gatestop)
...
logging.debug("Some text after") # i dont want this to be logged even
if it exist
...
I found it does not work this way: only the StartFilter is considered, LoggerGateStart and LoggerGateStop are ignored, and no logs are printed to the console.
How can I do this?
ANSWER I USED, BASED ON THE ANSWER OF Gabriel C
My goal was to log SQL using Django's django.db.backends logger, but the problem is that it logs all the SQL. I want to log only the SQL in a particular section of code, or wherever I want to see it, so this is how I did it.
logging config inside settings.py:
# Filter class to stop or start logging for "django.db.backends"
class LoggerGate:
def __init__(self, state='closed'):
# We found that settings.py runs twice, so the filter would be created twice; remove any previously added filters so only one remains
import logging
logger_database = logging.getLogger("django.db.backends")
try:
for filter in logger_database.filters:
logger_database.removeFilter(filter)
except Exception as e:
pass
self.state = state
def open(self):
self.state = 'open'
def close(self):
self.state = 'closed'
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0/False for no, nonzero/True for
yes. If deemed appropriate, the record may be modified in-place.
"""
return self.state == 'open'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'sql': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
}
},
'filters': {
'myfilter': {
'()': LoggerGate,
}
},
'loggers': {
'django.db.backends': {
'handlers': ['sql'],
'level': 'DEBUG',
'propagate': False,
'filters': ['myfilter']
}
}
}
Then in views.py:
import logging
logger = logging.getLogger(__name__)
logger_database = logging.getLogger("django.db.backends")
def test1(request):
logger_database.filters[0].open()
# Will allow printing of SQL statements from here
from django import db
user_set = User.objects.all()
for user in user_set: # Here sql is executed and is printed to console
pass
# Will stop printing of SQL statements after this
logger_database.filters[0].close()
from django import db
user_set = User.objects.all()
for user in user_set: # Here sql is executed and is not printed to console
pass
now = datetime.datetime.now()
html = "<html><body>Internal purpose</body></html>"
return HttpResponse(html)
If one wants to print the SQL in a formatted and colorful way, use this in settings.py:
import logging

# SQL formatter to be used for the handler used in logging "django.db.backends"
class SQLFormatter(logging.Formatter):
def format(self, record):
# Check if Pygments is available for coloring
try:
import pygments
from pygments.lexers import SqlLexer
from pygments.formatters import TerminalTrueColorFormatter
except ImportError:
pygments = None
# Check if sqlparse is available for indentation
try:
import sqlparse
except ImportError:
sqlparse = None
# Remove leading and trailing whitespaces
sql = record.sql.strip()
if sqlparse:
# Indent the SQL query
sql = sqlparse.format(sql, reindent=True)
if pygments:
# Highlight the SQL query
sql = pygments.highlight(
sql,
SqlLexer(),
#TerminalTrueColorFormatter(style='monokai')
TerminalTrueColorFormatter()
)
# Set the record's statement to the formatted query
record.statement = sql
return super(SQLFormatter, self).format(record)
# Filter class to stop or start logging for "django.db.backends"
class LoggerGate:
def __init__(self, state='closed'):
# We found that settings.py runs twice, so the filter would be created twice; remove any previously added filters so only one remains
import logging
logger_database = logging.getLogger("django.db.backends")
try:
for filter in logger_database.filters:
logger_database.removeFilter(filter)
except Exception as e:
pass
self.state = state
def open(self):
self.state = 'open'
def close(self):
self.state = 'closed'
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0/False for no, nonzero/True for
yes. If deemed appropriate, the record may be modified in-place.
"""
return self.state == 'open'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'sql': {
'()': SQLFormatter,
'format': '[%(duration).3f] %(statement)s',
}
},
'handlers': {
'sql': {
'class': 'logging.StreamHandler',
'formatter': 'sql',
'level': 'DEBUG',
}
},
'filters': {
'myfilter': {
'()': LoggerGate,
}
},
'loggers': {
'django.db.backends': {
'handlers': ['sql'],
'level': 'DEBUG',
'propagate': False,
'filters': ['myfilter']
}
}
}
Create only one filter and use its instance to control whether logs should be accepted or not.
from logging import getLogger
class LoggerGate(object):
def __init__(self):
self.started = False
def start(self):
self.started = True
def stop(self):
self.started = False
def filter(self, record):
"""
Determine if the specified record is to be logged.
Returns True if this LoggerGate is started, False otherwise.
"""
return self.started
logger_database = getLogger("log_testing")
logger_gate = LoggerGate()
logger_database.addFilter(logger_gate)
logger_database.critical('this is not logged')
logger_gate.start()
logger_database.critical('this is logged')
logger_gate.stop()
logger_database.critical('this is not logged')
Gabriel C gives a great solution. To explain a bit more: filters run in sequence, meaning the record is passed to each filter one by one, and processing stops at the first filter that returns a zero (falsy) value. Since your StartFilter returns 0, it drops every record outright.
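As a small standalone sketch of that ordering (not tied to the Django config above):

import logging

class AlwaysDrop:
    def filter(self, record):
        return 0  # falsy: every record stops here

class AlwaysKeep:
    def filter(self, record):
        return 1  # never consulted, because AlwaysDrop runs first and drops the record

logger = logging.getLogger("filter_demo")
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
logger.addFilter(AlwaysDrop())
logger.addFilter(AlwaysKeep())
logger.debug("never printed")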
I am trying to log the SQL statements from a section of code in my Django application.
Currently I am using the following logger config in my settings.py:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'sql': {
'()': SQLFormatter,
'format': '[%(duration).3f] %(statement)s',
},
'verbose': {
'format': '%(levelname)s %(funcName)s() %(pathname)s[:%(lineno)s] %(name)s \n%(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'formatter': 'verbose',
'class': 'logging.StreamHandler',
},
'sql': {
'class': 'logging.StreamHandler',
'formatter': 'sql',
'level': 'DEBUG',
}
}
}
In general, to log SQL in Django we can add django.db.backends to the loggers config in settings.py:
'loggers': {
'django.db.backends': {
'handlers': ['sql'],
'level': 'DEBUG',
'propagate': False,
},
But the problem is that it logs every SQL statement. So how can we start and stop logging for django.db.backends partway through the code?
I have the following code in my views.py
def someview(request):
# start logging from here
user_set = User.objects.all()
for user in user_set:
print(user.last_name)
# stop logging from here
Also, I want to use the sql handler that I defined in the logging config.
What code should go in the "start logging" and "stop logging" places in the above view function?
Create a filter class and add an instance to the logger or handler.
class LoggerGate:
def __init__(self, state='open'):
self.state = state
def open(self):
self.state = 'open'
def close(self):
self.state = 'closed'
def filter(self, record):
return self.state == 'open'
Create a filter, initialized in the 'closed' state.
Get the 'django.db.backends' logger and add the filter.
gate = LoggerGate('closed')
sql_logger = logging.getLogger('django.db.backends')
sql_logger.addFilter(gate)
Then call the open or close method to limit logging to where you want it.
def someview(request):
gate.open() # start logging from here
user_set = User.objects.all()
for user in user_set:
print(user.last_name)
gate.close() # stop logging here
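A possible refinement, shown here only as a sketch on top of the answer above (it reuses the same gate object and User queryset): a small context manager guarantees the gate is closed again even if the view raises.

from contextlib import contextmanager

@contextmanager
def sql_logging(gate):
    gate.open()
    try:
        yield
    finally:
        gate.close()  # always close, even if the wrapped code raises

def someview(request):
    with sql_logging(gate):  # SQL executed inside this block is logged
        user_set = User.objects.all()
        for user in user_set:
            print(user.last_name)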
Just summarizing from the above answer and from the answers of Gabriel C and Sraw, which are essentially the same: the LoggerGate filter and SQLFormatter configuration shown earlier (the logging config in settings.py, then opening and closing the gate in views.py) apply here unchanged.
I am logging my database queries in Django along with the pathname and line number.
Right now I am getting logs like this:
07/Dec/2018 14:25:00 DEBUG django.db.backends utils **/Users/XXXXX/.idea/lib/python2.7/site-packages/django/db/backends/utils.py:89**
(0.340) SELECT "metadata"."metaname", "metadata"."description", "metadata"."attributes" FROM "metadata" WHERE "metadata"."metaname" = 'date_type'; args=('date_type',)
For all queries I am getting the same path and line number. Is there any way I can capture the line number from my main application instead of the one from utils.py?
Current logging Implementation:
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'color'
},
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
}
}
}
Using Python 2.7 and Django 1.9.
A slightly optimized version of the answer from user will-keeling.
Logging configuration for Django that outputs the originating line numbers for each DB request.
Note: if you want to use it in tests, you need to set DEBUG=True for the tests (see "How do you set DEBUG to True when running a Django test?"); a sketch of this follows the example output below.
import logging
import traceback
from django.conf import settings
class StackInfoHandler(logging.StreamHandler):
trim = 5
def emit(self, record):
super(StackInfoHandler, self).emit(record)
trace = traceback.format_stack()
stack1 = [str(row) for row in trace]
stack2 = [s for s in stack1 if settings.BASE_DIR in s and 'format_stack' not in s]
stack3 = [s for s in stack2 if 'test' not in s]
if not stack3:
stack3 = stack2 # include test call
if stack3:
stack4 = ''.join(stack3[-self.trim:])  # take only the last few frames
stack5 = f"Stack {self.terminator} {stack4}"
self.stream.write(stack5)
self.stream.write(self.terminator)
self.flush()
Logging config (partial):
LOGGING = {
    'handlers': {
        'db-console': {
            'level': 'DEBUG',
            'class': 'settings.local.StackInfoHandler',  # Reference the custom handler
            'formatter': 'simple',
        },
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['db-console'],
            'level': 'DEBUG',
            'propagate': False
        },
    },
}
This will show you only the stack trace from your Django codebase, like below:
[2020-05-25 17:49:17,977]: (0.000) INSERT INTO `contacts_contactscount` (`user_id`, `date`, `amount`) VALUES (338, '2020-05-25 17:49:17', 7); args=[338, '2020-05-25 17:49:17', 7]
Stack
File "<project-root>/api/views/contacts.py", line 164, in create
Contact.objects.filter(pk__in=to_delete).delete()
File "<project-root>/<folder>/contacts/models.py", line 54, in delete
create_deletion_log.delay(obj, deleted_timestamp)
File "<project-root>/<folder>/contacts/tasks.py", line 31, in create_deletion_log
contact.save()
File "<project-root>/<folder>/contacts/models.py", line 118, in save
Contact.objects.contacts_added_hook(self.user)
File "<project-root>/<folder>/contacts/models.py", line 67, in contacts_added_hook
current_total = user.profile.contacts_total
File "<project-root>/<folder>/profile/models.py", line 631, in contacts_total
ContactsCount.objects.create(user=self.user, amount=count)
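Regarding the note about tests above, here is a minimal sketch. Django only emits SQL on the django.db.backends logger when DEBUG is True, and the test runner forces DEBUG to False, so it has to be overridden per test:

from django.contrib.auth.models import User
from django.test import TestCase, override_settings

class QueryLoggingTest(TestCase):
    @override_settings(DEBUG=True)  # re-enable SQL logging for this test only
    def test_something_that_runs_queries(self):
        list(User.objects.all())  # this SELECT is now logged via the stack-info handler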
I'm guessing that you're trying to determine which lines in your application are responsible for running which queries.
One way to achieve this would be to create a custom handler that prints out the current stack at the point where Django logs the query. That would allow you to see which line in your application is executing.
You could create a custom handler such as:
import logging
import traceback
class StackInfoHandler(logging.StreamHandler):
trim = 5
def emit(self, record):
super(StackInfoHandler, self).emit(record)
stack = ''.join(
str(row) for row in traceback.format_stack()[:-self.trim]
)
self.stream.write(stack)
And then in your logging config, you could just switch the handler class to use the StackInfoHandler:
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'my.package.StackInfoHandler', # Reference the custom handler
'formatter': 'color'
},
},
Note that the StackInfoHandler trims 5 lines off the stack so that it doesn't show you stack frames from the logging framework itself. You might need to tweak this number (5 works for me locally).
Related to a previous question I asked, Airbrake logger in Django still sending notifications even though its level is set to 'CRITICAL'?, I'm working on a Django project with multiple versions of settings.py: settings/base.py, settings/staging.py, etc.
There is also a kind of settings 'mixin', settings/staging_development.py, which contains the following LOGGING configuration:
# Auxiliary variable used in LOGGING
_AIRBRAKE_LOGGER = {
'handlers': ['airbrake'],
'level': 'ERROR',
'propagate': True,
}
# Airbrake logging integration (cf. https://github.com/airbrake/pybrake#django-integration)
# In our case, 'app' is replaced by three apps, 'lucy_web', 'api', and 'activation'.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'airbrake': {
'level': 'ERROR',
'class': 'pybrake.LoggingHandler',
},
},
'loggers': {
'lucy_web': _AIRBRAKE_LOGGER,
'api': _AIRBRAKE_LOGGER,
'activation': _AIRBRAKE_LOGGER,
},
}
This is imported in settings/staging.py, where LOGGING is further updated with .update() as follows:
from lucy.settings.staging_production import *
# LOGGING = {
# 'version': 1,
# 'disable_existing_loggers': False,
# 'handlers': {
# 'console': {
# 'class': 'logging.StreamHandler',
# },
# },
# 'loggers': {
# 'django': {
# 'handlers': ['console'],
# 'level': os.getenv('LOG_LEVEL', 'INFO'),
# },
# },
# }
LOGGING['handlers'].update(console={
'class': 'logging.StreamHandler'
})
LOGGING['loggers'].update(django={
'handlers': ['console'],
'level': os.getenv('LOG_LEVEL', 'INFO'),
})
What I'd like to achieve is to log to Airbrake as well as to the console. Now, if I simply uncomment the commented-out code and redefine the LOGGING configuration, I notice that error messages are successfully logged to the console. If I use the code as-is, however, they are not, even though I do get Airbrake notifications.
If I poke around in the shell, everything looks OK: for example, the 'lucy_web' and 'django' loggers both have handlers attached and propagate set to True:
(venv) Kurts-MacBook-Pro-2:lucy-web kurtpeek$ ENV_ROLE=staging_on_localhost python manage.py shell
Python 3.6.4 (v3.6.4:d48ecebad5, Dec 18 2017, 21:07:28)
Type 'copyright', 'credits' or 'license' for more information
IPython 6.3.1 -- An enhanced Interactive Python. Type '?' for help.
In [1]: from django.conf import settings
In [2]: settings.LOGGING
Out[2]:
{'version': 1,
'disable_existing_loggers': False,
'handlers': {'airbrake': {'level': 'ERROR',
'class': 'pybrake.LoggingHandler'},
'console': {'class': 'logging.StreamHandler'}},
'loggers': {'lucy_web': {'handlers': ['airbrake'],
'level': 'ERROR',
'propagate': True},
'api': {'handlers': ['airbrake'], 'level': 'ERROR', 'propagate': True},
'activation': {'handlers': ['airbrake'],
'level': 'ERROR',
'propagate': True},
'django': {'handlers': ['console'], 'level': 'ERROR'}}}
In [3]: import logging
In [4]: logger = logging.getLogger('lucy_web')
In [6]: logger.propagate
Out[6]: True
In [7]: logger.hasHandlers()
Out[7]: True
In [8]: logger.handlers
Out[8]: [<LoggingHandler (ERROR)>]
In [9]: django_logger = logging.getLogger('django')
In [10]: django_logger.handlers
Out[10]: [<StreamHandler <stderr> (NOTSET)>]
In [11]: django_logger.getEffectiveLevel()
Out[11]: 40
In [12]: logging.ERROR
Out[12]: 40
In [13]: django_logger.propagate
Out[13]: True
In short, I can get one type of logging to work or the other, but not both at the same time, even though log propagation is enabled.
Any idea what the issue might be?
I resolved the problem by adding the 'console' handler to the app loggers ('lucy_web', 'api', and 'activation') themselves, rather than to the catch-all 'django' logger. Actually, I think the root of the problem was that the errors I was raising were not in the 'django' logger's hierarchy.
So firstly, in settings/base.py I use deepcopy:
from copy import deepcopy
# Auxiliary variable used in LOGGING
_AIRBRAKE_LOGGER = {
'handlers': ['airbrake'],
'level': 'ERROR',
'propagate': True,
}
# Airbrake logging integration (cf. https://github.com/airbrake/pybrake#django-integration)
# In our case, 'app' is replaced by three apps, 'lucy_web', 'api', and 'activation'.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'airbrake': {
'level': 'ERROR',
'class': 'pybrake.LoggingHandler',
},
},
'loggers': {
# The deepcopy allows us to append to each app's 'handlers' list without affecting the others
'lucy_web': deepcopy(_AIRBRAKE_LOGGER),
'api': deepcopy(_AIRBRAKE_LOGGER),
'activation': deepcopy(_AIRBRAKE_LOGGER),
},
}
And secondly, in settings/staging.py I updated LOGGING like so:
LOGGING['handlers'].update(console={
'class': 'logging.StreamHandler'
})
for app in ('lucy_web', 'api', 'activation'):
LOGGING['loggers'][app]['handlers'].append('console')
Now errors get logged both to Airbrake and to the console.
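The deepcopy matters because without it the three logger entries alias the same dict, so appending 'console' to one app's handlers list would change all of them. A standalone sketch of the difference:

from copy import deepcopy

template = {'handlers': ['airbrake']}

# Without deepcopy, both loggers point at the very same dict:
aliased = {'lucy_web': template, 'api': template}
aliased['lucy_web']['handlers'].append('console')
print(aliased['api']['handlers'])      # ['airbrake', 'console'] -- 'api' changed as well

# With deepcopy, each logger owns an independent copy:
template = {'handlers': ['airbrake']}
independent = {'lucy_web': deepcopy(template), 'api': deepcopy(template)}
independent['lucy_web']['handlers'].append('console')
print(independent['api']['handlers'])  # ['airbrake'] -- unaffected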
I'm on Django 1.8 and trying to customize the class
django.utils.log.AdminEmailHandler
Here is my custom class:
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.log import AdminEmailHandler
from django.views.debug import ExceptionReporter, get_exception_reporter_filter

class MyCustomAdminEmailHandler(AdminEmailHandler):
# emit() copied and overridden from v1.8 so we can add request.user into the message
def emit(self, record):
current_user = None
try:
request = record.request
subject = '%s (%s IP): %s' % (
record.levelname,
('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
else 'EXTERNAL'),
record.getMessage()
)
filter = get_exception_reporter_filter(request)
request_repr = '\n{}'.format(force_text(filter.get_request_repr(request)))
current_user = request.user
except Exception:
subject = '%s: %s' % (
record.levelname,
record.getMessage()
)
request = None
request_repr = "unavailable"
subject = self.format_subject(subject)
if record.exc_info:
exc_info = record.exc_info
else:
exc_info = (None, record.getMessage(), None)
message = "User:%s\n\n%s\n\nRequest repr(): %s" % (current_user, self.format(record), request_repr)
reporter = ExceptionReporter(request, is_email=True, *exc_info)
html_message = reporter.get_traceback_html() if self.include_html else None
self.send_mail(subject, message, fail_silently=True, html_message=html_message)
And here is the relevant part of my logging config in settings.py
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
# can't just override this so customize here
'mail_admins':{
'level': 'ERROR',
'class': 'my_utils.logging_utils.MyCustomAdminEmailHandler',
'include_html': True,
},
},
}
When I test this out, I don't get an HTML email and I don't get my customizations in the email.
Does anyone know what I'm missing?
------ edit ------
@pynchia's comment: django.utils.log already has this relevant logging config for loggers:
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.security': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'py.warnings': {
'handlers': ['console'],
},
}
----- 2nd edit ------
For the record, @pynchia's comment worked. I had to manually add it to my logging config.
Weird.
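For completeness, a minimal sketch of that manual addition (using the custom handler path from the config above and mirroring the django.utils.log defaults quoted in the first edit):

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'my_utils.logging_utils.MyCustomAdminEmailHandler',
            'include_html': True,
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
    },
}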