Airflow Google Cloud Logging - python

For Apache Airflow v1.10 running on Python 2.7, installed with `pip install airflow[gcp_api]`, I am trying to set up logging to Google Cloud Storage. I have the following log_config.py file:
import os

from airflow import configuration as conf

GCS_LOG_FOLDER = 'gs://GCSbucket/'

LOG_LEVEL = conf.get('core', 'LOGGING_LEVEL').upper()
FAB_LOG_LEVEL = conf.get('core', 'FAB_LOGGING_LEVEL').upper()
LOG_FORMAT = conf.get('core', 'LOG_FORMAT')
BASE_LOG_FOLDER = conf.get('core', 'BASE_LOG_FOLDER')
PROCESSOR_LOG_FOLDER = conf.get('scheduler', 'CHILD_PROCESS_LOG_DIRECTORY')
FILENAME_TEMPLATE = conf.get('core', 'LOG_FILENAME_TEMPLATE')
PROCESSOR_FILENAME_TEMPLATE = conf.get('core', 'LOG_PROCESSOR_FILENAME_TEMPLATE')
# Storage bucket url for remote logging
# s3 buckets should start with "s3://"
# gcs buckets should start with "gs://"
# wasb buckets should start with "wasb"
# just to help Airflow select correct handler
REMOTE_BASE_LOG_FOLDER = conf.get('core', 'REMOTE_BASE_LOG_FOLDER')
LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'airflow': {
            'format': LOG_FORMAT,
        },
    },
    'handlers': {
        'console': {
            'class': 'airflow.utils.log.logging_mixin.RedirectStdHandler',
            'formatter': 'airflow',
            'stream': 'sys.stdout'
        },
        'task': {
            'class': 'airflow.utils.log.file_task_handler.FileTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
            'filename_template': FILENAME_TEMPLATE,
        },
        'processor': {
            'class': 'airflow.utils.log.file_processor_handler.FileProcessorHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
            'filename_template': PROCESSOR_FILENAME_TEMPLATE,
        },
        # Add a GCSTaskHandler to the 'handlers' block of the LOGGING_CONFIG variable
        'gcs.task': {
            'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
            'gcs_log_folder': GCS_LOG_FOLDER,
            'filename_template': FILENAME_TEMPLATE,
        },
    },
    'loggers': {
        'airflow.processor': {
            'handlers': ['processor'],
            'level': LOG_LEVEL,
            'propagate': False,
        },
        'airflow.task': {
            'handlers': ['gcs.task'],
            'level': LOG_LEVEL,
            'propagate': False,
        },
        'airflow.task_runner': {
            'handlers': ['gcs.task'],
            'level': LOG_LEVEL,
            'propagate': True,
        },
        'flask_appbuilder': {
            'handlers': ['console'],
            'level': FAB_LOG_LEVEL,
            'propagate': True,
        },
    },
    'root': {
        'handlers': ['console'],
        'level': LOG_LEVEL,
    },
}
REMOTE_HANDLERS = {
    's3': {
        'task': {
            'class': 'airflow.utils.log.s3_task_handler.S3TaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
            's3_log_folder': REMOTE_BASE_LOG_FOLDER,
            'filename_template': FILENAME_TEMPLATE,
        },
        'processor': {
            'class': 'airflow.utils.log.s3_task_handler.S3TaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
            's3_log_folder': REMOTE_BASE_LOG_FOLDER,
            'filename_template': PROCESSOR_FILENAME_TEMPLATE,
        },
    },
    'gcs': {
        'task': {
            'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
            'gcs_log_folder': REMOTE_BASE_LOG_FOLDER,
            'filename_template': FILENAME_TEMPLATE,
        },
        'processor': {
            'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
            'gcs_log_folder': REMOTE_BASE_LOG_FOLDER,
            'filename_template': PROCESSOR_FILENAME_TEMPLATE,
        },
    },
    'wasb': {
        'task': {
            'class': 'airflow.utils.log.wasb_task_handler.WasbTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
            'wasb_log_folder': REMOTE_BASE_LOG_FOLDER,
            'wasb_container': 'airflow-logs',
            'filename_template': FILENAME_TEMPLATE,
            'delete_local_copy': False,
        },
        'processor': {
            'class': 'airflow.utils.log.wasb_task_handler.WasbTaskHandler',
            'formatter': 'airflow',
            'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
            'wasb_log_folder': REMOTE_BASE_LOG_FOLDER,
            'wasb_container': 'airflow-logs',
            'filename_template': PROCESSOR_FILENAME_TEMPLATE,
            'delete_local_copy': False,
        },
    },
}
REMOTE_LOGGING = conf.get('core', 'remote_logging')

if REMOTE_LOGGING and REMOTE_BASE_LOG_FOLDER.startswith('s3://'):
    LOGGING_CONFIG['handlers'].update(REMOTE_HANDLERS['s3'])
elif REMOTE_LOGGING and REMOTE_BASE_LOG_FOLDER.startswith('gs://'):
    LOGGING_CONFIG['handlers'].update(REMOTE_HANDLERS['gcs'])
elif REMOTE_LOGGING and REMOTE_BASE_LOG_FOLDER.startswith('wasb'):
    LOGGING_CONFIG['handlers'].update(REMOTE_HANDLERS['wasb'])
My airflow.cfg settings are:
[core]
remote_logging = True
remote_base_log_folder = gs://GCSbucket/logs
remote_log_conn_id = google_cloud_default
The error I get is the following:
Traceback (most recent call last):
  File "/usr/local/lib/python2.7/atexit.py", line 24, in _run_exitfuncs
    func(*targs, **kargs)
  File "/usr/local/lib/python2.7/logging/__init__.py", line 1676, in shutdown
    h.close()
  File "/usr/local/lib/python2.7/site-packages/airflow/utils/log/gcs_task_handler.py", line 73, in close
    if self.closed:
AttributeError: 'GCSTaskHandler' object has no attribute 'closed'
Does anybody know what might have gone wrong?
The tutorial that is being followed is: https://airflow.readthedocs.io/en/1.10.0/howto/write-logs.html
Update: I did some more research in the source code; here I see that the close statement returns nothing, and this is where my application crashes:
https://github.com/apache/incubator-airflow/blob/v1-10-stable/airflow/utils/log/gcs_task_handler.py
Does somebody know why nothing is returned in
def close(self):
    if self.closed:
        return

The instructions might be outdated. Please try with the instructions from the following link:
https://airflow.readthedocs.io/en/latest/howto/write-logs.html#writing-logs-to-google-cloud-storage
Follow the steps below to enable Google Cloud Storage logging.
To enable this feature, airflow.cfg must be configured as in this example:
[core]
# Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search.
# Users must supply an Airflow connection id that provides access to the storage
# location. If remote_logging is set to true, see UPDATING.md for additional
# configuration requirements.
remote_logging = True
remote_base_log_folder = gs://my-bucket/path/to/logs
remote_log_conn_id = MyGCSConn
1. Install the gcp_api package first, like so: pip install apache-airflow[gcp_api].
2. Make sure a Google Cloud Platform connection hook has been defined in Airflow. The hook should have read and write access to the Google Cloud Storage bucket defined above in remote_base_log_folder. (A sketch of creating such a connection programmatically follows the sample output below.)
3. Restart the Airflow webserver and scheduler, and trigger (or wait for) a new task execution.
4. Verify that logs are showing up for newly executed tasks in the bucket you've defined.
5. Verify that the Google Cloud Storage viewer is working in the UI. Pull up a newly executed task, and verify that you see something like:
*** Reading remote log from gs://<bucket where logs should be persisted>/example_bash_operator/run_this_last/2017-10-03T00:00:00/16.log.
[2017-10-03 21:57:50,056] {cli.py:377} INFO - Running on host chrisr-00532
[2017-10-03 21:57:50,093] {base_task_runner.py:115} INFO - Running: ['bash', '-c', u'airflow run example_bash_operator run_this_last 2017-10-03T00:00:00 --job_id 47 --raw -sd DAGS_FOLDER/example_dags/example_bash_operator.py']
[2017-10-03 21:57:51,264] {base_task_runner.py:98} INFO - Subtask: [2017-10-03 21:57:51,263] {__init__.py:45} INFO - Using executor SequentialExecutor
[2017-10-03 21:57:51,306] {base_task_runner.py:98} INFO - Subtask: [2017-10-03 21:57:51,306] {models.py:186} INFO - Filling up the DagBag from /airflow/dags/example_dags/example_bash_operator.py
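For the connection step above, one option is to create the connection programmatically rather than through the web UI. A minimal sketch, assuming Airflow 1.10's ORM models; the connection id must match remote_log_conn_id, and the project id and key path are placeholders:

from airflow import settings
from airflow.models import Connection

session = settings.Session()
session.add(Connection(
    conn_id='MyGCSConn',
    conn_type='google_cloud_platform',
    # Extra fields understood by the GCP hook; the values are placeholders.
    extra='{"extra__google_cloud_platform__project": "my-project", '
          '"extra__google_cloud_platform__key_path": "/path/to/keyfile.json"}',
))
session.commit()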

To resolve this issue, I added the following to the [core] section of airflow.cfg:
[core]
log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number }}.log
log_processor_filename_template = {{ filename }}.log
# Log format
# we need to escape the curly braces by adding an additional curly brace
log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s
simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s
# Logging class
# Specify the class that will specify the logging configuration
# This class has to be on the python classpath
# logging_config_class = my.path.default_local_settings.LOGGING_CONFIG
logging_config_class = log_config.LOGGING_CONFIG
task_log_reader = gcs.task
In log_config.LOGGING_CONFIG I added the following:
# Add a GCSTaskHandler to the 'handlers' block of the LOGGING_CONFIG variable
'gcs.task': {
    'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
    'formatter': 'airflow',
    'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
    'gcs_log_folder': GCS_LOG_FOLDER,
    'filename_template': FILENAME_TEMPLATE,
},
The tutorial to be followed is:
https://airflow.readthedocs.io/en/1.10.0/howto/write-logs.html
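For completeness: the tutorial expects log_config.py to live on the Python path (typically $AIRFLOW_HOME/config/log_config.py with an empty __init__.py beside it) so that logging_config_class = log_config.LOGGING_CONFIG is importable. A minimal sketch of such a module; starting from DEFAULT_LOGGING_CONFIG instead of copying the whole template is my own shortcut, not from the tutorial:

import os
from copy import deepcopy

from airflow import configuration as conf
from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG

# Start from Airflow's shipped logging template and graft the GCS handler on.
LOGGING_CONFIG = deepcopy(DEFAULT_LOGGING_CONFIG)
LOGGING_CONFIG['handlers']['gcs.task'] = {
    'class': 'airflow.utils.log.gcs_task_handler.GCSTaskHandler',
    'formatter': 'airflow',
    'base_log_folder': os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER')),
    'gcs_log_folder': conf.get('core', 'REMOTE_BASE_LOG_FOLDER'),
    'filename_template': conf.get('core', 'LOG_FILENAME_TEMPLATE'),
}
LOGGING_CONFIG['loggers']['airflow.task']['handlers'] = ['gcs.task']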

Related

Can't add username to logging record using Middleware

I'm trying to log the username and project by default (both can be derived from the request object). I don't want to add that context to every log call manually.
The problem is that I can't make Django add the request, or the username and project directly, to the LogRecord. I've tried tens of ways.
This is my code:
middlewares.py
import threading

local = threading.local()

class LoggingRequestMiddleware:
    def __init__(self, get_response):
        self.get_response = get_response
        # One-time configuration and initialization.

    def __call__(self, request):
        # Code to be executed for each request before
        # the view (and later middleware) are called.
        setattr(local, 'request', request)
        response = self.get_response(request)
        # Code to be executed for each request/response after
        # the view is called.
        return response
settings.py
import threading

def add_username_to_log(record):
    local = threading.local()
    record.username = '-'
    request = getattr(local, 'request', None)
    print(request)
    return True
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': LOGGING_VERBOSE_FORMAT,
            'style': '{',
        },
    },
    'filters': {
        'context_filter': {
            '()': 'django.utils.log.CallbackFilter',
            'callback': add_username_to_log,
        },
    },
    'handlers': {
        'console': {
            'level': DEFAULT_LOG_LEVEL,
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
            'filters': ['context_filter'],
        },
        'file_main': {
            'level': DEFAULT_LOG_LEVEL,
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOG_PATH, 'main.log'),
            'maxBytes': DEFAULT_LOG_SIZE,
            'formatter': 'verbose',
            'filters': ['context_filter'],
            'backupCount': 0,
        },
    },
    'loggers': {
        '': {
            'handlers': ['file_main'],
            'level': DEFAULT_LOG_LEVEL,
            'propagate': False,
        },
    },
}
But the request object is always None. Do you know why?
threading.local() returns a new object every time; you have to read and write to the same object.
import threading

locals_a = threading.local()
locals_a.foo = 1
hasattr(locals_a, 'foo')  # True

locals_b = threading.local()
hasattr(locals_b, 'foo')  # False
You need to define your locals object in one place that you can then import everywhere you need to access the request, and read and write to that same object every time. As a basic example, this should work:
def add_username_to_log(record):
    from middlewares import local  # the same module-level object the middleware writes to
    request = getattr(local, 'request', None)
    record.username = getattr(request, 'user', '-')  # falls back to '-' when no request is stored
    return True  # CallbackFilter drops the record on a falsy return value
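Once the filter sets record.username, the formatter can reference it. The question doesn't show LOGGING_VERBOSE_FORMAT, so this format string is only an assumption illustrating the '{' style it declares:

# Hypothetical value for the LOGGING_VERBOSE_FORMAT used in the question.
LOGGING_VERBOSE_FORMAT = '{asctime} [{levelname}] user={username} {message}'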

django database query log linenumber

I am logging my database queries in Django along with the pathname and line number.
Right now I am getting these logs:
07/Dec/2018 14:25:00 DEBUG django.db.backends utils /Users/XXXXX/.idea/lib/python2.7/site-packages/django/db/backends/utils.py:89
(0.340) SELECT "metadata"."metaname", "metadata"."description", "metadata"."attributes" FROM "metadata" WHERE "metadata"."metaname" = 'date_type'; args=('date_type',)
For all queries I am getting the same path and line number. Is there any way I can capture the line number from my main application instead of the one from utils?
Current logging implementation:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'color'
        },
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        }
    }
}
Using Python 2.7 and Django 1.9.
A slightly optimized version of the answer from user @will-keeling:
Logging configuration for Django that outputs line numbers for each DB request.
Note: if you want to use it for tests, you need to set DEBUG=True (see How do you set DEBUG to True when running a Django test?).
import logging
import traceback

from django.conf import settings

class StackInfoHandler(logging.StreamHandler):
    trim = 5

    def emit(self, record):
        super(StackInfoHandler, self).emit(record)
        trace = traceback.format_stack()
        stack1 = [str(row) for row in trace]
        stack2 = [s for s in stack1 if settings.BASE_DIR in s and 'format_stack' not in s]
        stack3 = [s for s in stack2 if 'test' not in s]
        if not stack3:
            stack3 = stack2  # include the test call
        if stack3:
            stack4 = ''.join(stack3[-self.trim:])  # take only the last records
            stack5 = f"Stack {self.terminator} {stack4}"
            self.stream.write(stack5)
            self.stream.write(self.terminator)
            self.flush()
Logging config (partial):
LOGGING = {
    'handlers': {
        'db-console': {
            'level': 'DEBUG',
            'class': 'settings.local.StackInfoHandler',  # Reference the custom handler
            'formatter': 'simple',
        },
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['db-console'],
            'level': 'DEBUG',
            'propagate': False,
        },
    },
}
This will show you only the stack trace from your Django codebase, like below:
[2020-05-25 17:49:17,977]: (0.000) INSERT INTO `contacts_contactscount` (`user_id`, `date`, `amount`) VALUES (338, '2020-05-25 17:49:17', 7); args=[338, '2020-05-25 17:49:17', 7]
Stack
  File "<project-root>/api/views/contacts.py", line 164, in create
    Contact.objects.filter(pk__in=to_delete).delete()
  File "<project-root>/<folder>/contacts/models.py", line 54, in delete
    create_deletion_log.delay(obj, deleted_timestamp)
  File "<project-root>/<folder>/contacts/tasks.py", line 31, in create_deletion_log
    contact.save()
  File "<project-root>/<folder>/contacts/models.py", line 118, in save
    Contact.objects.contacts_added_hook(self.user)
  File "<project-root>/<folder>/contacts/models.py", line 67, in contacts_added_hook
    current_total = user.profile.contacts_total
  File "<project-root>/<folder>/profile/models.py", line 631, in contacts_total
    ContactsCount.objects.create(user=self.user, amount=count)
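On the test note above: Django's test runner forces DEBUG to False, and django.db.backends only emits query logs when DEBUG is True. A minimal sketch of working around that with Django's override_settings (the test class and method names are hypothetical):

from django.test import TestCase, override_settings

@override_settings(DEBUG=True)
class QueryLoggingTest(TestCase):  # hypothetical test case
    def test_queries_are_logged(self):
        # With DEBUG=True, each query is logged at DEBUG level, and the
        # StackInfoHandler above appends the trimmed stack after it.
        ...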
I'm guessing that you're trying to determine which lines in your application are responsible for running which queries.
One way to achieve this would be to create a custom handler that prints out the current stack at the point where Django logs the query. That would allow you to see which line in your application is executing.
You could create a custom handler such as:
import logging
import traceback

class StackInfoHandler(logging.StreamHandler):
    trim = 5

    def emit(self, record):
        super(StackInfoHandler, self).emit(record)
        stack = ''.join(
            str(row) for row in traceback.format_stack()[:-self.trim]
        )
        self.stream.write(stack)
And then in your logging config, you could just switch the handler class to use the StackInfoHandler:
'handlers': {
    'console': {
        'level': 'DEBUG',
        'class': 'my.package.StackInfoHandler',  # Reference the custom handler
        'formatter': 'color'
    },
},
Note that the StackInfoHandler trims 5 lines off the stack so that it doesn't show you stack frames from the logging framework itself. You might need to tweak this number (5 works for me locally).

logs inside django channels consumers not showing up

Environment:
OS: redhat
python version: 3.6
django: 2.1
django channels: 2.1.3
The following is my logging.conf file (same level as settings.py):
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s',
            'datefmt': '%Y-%m-%d %H:%M:%S',
        },
    },
    'handlers': {
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': 'debug.log',
            'formatter': 'standard'
        },
    },
    'loggers': {
        'django': {
            'handlers': ['file'],
            'level': 'DEBUG',
            'propagate': True,
        },
    },
}
import logging

django_logger = logging.getLogger('django')
and the logging function is as follows:
def log_it(*args):
    try:
        django_logger.info(str(args).encode('utf-8'))
        return True
    except:
        django_logger.info('#### Exception in LOGGING!!!!!!')
        return False
I am using this to log inside Django consumers as follows:
import traceback

from channels.consumer import SyncConsumer

class EchoConsumer(SyncConsumer):
    def websocket_connect(self, event):
        try:
            log_it('inside EchoConsumer connect()', event)
            self.send({
                "type": "websocket.accept",
            })
            return True
        except:
            error = traceback.format_exc()
            write_error_log(error)
            return False

    def websocket_receive(self, event):
        try:
            log_it('inside EchoConsumer receive()', event)
            self.send({
                "type": "websocket.send",
                "text": event["text"],
            })
            return True
        except:
            error = traceback.format_exc()
            write_error_log(error)
            return False
The log_it() calls inside the consumers aren't producing any log output.
(Assumption: integrating Channels with Django must have overridden the Django logging settings.)
I checked the daphne access logs for any problem in reaching the consumers.
I also checked my nginx conf; the connection requests were recorded as follows:
127.0.0.1:45100 - - [08/Oct/2018:19:00:58] "WSCONNECTING /ws/event/" - -
127.0.0.1:45100 - - [08/Oct/2018:19:00:58] "WSCONNECT /ws/event/" - -
Note: I have linked /ws/event to EchoConsumer in routing.py
My expectation is:
127.0.0.1:45100 - - [08/Oct/2018:19:00:58] "WSCONNECTING /ws/event/" - -
127.0.0.1:45100 - - [08/Oct/2018:19:00:58] "WSCONNECT /ws/event/" - -
inside EchoConsumer connect() ....
Any idea why the log statements are not being displayed?
P.S.: related issue on GitHub
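No answer is recorded here, but one way to test the assumption above is to inspect the logger's state from inside the consumer process. A hedged diagnostic sketch (not a confirmed fix), using only the standard library:

import logging

def dump_logger_state(name='django'):
    # Shows whether the dictConfig from settings was applied in the
    # process that daphne/runworker actually runs the consumers in.
    logger = logging.getLogger(name)
    print('handlers:', logger.handlers)
    print('effective level:', logging.getLevelName(logger.getEffectiveLevel()))
    print('propagate:', logger.propagate)

Calling dump_logger_state() from websocket_connect() would show whether the 'django' logger in that process still has the FileHandler attached.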

Can't get two Python loggers to work at the same time

Related to a previous question I asked, Airbrake logger in Django still sending notifications even though its level is set to 'CRITICAL'?, I'm working on a Django project with multiple versions of settings.py: settings/base.py, settings/staging.py, etc.
There is also a kind of settings 'mixin', settings/staging_development.py, which contains the following LOGGING configuration:
# Auxiliary variable used in LOGGING
_AIRBRAKE_LOGGER = {
    'handlers': ['airbrake'],
    'level': 'ERROR',
    'propagate': True,
}

# Airbrake logging integration (cf. https://github.com/airbrake/pybrake#django-integration)
# In our case, 'app' is replaced by three apps, 'lucy_web', 'api', and 'activation'.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'airbrake': {
            'level': 'ERROR',
            'class': 'pybrake.LoggingHandler',
        },
    },
    'loggers': {
        'lucy_web': _AIRBRAKE_LOGGER,
        'api': _AIRBRAKE_LOGGER,
        'activation': _AIRBRAKE_LOGGER,
    },
}
This is imported into settings/staging.py, in which LOGGING is further .update()d as follows:
from lucy.settings.staging_production import *

# LOGGING = {
#     'version': 1,
#     'disable_existing_loggers': False,
#     'handlers': {
#         'console': {
#             'class': 'logging.StreamHandler',
#         },
#     },
#     'loggers': {
#         'django': {
#             'handlers': ['console'],
#             'level': os.getenv('LOG_LEVEL', 'INFO'),
#         },
#     },
# }

LOGGING['handlers'].update(console={
    'class': 'logging.StreamHandler'
})
LOGGING['loggers'].update(django={
    'handlers': ['console'],
    'level': os.getenv('LOG_LEVEL', 'INFO'),
})
What I'd like to achieve is to log to Airbrake as well as to the console. Now, if I simply uncomment the commented-out code and redefine the LOGGING configuration, error messages do get logged to the console. If I use the code as-is, however, they do not, even though I do get Airbrake notifications.
If I poke around in the shell, everything looks OK: for example, the 'lucy_web' and 'django' loggers both have handlers attached and propagate set to True:
(venv) Kurts-MacBook-Pro-2:lucy-web kurtpeek$ ENV_ROLE=staging_on_localhost python manage.py shell
Python 3.6.4 (v3.6.4:d48ecebad5, Dec 18 2017, 21:07:28)
Type 'copyright', 'credits' or 'license' for more information
IPython 6.3.1 -- An enhanced Interactive Python. Type '?' for help.
In [1]: from django.conf import settings
In [2]: settings.LOGGING
Out[2]:
{'version': 1,
'disable_existing_loggers': False,
'handlers': {'airbrake': {'level': 'ERROR',
'class': 'pybrake.LoggingHandler'},
'console': {'class': 'logging.StreamHandler'}},
'loggers': {'lucy_web': {'handlers': ['airbrake'],
'level': 'ERROR',
'propagate': True},
'api': {'handlers': ['airbrake'], 'level': 'ERROR', 'propagate': True},
'activation': {'handlers': ['airbrake'],
'level': 'ERROR',
'propagate': True},
'django': {'handlers': ['console'], 'level': 'ERROR'}}}
In [3]: import logging
In [4]: logger = logging.getLogger('lucy_web')
In [6]: logger.propagate
Out[6]: True
In [7]: logger.hasHandlers()
Out[7]: True
In [8]: logger.handlers
Out[8]: [<LoggingHandler (ERROR)>]
In [9]: django_logger = logging.getLogger('django')
In [10]: django_logger.handlers
Out[10]: [<StreamHandler <stderr> (NOTSET)>]
In [11]: django_logger.getEffectiveLevel()
Out[11]: 40
In [12]: logging.ERROR
Out[12]: 40
In [13]: django_logger.propagate
Out[13]: True
In short, I can get one type of logging to work or the other, but not both at the same time, even though log propagation is enabled.
Any idea what the issue might be?
I resolved the problem by adding the 'console' handler to the app loggers ('lucy_web', 'api', and 'activation') themselves, rather than to the catch-all 'django' logger. I think the root of the problem was that the errors I was throwing were not in the 'django' logger's hierarchy.
So firstly, in settings/base.py I use deepcopy:
from copy import deepcopy

# Auxiliary variable used in LOGGING
_AIRBRAKE_LOGGER = {
    'handlers': ['airbrake'],
    'level': 'ERROR',
    'propagate': True,
}

# Airbrake logging integration (cf. https://github.com/airbrake/pybrake#django-integration)
# In our case, 'app' is replaced by three apps, 'lucy_web', 'api', and 'activation'.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'airbrake': {
            'level': 'ERROR',
            'class': 'pybrake.LoggingHandler',
        },
    },
    'loggers': {
        # The deepcopy allows us to append to each app's 'handlers' list without affecting the others
        'lucy_web': deepcopy(_AIRBRAKE_LOGGER),
        'api': deepcopy(_AIRBRAKE_LOGGER),
        'activation': deepcopy(_AIRBRAKE_LOGGER),
    },
}
And secondly, in settings/staging.py I updated LOGGING like so:
LOGGING['handlers'].update(console={
    'class': 'logging.StreamHandler'
})

for app in ('lucy_web', 'api', 'activation'):
    LOGGING['loggers'][app]['handlers'].append('console')
Now errors get logged both to Airbrake and to the console.
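As an aside, the reason the deepcopy matters here is plain dict aliasing; a standalone sketch:

from copy import deepcopy

template = {'handlers': ['airbrake']}

# Without deepcopy, both loggers share the same dict, so appending to one
# app's handler list silently appends to all of them:
aliased = {'a': template, 'b': template}
aliased['a']['handlers'].append('console')
print(aliased['b']['handlers'])  # ['airbrake', 'console'] -- mutated too

# With deepcopy, each logger gets an independent copy:
template = {'handlers': ['airbrake']}
copied = {'a': deepcopy(template), 'b': deepcopy(template)}
copied['a']['handlers'].append('console')
print(copied['b']['handlers'])  # ['airbrake']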

django 1.8 can't override AdminEmailHandler

I'm on Django 1.8 and trying to customize the class
django.utils.log.AdminEmailHandler
Here is my custom class:
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.log import AdminEmailHandler
from django.views.debug import ExceptionReporter, get_exception_reporter_filter

class MyCustomAdminEmailHandler(AdminEmailHandler):
    # emit() copied from v1.8 and overridden so we can add the request .user into the message
    def emit(self, record):
        current_user = None
        try:
            request = record.request
            subject = '%s (%s IP): %s' % (
                record.levelname,
                ('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
                 else 'EXTERNAL'),
                record.getMessage()
            )
            filter = get_exception_reporter_filter(request)
            request_repr = '\n{}'.format(force_text(filter.get_request_repr(request)))
            current_user = request.user
        except Exception:
            subject = '%s: %s' % (
                record.levelname,
                record.getMessage()
            )
            request = None
            request_repr = "unavailable"
        subject = self.format_subject(subject)

        if record.exc_info:
            exc_info = record.exc_info
        else:
            exc_info = (None, record.getMessage(), None)

        message = "User:%s\n\n%s\n\nRequest repr(): %s" % (current_user, self.format(record), request_repr)
        reporter = ExceptionReporter(request, is_email=True, *exc_info)
        html_message = reporter.get_traceback_html() if self.include_html else None
        self.send_mail(subject, message, fail_silently=True, html_message=html_message)
And here is the relevant part of my logging config in settings.py:
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        # can't just override this, so customize it here
        'mail_admins': {
            'level': 'ERROR',
            'class': 'my_utils.logging_utils.MyCustomAdminEmailHandler',
            'include_html': True,
        },
    },
}
When I test this out, I don't get an HTML email and I don't get my customizations in the email.
Does anyone know what I'm missing?
------ edit ------
@pynchia's comment: django.utils.log already has this relevant logging config for loggers:
'loggers': {
    'django': {
        'handlers': ['console'],
    },
    'django.request': {
        'handlers': ['mail_admins'],
        'level': 'ERROR',
        'propagate': False,
    },
    'django.security': {
        'handlers': ['mail_admins'],
        'level': 'ERROR',
        'propagate': False,
    },
    'py.warnings': {
        'handlers': ['console'],
    },
}
----- 2nd edit ------
For the record, @pynchia's comment worked. I had to manually add it to my logging config (a sketch of the resulting config follows below).
Weird.
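For anyone hitting the same thing, my reading of that fix (a sketch assembled from the config above, not the poster's verbatim settings) is to replicate the default 'django.request' logger in settings.py so it explicitly routes through the custom handler:

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'my_utils.logging_utils.MyCustomAdminEmailHandler',
            'include_html': True,
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': False,
        },
    },
}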
