I have a logger and a class DuplicateFilter that filters messages that already were logged once. I would like to include the time when the logging happened into my filter but when I try to access the property asctime I get: AttributeError: 'LogRecord' object has no attribute 'asctime'
Here a small example how I set up my logger:
import logging
import logging.handlers as log_handlers
import traceback
def setup_logger(filename):
    """Build a console + weekly-rotating-file logger named after *filename*.

    The logger suppresses duplicate "<threadName> <msg>" messages unless it
    is running at DEBUG level.

    :param filename: log file path; also used as the logger's name.
    :returns: the configured ``logging.Logger``.
    """

    class DuplicateFilter(object):
        """Drop records whose "<threadName> <msg>" pair was already logged."""

        def __init__(self):
            self.msgs = set()

        def filter(self, record):
            # In DEBUG mode let everything through, duplicates included.
            if logger.level == logging.DEBUG:
                return True
            try:
                # Bug fix: a LogRecord only gains .asctime after a Formatter
                # has formatted it.  Format the record here so the timestamp
                # is available inside the filter.
                log_formatter.format(record)
                print(record.asctime)
                msg = record.threadName + " " + record.msg
                if msg in self.msgs:
                    return False  # duplicate: drop it
                # Bug fix: the original returned from the except block before
                # this line could ever run, so duplicates were never recorded.
                self.msgs.add(msg)
            except Exception:
                # Never let a filter error kill logging; report and pass through.
                print(traceback.format_exc())
            return True

    log_formatter = logging.Formatter(
        "%(asctime)s [%(levelname)-5.5s] [%(threadName)-30.30s] %(message)s")
    # Rotate weekly (W6 = Sunday), keeping 12 backups.
    file_handler = log_handlers.TimedRotatingFileHandler(
        filename, encoding="UTF-8", when='W6', backupCount=12)
    file_handler.setFormatter(log_formatter)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_formatter)
    logger = logging.getLogger(filename)
    logger.propagate = False
    logger.addHandler(console_handler)
    logger.addHandler(file_handler)
    logger.setLevel(logging.INFO)
    dup_filter = DuplicateFilter()
    logger.addFilter(dup_filter)
    return logger


log = setup_logger("test.log")
for i in range(3):
    log.info("wow")
Now my records look like this: 2018-07-18 14:34:49,642 [INFO ] [MainThread ] wow They clearly have an asctime and I explicitly set the asctime property in the constructor of my Formatter. The only question similar to mine I found says
To have message and asctime set, you must first call self.format(record) inside the emit method
but doesn't the logging.Formatter do that when you specify the log string the way I did with %(asctime)s?
EDIT: running.t was right; I just didn't understand what the documentation meant. I solved it by passing my formatter to my filter and calling the format function at the beginning:
# Fix from the question's EDIT (excerpt): the Formatter is injected into the
# filter and format() is called first, so record.asctime exists by the time
# the filter reads it.
def __init__(self, formatter):
# NOTE(review): msgs changed from a set to a dict here — presumably still
# keyed by "<thread> <msg>"; confirm the intended value type.
self.msgs = {}
self.formatter = formatter
def filter(self, record):
# Formatter.format() populates record.asctime (and record.message).
self.formatter.format(record)
In the filter objects section of the Python logging module documentation I found the following note:
Note that filters attached to handlers are consulted before an event is emitted by the handler, whereas filters attached to loggers are consulted whenever an event is logged (using debug(), info(), etc.), before sending an event to handlers. This means that events which have been generated by descendant loggers will not be filtered by a logger’s filter setting, unless the filter has also been applied to those descendant loggers.
Your filter is added to logger, while formatters are added to handlers. So in my opinion your filter method is applied before any of formatter you specified.
BTW, shouldn't your DuplicateFilter inherit from logging.Filter?
Related
Is it possible for a library of code to obtain a reference to a logger object created by client code that uses a unique name?
The python advanced logging tutorial says the following:
A good convention to use when naming loggers is to use a module-level logger, in each module which uses logging, named as follows:
logger = logging.getLogger(__name__) This means that logger names track the package/module hierarchy, and it’s intuitively obvious where events are logged just from the logger name.
Every module in my library does:
LOG = logging.getLogger(__name__)
This works fine when client code does something like:
logger = logging.getLogger()
But breaks (I get no log output to the registered handlers from the logging object in main) when client code does something like:
logger = logging.getLogger('some.unique.path')
Because I am packaging my code as a library to be used with a lot of different clients, I want to have the most extensible logging. That is, I want my module level logging to reference the same logger object as main whenever possible, whether the client code is using a named logger or not.
Here is an example program to reproduce on your end. Imagine test.py is my library of code that I want to always reference any logger created in main.py.
Example Output
% python3 main.py
in main
%
Desired Output
% python3 main.py
in main
hi
%
main.py
import logging
from test import somefunc
# This named logger is configured only here; test.py's module-level logger is
# named "test" (its __name__), which is NOT a descendant of
# "some.unique.path", so records from test.py never reach this handler —
# hence the missing "hi" in the example output.
LOG = logging.getLogger('some.unique.path')
LOG.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
LOG.addHandler(ch)
def main():
LOG.info('in main')
somefunc()
if __name__ == '__main__':
main()
test.py
import logging
# Module-level logger named "test" (__name__); it propagates toward the root
# logger, not toward "some.unique.path", which is why its output is lost in
# the example above.
LOG = logging.getLogger(__name__)
def somefunc():
LOG.info('hi')
This is the approach I would take.
Create a separate logging utility. I have attached the code for it below.
Now, import this logger utility (or) class (or) function that this provides, wherever needed.
Updated your code, assuming I am using the above linked logger utility and tested it locally on my machine.
Code for main.py
from logger import get_logger
from test import somefunc
# get_logger() is called with no name in both modules, so both resolve to the
# same logger (named after the logger utility module) and share its handlers.
LOG = get_logger()
def main():
LOG.info("in main")
somefunc()
if __name__ == "__main__":
main()
Code for test.py
from logger import get_logger
# Same utility call as main.py, so this module logs through the same handlers.
LOG = get_logger()
def somefunc():
LOG.info("hi")
Code for logger.py - Attaching the code here too
import logging
from logging.handlers import RotatingFileHandler
# Aliases kept from the original utility for py2/py3 compatibility.
bytes_type = bytes
unicode_type = str
basestring_type = str

# Name of the module-wide default logger.
DEFAULT_LOGGER = "default"
# Marker attribute set on handlers this utility creates, so they can be told
# apart from handlers added by user code.
INTERNAL_LOGGER_ATTR = "internal"
# Marker attribute for handlers whose level was pinned explicitly.
CUSTOM_LOGLEVEL = "customloglevel"

# Module-level default logger and its last-applied settings.
logger = None
_loglevel = logging.DEBUG
_logfile = None
_formatter = None


def get_logger(
    name=None,
    logfile=None,
    level=logging.DEBUG,
    maxBytes=0,
    backupCount=0,
    fileLoglevel=None,
    formatter=None,
):
    """Return a configured logger, reusing/reconfiguring existing handlers.

    Bug fix: ``setup_default_logger`` forwards a ``formatter=`` keyword that
    this function did not accept, raising ``TypeError``.  A backward-compatible
    ``formatter`` parameter (default ``None``) is now accepted and applied to
    the handlers this function touches.

    :param name: logger name; defaults to this module's ``__name__``.
    :param logfile: if given, attach an internal ``RotatingFileHandler``.
    :param level: level for the logger and its (re)configured handlers.
    :param maxBytes: rollover size for the file handler (0 = never).
    :param backupCount: number of rotated files to keep.
    :param fileLoglevel: optional distinct level for the file handler.
    :param formatter: optional ``logging.Formatter`` for the handlers.
    """
    _logger = logging.getLogger(name or __name__)
    _logger.propagate = False
    _logger.setLevel(level)

    # Reconfigure existing handlers instead of stacking duplicates.
    has_stream_handler = False
    for handler in list(_logger.handlers):
        if isinstance(handler, logging.StreamHandler):
            has_stream_handler = True
        if isinstance(handler, logging.FileHandler) and hasattr(
            handler, INTERNAL_LOGGER_ATTR
        ):
            # Internal FileHandler needs to be removed and re-setup to be able
            # to set a new logfile.
            _logger.removeHandler(handler)
            continue
        handler.setLevel(level)
        if formatter is not None:
            handler.setFormatter(formatter)

    if not has_stream_handler:
        stream_handler = logging.StreamHandler()
        setattr(stream_handler, INTERNAL_LOGGER_ATTR, True)
        stream_handler.setLevel(level)
        if formatter is not None:
            stream_handler.setFormatter(formatter)
        _logger.addHandler(stream_handler)

    if logfile:
        rotating_filehandler = RotatingFileHandler(
            filename=logfile, maxBytes=maxBytes, backupCount=backupCount
        )
        setattr(rotating_filehandler, INTERNAL_LOGGER_ATTR, True)
        rotating_filehandler.setLevel(fileLoglevel or level)
        if formatter is not None:
            rotating_filehandler.setFormatter(formatter)
        _logger.addHandler(rotating_filehandler)
    return _logger
def setup_default_logger(
    logfile=None, level=logging.DEBUG, formatter=None, maxBytes=0, backupCount=0
):
    """Configure and return the module-wide default logger.

    Bug fix: the original forwarded ``formatter=`` to ``get_logger``, which
    does not accept that keyword and raised ``TypeError``; the formatter is
    now applied to the logger's handlers after creation.  ``maxBytes`` and
    ``backupCount`` were also accepted but silently dropped; they are now
    forwarded so the rotating file handler honours them.
    """
    global logger
    logger = get_logger(
        name=DEFAULT_LOGGER,
        logfile=logfile,
        level=level,
        maxBytes=maxBytes,
        backupCount=backupCount,
    )
    if formatter is not None:
        for handler in logger.handlers:
            handler.setFormatter(formatter)
    return logger
def reset_default_logger():
    """Restore the module's default logger to its initial configuration."""
    global logger, _loglevel, _logfile, _formatter
    _loglevel = logging.DEBUG
    _logfile = None
    _formatter = None
    logger = get_logger(name=DEFAULT_LOGGER, logfile=_logfile, level=_loglevel)


# Initially setup the default logger
reset_default_logger()
def loglevel(level=logging.DEBUG, update_custom_handlers=False):
    """Set the minimum loglevel for the default logger.

    Only the utility's internal handlers (stream and logfile) are
    reconfigured unless ``update_custom_handlers`` is True; handlers that
    carry an explicit custom level marker are always left untouched.

    :param int level: Minimum logging-level (default: `logging.DEBUG`).
    :param bool update_custom_handlers: also update handlers added by user
        code by setting this to `True`.
    """
    global _loglevel
    logger.setLevel(level)
    for handler in list(logger.handlers):
        # Guard clause: skip handlers we are not supposed to touch.
        if not (hasattr(handler, INTERNAL_LOGGER_ATTR) or update_custom_handlers):
            continue
        # Don't update the loglevel if this handler uses a custom one.
        if hasattr(handler, CUSTOM_LOGLEVEL):
            continue
        handler.setLevel(level)
    _loglevel = level
def formatter(formatter, update_custom_handlers=False):
    """Set the formatter for the default logger's handlers.

    :param Formatter formatter: default uses the internal LogFormatter.
    :param bool update_custom_handlers: also update handlers added by user
        code by setting this to `True`.
    """
    global _formatter
    # Collect the handlers we are allowed to touch, then apply in one pass.
    targets = [
        h for h in list(logger.handlers)
        if update_custom_handlers or hasattr(h, INTERNAL_LOGGER_ATTR)
    ]
    for handler in targets:
        handler.setFormatter(formatter)
    _formatter = formatter
def logfile(
    filename,
    formatter=None,
    mode="a",
    maxBytes=0,
    backupCount=0,
    encoding=None,
    loglevel=None,
):
    """(Re)configure the default logger's rotating file handler.

    :param filename: file the logs are collected in; falsy removes the handler.
    :param formatter: optional ``logging.Formatter`` for the file output.
    :param mode: file open mode.
    :param maxBytes: roll-over at this size; if zero, rollover never occurs.
    :param backupCount: if non-zero, keep this many rotated logfiles.
    :param encoding: if not None, open the file with that encoding.
    :param loglevel: optional explicit level for this handler.
    """
    # Step 1: If an internal RotatingFileHandler already exists, remove it.
    for handler in list(logger.handlers):
        if isinstance(handler, RotatingFileHandler) and hasattr(
            handler, INTERNAL_LOGGER_ATTR
        ):
            logger.removeHandler(handler)
    # Step 2: If wanted, add the RotatingFileHandler now.
    if filename:
        rotating_filehandler = RotatingFileHandler(
            filename,
            mode=mode,
            maxBytes=maxBytes,
            backupCount=backupCount,
            encoding=encoding,
        )
        # Mark the handler as utility-managed.
        setattr(rotating_filehandler, INTERNAL_LOGGER_ATTR, True)
        if loglevel:
            setattr(rotating_filehandler, CUSTOM_LOGLEVEL, True)
        rotating_filehandler.setLevel(loglevel or _loglevel)
        # Bug fix: the ``formatter`` parameter was accepted but never applied;
        # use it (falling back to the module default, if any) so the file
        # output is actually formatted.
        if formatter is not None:
            rotating_filehandler.setFormatter(formatter)
        elif _formatter is not None:
            rotating_filehandler.setFormatter(_formatter)
        logger.addHandler(rotating_filehandler)
Output:
in main
hi
Note:
Do dive deep into the Logger Utility linked to understand all the internal details.
Use logging.basicConfig instead of manually trying to configure a logger. Loggers inherit their configuration from their parent.
import logging
from test import somefunc
# No handlers are attached here; this logger relies on propagation to the
# root logger, which basicConfig() configures below.
LOG = logging.getLogger('some.unique.path')
def main():
LOG.info('in main')
somefunc()
if __name__ == '__main__':
# basicConfig configures the root logger; every logger (including the
# library's module-level loggers) propagates to it, so both messages appear.
logging.basicConfig(level=logging.DEBUG)
main()
I enclose the code sample below. I extended the Logger class to try to make it more user-friendly for our purposes by adding a SumoLogic handler that can be easily utilised to send log messages to SumoLogic. This functionality works as expected and is not the issue. An abbreviated but functional form of my extended class can be seen below:
import sys
import logging
class CustomLogger(logging.getLoggerClass()):
# NOTE(review): instances are constructed directly instead of via
# logging.getLogger(), so they are never registered with the logging manager.
# getChild() goes through the manager, which therefore creates a plain Logger
# (default effective level WARNING) with no registered parent — exactly the
# behaviour the question observes.  Registering this class with
# logging.setLoggerClass() and obtaining loggers via logging.getLogger()
# would make getChild() return CustomLogger instances that inherit the level.
def __init__(self, logger_name, log_level="INFO"):
MSG_FORMAT = '%(asctime)s %(levelname)s %(name)s: %(message)s'
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
formatter = logging.Formatter(MSG_FORMAT, DATETIME_FORMAT)
# Initialise logger instance.
super(CustomLogger, self).__init__(logger_name)
self.handlers = []
self.setLevel(log_level)
# Create the stdout handler
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(log_level)
stream_handler.setFormatter(formatter)
self.addHandler(stream_handler)
self.debug('Initialised "{}" Logger'.format(logger_name))
def add_sumologic_handler(self, **kwargs):
"""
Call this method with a kwargs dictionary to add a Sumologic handler to your logger. This is done in the
following way:
... Comments removed ...
"""
self.debug(f"Attempting to add Sumologic Handler with the following endpoint settings:\n{kwargs}")
# ... SumoLogic handler addition code here...
def remove_sumologic_handler(self):
"""
Call this method to turn off Sumologic logging, if it has already been activated.
"""
self.debug(f"Attempting to remove Sumologic Handler.")
# ... SumoLogic handler removal code here...
I have found that when I call other methods from the base Logger class, they behave contrary to my expectations. By way of example, I often use the logger.getChild() method in script functions to identify the function that currently has execution focus, when writing log messages. When I call it, I expect it to take my logger and append the given child name as a postfix to my base logger name when writing log messages. However, that is not what is occurring. Instead, my new child logger is created, but with a base log level of WARNING, meaning it is not inheriting the log level of my parent logger.
Here is a test case that features the expected behaviour, that does not use my CustomLogger() class:
import sys
import logging
def initialise_logger(logger_name, log_level="INFO"):
    """Return the named logger with a formatted stdout handler attached."""
    MSG_FORMAT = '%(asctime)s %(levelname)s %(name)s: %(message)s'
    DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
    log_formatter = logging.Formatter(MSG_FORMAT, DATETIME_FORMAT)

    # Handler writing to stdout at the requested level.
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(log_level)
    handler.setFormatter(log_formatter)

    # Fetch (or create) the named logger and wire the handler in.
    named_logger = logging.getLogger(logger_name)
    named_logger.setLevel(log_level)
    named_logger.addHandler(handler)
    return named_logger


logger1 = initialise_logger("expected.logger", "INFO")
logger1.info("Test Message 1")
logger1a = logger1.getChild("child")
logger1a.info("Expected log message - note logger name hierarchy and inherited log level of INFO.")
... and here is an example using my CustomLogger() class extension of Logger:
# Same sequence as above, but with CustomLogger: getChild() asks the logging
# manager for "expected.logger.child", and the manager builds a plain Logger
# that knows nothing about the directly-constructed parent — so the INFO
# level is not inherited and the info() call produces no output.
logger1 = CustomLogger("expected.logger", "INFO")
logger1.info("Test Message 1")
logger1a = logger1.getChild("child")
logger1a.info("Expected log message is not output by the child logger, meaning the parent log level has not been inherited by the getChild() call.")
logger1a.warning("Now a log message is output by the child logger.")
assert isinstance(logger1, CustomLogger), "logger1 is NOT an instance of CustomLogger"
# NOTE(review): this second assertion is the one that fails — getChild()
# returned a plain Logger, not a CustomLogger.
assert isinstance(logger1a, CustomLogger), "logger1a is NOT an instance of CustomLogger"
assert isinstance(logger1a, logging.getLoggerClass()), "logger1a is NOT an instance of Logger"
The above shows that in the case of my CustomLogger class instance, a call to the base Logger class's getChild() function returns an instance of Logger rather than CustomLogger; contrary to my expectations of it inheriting the parent CustomLogger class instance, along with its settings.
Is anybody able to explain what is going wrong for me here, or provide a workaround, please?
Using python's logging module, I'd like to return non-zero if an error or critical was logged, or zero if no error or critical was logged. Do I have to add a layer to catch this, or is this native functionality?
import logging
# Desired-API sketch from the question: CheckMaxErr() should report the
# highest severity logged so far.  The logging module has no such built-in,
# so a custom handler (see the answer below) must track it.
def CheckMaxErr():
pass
logging.debug("Cat")
CheckMaxErr() #should return logging.DEBUG
logging.error("Dog")
CheckMaxErr() #should return logging.ERROR
You can implement a subclass of a logging handler and set a flag attribute on the instance if it is ever called. Set the level of the handler to logging.ERROR so that it only triggers on those messages.
import logging
class MyHandler(logging.Handler):
    """Handler that only remembers whether any record ever reached it."""

    def __init__(self):
        super().__init__()
        # Flipped to True the first time emit() runs.
        self.flag = False

    def emit(self, record):
        # No output of any kind -- just record that we were triggered.
        self.flag = True


# Attach the flag handler at ERROR level: only ERROR/CRITICAL records trip it.
logger = logging.getLogger(__name__)
handle = MyHandler()
handle.setLevel(logging.ERROR)
logger.addHandler(handle)

logger.debug("debug message")
print(handle.flag) # False
logger.error("ERROR")
print(handle.flag) # True
In this example, the handler doesn't actually do anything with the message, just sets the flag when it has been triggered. You can add another handler to log to wherever you want (console, file, etc.)
I have an application which continues running, the application won't be stopped, it will idle when there are no instructions by a client, and listen to the external event and react if there is any instruction input by the user at anytime
There is a configuration file which can be changed while the application is running, and the logging level is one of them. myLoggingLevel is the param below.
Is there anyway to change the logging level while the application is running?
I am using ConfigParser.RawConfigParser() for the configuration change.
import time
import logging
import datetime
def getLogger(loggerName='myLoggerName', logLevel='INFO', log_path='C:/logs/'):
# Formatter whose timestamps are always UTC HH:MM:SS.
class Formatter(logging.Formatter):
def formatTime(self, record, datefmt=None):
return (datetime.datetime.utcnow()).strftime('%H:%M:%S')
logLevel = logLevel.upper()
# Map config-file level names onto the logging module's constants.
levels = {'DEBUG' : logging.DEBUG,
'INFO' : logging.INFO,
'WARNING' : logging.WARNING,
'ERROR' : logging.ERROR,
'CRITICAL' : logging.CRITICAL}
today = datetime.datetime.utcnow().strftime('%Y-%m-%d')
full_log_path = log_path + '%s.%s.log' % (loggerName, today)
# One logger per (name, day).  Handlers — and the level — are only applied
# on first creation, which is why a level change in the configuration file
# is not picked up while the application keeps running.
logger = logging.getLogger(loggerName+'.'+today)
if not len(logger.handlers):
logger.setLevel(levels[logLevel])
fh = logging.FileHandler(full_log_path)
formatter = Formatter('%(asctime)s.%(msecs)03d | %(message)s', datefmt='%H:%M:%S')
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
# Console shows only ERROR and above; the file gets whatever the logger allows.
ch.setLevel(logging.ERROR)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
logger.info('loggerName: %s' % loggerName)
return logger
def run(myLoggingLevel):
# NOTE(review): to change the level at runtime, call setLevel() on the
# existing logger with the freshly-read config value; re-calling getLogger()
# returns the cached logger and skips the level assignment above.
while True:
log = getLogger(loggerName='testLogLevel', logLevel=myLoggingLevel)
log.debug('I am in debug')
log.info('I am in info')
time.sleep(3)
run(myLoggingLevel='debug')
In order to change the logging level during the execution, you can delete your logger and create a new one based on the new specs in your configuration file. You can create your own watchdog for instance that tracks whether your configuration file has changed or not and update accordingly the logger level as suggested. Or you can create an event for that via threading.Event. You may want to have a look at the threading module.
Finally, I wrote a function and run it on a timeout or on an event (e.g. a button click) to modify the logging level.
I have the following class:
class Log(object):
# class new
#new is used instead of init because __new__ is able to return (where __init__ can't)
# NOTE(review): the first parameter of __new__ receives the *class*, so the
# ``self.root = ...`` assignment below stores the logger as a class attribute
# shared by every instantiation, not as instance state.
def __new__(self, name, consolelevel, filelevel):
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(name)s: %(message)s')
#Create consolehandler and set formatting (level is set in the ROOT)
consolehandler = StreamHandler()
consolehandler.setFormatter(formatter)
#Create filehandler, set level and set formatting
filehandler = FileHandler(name + '.log')
filehandler.setLevel(filelevel)
filehandler.setFormatter(formatter)
#Create the root logger, add console and file logger. Set the rootlevel == consolelevel.
# NOTE(review): getLogger(name) returns the same singleton per name, so every
# Log(...) call appends two more handlers — messages get duplicated.
self.root = logging.getLogger(name)
#causing me problems....
# The logger's own level gates records *before* any handler level is checked,
# so setting it to consolelevel also caps what the file handler can receive —
# this is the behaviour the question complains about.
self.root.setLevel(consolelevel)
self.root.addHandler(consolehandler)
self.root.addHandler(filehandler)
self.root.propagate = True
return self.root
# Close the logger object
# NOTE(review): __new__ returns a Logger, not a Log instance, so this method
# is unreachable from the returned object (and it is also missing ``self``).
def close():
# to be implemented
pass
I use this class to log to the console and to a file (depending on the set level). The problem is that the root level seems to be leading for the addhandlers. Is there a way to disable this? Now I set the rootlevel to the same level as the consolelevel but this does not work...
Any advice?
Thanks in advance and with best regards,
JR
A problem that I can see in your code is that it will add more handlers whenever you instantiate the Log class. You probably do not want this.
Keep in mind that getLogger returns always the same instance when called with the same argument, and basically it implements the singleton pattern.
Hence when you later call addHandler it will add a new handler everytime.
The way to deal with logging is to create a logger at the module level and use it.
Also I'd avoid using __new__. In your case you can use a simple function. And note that your Log.close method wont work, because your __new__ method does not return a Log instance, and thus the returned logger doesn't have that method.
Regarding the level of the logger, I don't understand why you do not set the level on the consolehandler but on the whole logger.
This is a simplified version of the module I am making. The module contains a few classes that all need a logging functionality. Each class logs to a different file and and it should also be possible to change the file handler levels between classes (e.g. gamepad class: console.debug and filehandler.info and MQTT class: console info and filehandler.debug).
Therefore I thought that setting up a log class would be the easiest way. Please bear in mind that I usually do electronics but am now combining that with Python, so my skills are pretty basic...
#!/bin/env python2.7
from future import division
from operator import *
import logging
from logging import FileHandler
from logging import StreamHandler
import pygame
import threading
from pygame.locals import *
import mosquitto
import time
from time import sleep
import sys
class ConsoleFileLogger(object):
# class constructor
def __init__(self, filename, loggername, rootlevel, consolelevel, filelevel):
# logger levels: DEBUG, INFO, WARNING, ERROR, CRITICAL
# define a root logger and set its ROOT logging level
# NOTE(review): getLogger(loggername) is a per-name singleton, so building a
# second ConsoleFileLogger with the same name stacks duplicate handlers.
logger = logging.getLogger(loggername)
logger.setLevel(rootlevel)
# define a Handler which writes messages or higher to the sys.stderr (Console)
self.console = logging.StreamHandler()
# set the logging level
self.console.setLevel(consolelevel)
# define a Handler which writes messages to a logfile
self.logfile = logging.FileHandler(filename + '.log')
# set the logging level
self.logfile.setLevel(filelevel)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(name)s: %(message)s')
self.console.setFormatter(formatter)
self.logfile.setFormatter(formatter)
# add the handlers to the root logger
logger.addHandler(self.console)
logger.addHandler(self.logfile)
self._logger = logger
# set a net instance of the logger
def set(self):
return self._logger
# Stop and remove the ConsoleFileLogger object
def remove(self):
self._logger.removeHandler(self.console)
self._logger.removeHandler(self.logfile)
# NOTE(review): there is no self._logfile attribute and FileHandler is a
# class, not a method of a handler — this line raises AttributeError; it
# should be self.logfile.close().
self._logfile.FileHandler().close()
class Gamepad():
# class constructor
def __init__(self, mqttgamepad):
self.logger = ConsoleFileLogger('BaseLogFiles/Gamepad', 'Gamepad', logging.INFO, logging.INFO, logging.INFO).set()
# NOTE(review): joystickcount is never defined in this module — presumably
# pygame.joystick.get_count() was intended; confirm.
if joystickcount == 0:
self.logger.error('No gamepad connected')
elif joystickcount == 1:
self.gamepad = pygame.joystick.Joystick(0)
self.gamepad.init()
# NOTE(review): the debug() calls below are filtered out because this
# class's logger was created at INFO level above.
self.logger.debug('Joystick name %s', self.gamepad.get_name())
self.logger.debug('nb of axes = %s', self.gamepad.get_numaxes())
self.logger.debug('nb of balls = %s', self.gamepad.get_numballs())
self.logger.debug('nb of buttons = %s', self.gamepad.get_numbuttons())
self.logger.debug('nb of mini joysticks = %s', self.gamepad.get_numhats())
elif joystickcount > 1:
self.logger.error('only one gamepad is allowed')
def run(self):
self.logger.debug('gamepad running')
class MQTTClient():
# MQTT wrapper; logs through its own console+file logger at DEBUG level
# (unlike Gamepad, which logs at INFO) — the per-class levels the question
# asks about.
def __init__(self, clientname):
self.logger = ConsoleFileLogger('BaseLogFiles/MQTT/Pub', clientname, logging.DEBUG, logging.DEBUG, logging.DEBUG).set()
self.logger.debug('test')
def run(self):
self.logger.info('Connection MQTT Sub OK')
def main():
logger = ConsoleFileLogger('BaseLogFiles/logControlCenterMain', 'ControlCenterMain', logging.DEBUG, logging.DEBUG, logging.DEBUG).set()
mqttclient = MQTTClient("MQTTClient")
# NOTE(review): MQTTClient defines no connect() method — this line raises
# AttributeError as written.
mqttclient.connect()
gamepad = Gamepad(mqttclient)
# NOTE(review): Gamepad defines no initialized() method either; nor do the
# stop() methods called in the KeyboardInterrupt handler below exist.
if gamepad.initialized():
gamepadthread = threading.Thread(target=gamepad.run)
gamepadthread.start()
mqtttpubhread = threading.Thread(target=mqttclient.run)
mqtttpubhread.start()
logger.info('BaseMain started')
# Monitor the running program for a KeyboardInterrupt exception
# If this is the case all threads and other methods can be closed the correct way :)
while 1:
try:
sleep(1)
except KeyboardInterrupt:
logger.info('Ctrl-C pressed')
gamepad.stop()
mqttclient.stop()
logger.info('BaseMain stopped')
sys.exit(0)
# NOTE(review): should be ``if __name__ == '__main__':`` — the dunder
# underscores were lost (same as ``from future import division`` near the top
# of this listing, which should be ``from __future__ import division``).
if name == 'main':
main()