Using python's logging module, I'd like to return non-zero if an error or critical was logged, or zero if no error or critical was logged. Do I have to add a layer to catch this, or is this native functionality?
import logging
def CheckMaxErr():
pass
logging.debug("Cat")
CheckMaxErr() #should return logging.DEBUG
logging.error("Dog")
CheckMaxErr() #should return logging.ERROR
You can Implement a subclass of a logging handler and set a flag attribute on the instance if it is ever called. You would set the level of the handler to the logging.ERROR level to have it trigger on those messages.
import logging
class MyHandler(logging.Handler):
def __init__(self):
super().__init__()
self.flag = False
def emit(self, record):
self.flag = True
logger = logging.getLogger(__name__)
handle = MyHandler()
handle.setLevel(logging.ERROR)
logger.addHandler(handle)
logger.debug("debug message")
print(handle.flag) # False
logger.error("ERROR")
print(handle.flag) # True
In this example, the handler doesn't actually do anything with the message, just sets the flag when it has been triggered. You can add another handler to log to wherever you want (console, file, etc.)
Related
I've added logs to a Python 2 application using the logging module.
Now I want to add a closing statement at the end, dependent on the worst thing logged.
If the worst thing logged had the INFO level or lower, print "SUCCESS!"
If the worst thing logged had the WARNING level, write "SUCCESS!, with warnings. Please check the logs"
If the worst thing logged had the ERROR level, write "FAILURE".
Is there a way to get this information from the logger? Some built in method I'm missing, like logging.getWorseLevelLogSoFar?
My current plan is to replace all log calls (logging.info et al) with calls to wrapper functions in a class that also keeps track of that information.
I also considered somehow releasing the log file, reading and parsing it, then appending to it. This seems worse than my current plan.
Are there other options? This doesn't seem like a unique problem.
I'm using the root logger and would prefer to continue using it, but can change to a named logger if that's necessary for the solution.
As you said yourself, I think writing a wrapper function would be the neatest and fastest approach. The problem would be that you need a global variable, if you're not working within a class
global worst_log_lvl = logging.NOTSET
def write_log(logger, lvl, msg):
logger.log(lvl, msg)
if lvl > worst_log_lvl:
global worst_log_lvl
worst_log_lvl = lvl
or make worst_log_lvl a member of a custom class, where you emulate the signature of logging.logger, that you use instead of the actual logger
class CustomLoggerWrapper(object):
def __init__(self):
# setup of your custom logger
self.worst_log_lvl = logging.NOTSET
def debug(self):
pass
# repeat for other functions like info() etc.
As you're only using the root logger, you could attach a filter to it which keeps track of the level:
import argparse
import logging
import random
LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
class LevelTrackingFilter(logging.Filter):
def __init__(self):
self.level = logging.NOTSET
def filter(self, record):
self.level = max(self.level, record.levelno)
return True
def main():
parser = argparse.ArgumentParser()
parser.add_argument('maxlevel', metavar='MAXLEVEL', default='WARNING',
choices=LEVELS,
nargs='?', help='Set maximum level to log')
options = parser.parse_args()
maxlevel = getattr(logging, options.maxlevel)
logger = logging.getLogger()
logger.addHandler(logging.NullHandler()) # needs Python 2.7
filt = LevelTrackingFilter()
logger.addFilter(filt)
for i in range(100):
level = getattr(logging, random.choice(LEVELS))
if level > maxlevel:
continue
logger.log(level, 'message')
if filt.level <= logging.INFO:
print('SUCCESS!')
elif filt.level == logging.WARNING:
print('SUCCESS, with warnings. Please check the logs.')
else:
print('FAILURE')
if __name__ == '__main__':
main()
There's a "good" way to get this done automatically by using context filters.
TL;DR I've built a package that has the following contextfilter baked in. You can install it with pip install ofunctions.logger_utils then use it with:
from ofunctions import logger_utils
logger = logger_utils.logger_get_logger(log_file='somepath', console=True)
logger.error("Oh no!")
logger.info("Anyway...")
# Now get the worst called loglevel (result is equivalent to logging.ERROR level in this case)
worst_level = logger_utils.get_worst_logger_level(logger)
Here's the long solution which explains what happens under the hood:
Let's built a contextfilter class that can be injected into logging:
class ContextFilterWorstLevel(logging.Filter):
"""
This class records the worst loglevel that was called by logger
Allows to change default logging output or record events
"""
def __init__(self):
self._worst_level = logging.INFO
if sys.version_info[0] < 3:
super(logging.Filter, self).__init__()
else:
super().__init__()
#property
def worst_level(self):
"""
Returns worst log level called
"""
return self._worst_level
#worst_level.setter
def worst_level(self, value):
# type: (int) -> None
if isinstance(value, int):
self._worst_level = value
def filter(self, record):
# type: (str) -> bool
"""
A filter can change the default log output
This one simply records the worst log level called
"""
# Examples
# record.msg = f'{record.msg}'.encode('ascii', errors='backslashreplace')
# When using this filter, something can be added to logging.Formatter like '%(something)s'
# record.something = 'value'
if record.levelno > self.worst_level:
self.worst_level = record.levelno
return True
Now inject this filter into you logger instance
logger = logging.getLogger()
logger.addFilter(ContextFilterWorstLevel())
logger.warning("One does not simply inject a filter into logging")
Now we can iter over present filters and extract the worst called loglevel like this:
for flt in logger.filters:
if isinstance(flt, ContextFilterWorstLevel):
print(flt.worst_level)
I have a logger and a class DuplicateFilter that filters messages that already were logged once. I would like to include the time when the logging happened into my filter but when I try to access the property asctime I get: AttributeError: 'LogRecord' object has no attribute 'asctime'
Here a small example how I set up my logger:
import logging
import logging.handlers as log_handlers
def setup_logger(filename):
class DuplicateFilter(object):
def __init__(self):
self.msgs = set()
def filter(self, record):
if logger.level == 10:
return True
rv = True
try:
print(record.asctime)
msg = record.threadName + " " + record.msg
if msg in self.msgs:
rv = False
except Exception as e:
print(traceback.format_exc())
return rv
self.msgs.add(msg)
return rv
log_formatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] [%(threadName)-30.30s] %(message)s")
file_handler = log_handlers.TimedRotatingFileHandler(filename, encoding="UTF-8", when='W6', backupCount=12)
file_handler.setFormatter(log_formatter)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
logger = logging.getLogger(filename)
logger.propagate = False
logger.addHandler(console_handler)
logger.addHandler(file_handler)
logger.setLevel(logging.INFO)
dup_filter = DuplicateFilter()
logger.addFilter(dup_filter)
return logger
log = setup_logger("test.log")
for i in range(3):
log.info("wow")
Now my records look like this: 2018-07-18 14:34:49,642 [INFO ] [MainThread ] wow They clearly have an asctime and I explicitly set the asctime property in the constructor of my Formatter. The only question similar to mine I found says
To have message and asctime set, you must first call self.format(record) inside the emit method
but doesn't the logging.Formatter do that when you specify the log string the way I did with %(asctime)s?
EDIT: running.t was right, I just didn't understand what the documentation meant. I solved it by adding my formater to my filter and calling the format function at the beginning:
def __init__(self, formatter):
self.msgs = {}
self.formatter = formatter
def filter(self, record):
self.formatter.format(record)
In filter objects section of pyton logging module documentation I found following note:
Note that filters attached to handlers are consulted before an event is emitted by the handler, whereas filters attached to loggers are consulted whenever an event is logged (using debug(), info(), etc.), before sending an event to handlers. This means that events which have been generated by descendant loggers will not be filtered by a logger’s filter setting, unless the filter has also been applied to those descendant loggers.
Your filter is added to logger, while formatters are added to handlers. So in my opinion your filter method is applied before any of formatter you specified.
BTW, shouldn't your DuplicateFilter inherit from logging.Filter?
I´m just starting to learn Python and have encountered a Problem that I´m not able to solve.
I want to redirect every level above CRITICAL to sys.stderr and everything above WARNING to sys.stdout.
I came up with this script...
import logging
import sys
print("imported module {}".format(__name__))
class PyLogger(logging.Logger):
"""Wrapper for logging.Logger to redirect its message to
sys.stdout or sys.stderr accordingly """
def __init__(self, *args):
super(PyLogger, self).__init__(self, *args)
# get Logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# build Formatter
formatter = logging.Formatter(fmt="%(asctime)s:%(name)s %(message)s")
# build StreamHandler for sys.stderr
error = logging.StreamHandler(stream=sys.stderr)
error.setLevel(logging.CRITICAL)
error.setFormatter(formatter)
logger.addHandler(error)
# build StreamHandler for sys.stdin
out = logging.StreamHandler(stream=sys.stdout)
out.setFormatter(formatter)
out.setLevel(logging.WARNING)
logger.addHandler(out)
def main():
logger = PyLogger()
# help(logger)
logger.info("INFO")
if __name__ == "__main__":
main()
When running this scrip directly I get the following error:
No handlers could be found for logger "<__main__.PyLogger object at 0x105f23c50>"
I´ve googled around and many people said that a logging.basicConfig() would do the job but that didn´t worked for me.
Maybe someone of you guys could help me out.
Thanks!
Your class subclasses logging.Logger, so you should not call getLogger or manipulate a logger as an attribute. Rather, the logger is self inside the class, and should be adjusted directly:
import logging
import sys
print("imported module {}".format(__name__))
class PyLogger(logging.Logger):
"""Wrapper for logging.Logger to redirect its message to
sys.stdout or sys.stderr accordingly """
def __init__(self, *args):
super(PyLogger, self).__init__(self, *args)
#####
# self *is* the logger!
self.setLevel(logging.DEBUG)
# build Formatter
formatter = logging.Formatter(fmt="%(asctime)s:%(name)s %(message)s")
# build StreamHandler for sys.stderr
error = logging.StreamHandler(stream=sys.stderr)
error.setLevel(logging.CRITICAL)
error.setFormatter(formatter)
#####
# Assign the handler to self
self.addHandler(error)
# build StreamHandler for sys.stdin
out = logging.StreamHandler(stream=sys.stdout)
out.setFormatter(formatter)
out.setLevel(logging.WARNING)
#####
# Assign the handler to self
self.addHandler(out)
def main():
logger = PyLogger()
# help(logger)
logger.info("INFO")
logger.warning("WARN")
logger.critical("CRIT")
if __name__ == "__main__":
main()
This displays the following, as expected:
ely#eschaton:~/programming$ python test_logger.py
imported module __main__
2018-03-01 11:59:41,896:<__main__.PyLogger object at 0x7fa236aa4a50> WARN
2018-03-01 11:59:41,896:<__main__.PyLogger object at 0x7fa236aa4a50> CRIT
2018-03-01 11:59:41,896:<__main__.PyLogger object at 0x7fa236aa4a50> CRIT
Notice how the critical message trips two different output handlers, so it appears twice (once because it satisfied warning level, once for critical level).
In your original code, notice that you are creating a variable called logger inside of __init__, but this not assigned to self or anything. This variable gets destroyed when it goes out of scope of the __init__ function, and so the assignment of any handlers is meaningless. Plus, because handlers weren't assigned to self, but the object that self is referencing is the logger that will be called later on, that is why you see the error about no handlers.
A library that I use emits warnings and errors through the logging module (logging.Logger's warn() and error() methods). I would like to implement an option to turn the warnings into errors (i.e., fail on warnings).
Is there an easy way to achieve this?
From looking at the documentation, I cannot see a ready-made solution. I assume it is possible by adding a custom Handler object, but I am not sure how to do it "right". Any pointers?
#hoefling's answer is close, but I would change it like so:
class LevelRaiser(logging.Filter):
def filter(self, record):
if record.levelno == logging.WARNING:
record.levelno = logging.ERROR
record.levelname = logging.getLevelName(logging.ERROR)
return True
def configure_library_logging():
library_root_logger = logging.getLogger(library.__name__)
library_root_logger.addFilter(LevelRaiser())
The reason is that filters are used to change LogRecord attributes and filter stuff out, whereas handlers are used to do I/O. What you're trying to do here isn't I/O, and so doesn't really belong in a handler.
Update: I like the proposal of Vinay made in this answer, injecting a custom Filter instead of a Handler is a much cleaner way. Please check it out!
You are on the right track with implementing own Handler. This is pretty easy to implement. I would do it like that: write a handler that edits the LogRecord in-place and attach one handler instance to the library's root loggers. Example:
# library.py
import logging
_LOGGER = logging.getLogger(__name__)
def library_stuff():
_LOGGER.warning('library stuff')
This is a script that uses the library:
import logging
import library
class LevelRaiser(logging.Handler):
def emit(self, record: logging.LogRecord):
if record.levelno == logging.WARNING:
record.levelno = logging.ERROR
record.levelname = logging.getLevelName(logging.ERROR)
def configure_library_logging():
library_root_logger = logging.getLogger(library.__name__)
library_root_logger.addHandler(LevelRaiser())
if __name__ == '__main__':
# do some example global logging config
logging.basicConfig(level=logging.INFO)
# additional configuration for the library logging
configure_library_logging()
# play with different loggers
our_logger = logging.getLogger(__name__)
root_logger = logging.getLogger()
root_logger.warning('spam')
our_logger.warning('eggs')
library.library_stuff()
root_logger.warning('foo')
our_logger.warning('bar')
library.library_stuff()
Run the script:
WARNING:root:spam
WARNING:__main__:eggs
ERROR:library:library stuff
WARNING:root:foo
WARNING:__main__:bar
ERROR:library:library stuff
Note that warning level is elevated to error level only on library's logging prints, all the rest remains unchanged.
You can assign logging.warn to logging.error before calling methods from your library:
import logging
warn_log_original = logging.warn
logging.warn = logging.error
library_call()
logging.warn = warn_log_original
I have the following class:
class Log(object):
# class new
#new is used instead of init because __new__ is able to return (where __init__ can't)
def __new__(self, name, consolelevel, filelevel):
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(name)s: %(message)s')
#Create consolehandler and set formatting (level is set in the ROOT)
consolehandler = StreamHandler()
consolehandler.setFormatter(formatter)
#Create filehandler, set level and set formatting
filehandler = FileHandler(name + '.log')
filehandler.setLevel(filelevel)
filehandler.setFormatter(formatter)
#Create the root logger, add console and file logger. Set the rootlevel == consolelevel.
self.root = logging.getLogger(name)
#causing me problems....
self.root.setLevel(consolelevel)
self.root.addHandler(consolehandler)
self.root.addHandler(filehandler)
self.root.propagate = True
return self.root
# Close the logger object
def close():
# to be implemented
pass
I use this class to log to the console and to a file (depending on the set level). The problem is that the root level seems to be leading for the addhandlers. Is there a way to disable this? Now I set the rootlevel to the same level as the consolelevel but this does not work...
Any advice?
Thanks in advance and with best regards,
JR
A problem that I can see in your code is that it will add more handlers whenever you instantiate the Log class. You probably do not want this.
Keep in mind that getLogger returns always the same instance when called with the same argument, and basically it implements the singleton pattern.
Hence when you later call addHandler it will add a new handler everytime.
The way to deal with logging is to create a logger at the module level and use it.
Also I'd avoid using __new__. In your case you can use a simple function. And note that your Log.close method wont work, because your __new__ method does not return a Log instance, and thus the returned logger doesn't have that method.
Regarding the level of the logger, I don't understand why you do not set the level on the consolehandler but on the whole logger.
This is a simplified version of the module I am making. The module contains a few classes that all need a logging functionality. Each class logs to a different file and and it should also be possible to change the file handler levels between classes (e.g. gamepad class: console.debug and filehandler.info and MQTT class: console info and filehandler.debug).
Therefor I thought that setting up a log class would be the easiest way. Please bear in mind that I usually do electronics but now combined with python. So my skills are pretty basic....
#!/bin/env python2.7
from future import division
from operator import *
import logging
from logging import FileHandler
from logging import StreamHandler
import pygame
import threading
from pygame.locals import *
import mosquitto
import time
from time import sleep
import sys
class ConsoleFileLogger(object):
# class constructor
def __init__(self, filename, loggername, rootlevel, consolelevel, filelevel):
# logger levels: DEBUG, INFO, WARNING, ERROR, CRITICAL
# define a root logger and set its ROOT logging level
logger = logging.getLogger(loggername)
logger.setLevel(rootlevel)
# define a Handler which writes messages or higher to the sys.stderr (Console)
self.console = logging.StreamHandler()
# set the logging level
self.console.setLevel(consolelevel)
# define a Handler which writes messages to a logfile
self.logfile = logging.FileHandler(filename + '.log')
# set the logging level
self.logfile.setLevel(filelevel)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(name)s: %(message)s')
self.console.setFormatter(formatter)
self.logfile.setFormatter(formatter)
# add the handlers to the root logger
logger.addHandler(self.console)
logger.addHandler(self.logfile)
self._logger = logger
# set a net instance of the logger
def set(self):
return self._logger
# Stop and remove the ConsoleFileLogger object
def remove(self):
self._logger.removeHandler(self.console)
self._logger.removeHandler(self.logfile)
self._logfile.FileHandler().close()
class Gamepad():
# class constructor
def __init__(self, mqttgamepad):
self.logger = ConsoleFileLogger('BaseLogFiles/Gamepad', 'Gamepad', logging.INFO, logging.INFO, logging.INFO).set()
if joystickcount == 0:
self.logger.error('No gamepad connected')
elif joystickcount == 1:
self.gamepad = pygame.joystick.Joystick(0)
self.gamepad.init()
self.logger.debug('Joystick name %s', self.gamepad.get_name())
self.logger.debug('nb of axes = %s', self.gamepad.get_numaxes())
self.logger.debug('nb of balls = %s', self.gamepad.get_numballs())
self.logger.debug('nb of buttons = %s', self.gamepad.get_numbuttons())
self.logger.debug('nb of mini joysticks = %s', self.gamepad.get_numhats())
elif joystickcount > 1:
self.logger.error('only one gamepad is allowed')
def run(self):
self.logger.debug('gamepad running')
class MQTTClient():
def __init__(self, clientname):
self.logger = ConsoleFileLogger('BaseLogFiles/MQTT/Pub', clientname, logging.DEBUG, logging.DEBUG, logging.DEBUG).set()
self.logger.debug('test')
def run(self):
self.logger.info('Connection MQTT Sub OK')
def main():
logger = ConsoleFileLogger('BaseLogFiles/logControlCenterMain', 'ControlCenterMain', logging.DEBUG, logging.DEBUG, logging.DEBUG).set()
mqttclient = MQTTClient("MQTTClient")
mqttclient.connect()
gamepad = Gamepad(mqttclient)
if gamepad.initialized():
gamepadthread = threading.Thread(target=gamepad.run)
gamepadthread.start()
mqtttpubhread = threading.Thread(target=mqttclient.run)
mqtttpubhread.start()
logger.info('BaseMain started')
# Monitor the running program for a KeyboardInterrupt exception
# If this is the case all threads and other methods can be closed the correct way :)
while 1:
try:
sleep(1)
except KeyboardInterrupt:
logger.info('Ctrl-C pressed')
gamepad.stop()
mqttclient.stop()
logger.info('BaseMain stopped')
sys.exit(0)
if name == 'main':
main()