I am relatively new to Python and I'm developing my first Python GUI (slowly). One of the third-party modules I want to use uses Python's logging framework. I would like their logs to go to a GtkTextView. I know where their logger variable is, so I can call logger.addHandler.
How do I inherit from logging.Handler correctly?
My implementation, based on the source of StreamHandler, is:
class GtkTextViewHandler(logging.Handler):
    def __init__(self, tv):
        logging.Handler.__init__(self)
        self.tv = tv
        self.tbf = tv.get_buffer()
        self.formatter = None

    def emit(self, record):
        try:
            msg = self.format(record)
            fs = "%s\n"
            self.tbf.insert(self.tbf.get_end_iter(), fs % msg)
            # scroll_to_iter wants an iter from the buffer, not from the view
            self.tv.scroll_to_iter(self.tbf.get_end_iter(), 0.0, False, 0, 0)
        except Exception:
            self.handleError(record)
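For completeness, here is roughly how I plan to attach it, assuming the third-party module exposes its logger as thirdparty.logger and the window already holds a Gtk.TextView called textview (both names are placeholders):

import logging
import thirdparty  # hypothetical module that exposes a `logger` attribute

handler = GtkTextViewHandler(textview)  # textview is an existing Gtk.TextView
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
handler.setLevel(logging.DEBUG)
thirdparty.logger.addHandler(handler)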
I have a Python program that uses pygame. I want to create another pygame window for some additional content and have a separate script for that. I use socket and localhost for communication.
I am using subprocess to run the script that displays the second pygame window. This script has a number of logging messages that are not displayed on the stdout of the terminal I am using. Is there a way to redirect the logging messages so that they are printed to the console alongside the logging messages of the main program?
So far I have set up a logwrapper that captures the logged output:
import collections
import io
import logging
import logging.config

level = logging.DEBUG


def getLogger(module_name):
    wrapper = LogWrapper(module_name)
    return wrapper.getLogger(), wrapper


class LogWrapper():
    def __init__(self, module_name):
        self.module_name = module_name
        self.log_capture_string = None
        self.log_trace = []

    @property
    def trace(self):
        values = self.log_capture_string.getvalue()
        self.log_trace = self.log_trace + values.split("\n")
        return self.log_trace

    def getLogger(self, **kwargs):
        ### Create the logger (`configuration` is the dictConfig dict defined elsewhere)
        logging.config.dictConfigClass(configuration).configure()
        logger = logging.getLogger(self.module_name)
        logger.setLevel(level)
        ### Setup the console handler with a FIFOIO object
        self.log_capture_string = FIFOIO(32768)
        ch = logging.StreamHandler(self.log_capture_string)
        ch.setLevel(logging.DEBUG)
        ### Optionally add a formatter
        ### Add the console handler to the logger
        logger.addHandler(ch)
        logger.info("set up logwrap for {}".format(self.module_name))
        return logger


class FIFOIO(io.TextIOBase):
    def __init__(self, size, *args):
        self.maxsize = size
        io.TextIOBase.__init__(self, *args)
        self.deque = collections.deque()

    def getvalue(self):
        return ''.join(self.deque)

    def write(self, x):
        self.deque.append(x)
        self.shrink()

    def shrink(self):
        if self.maxsize is None:
            return
        size = sum(len(x) for x in self.deque)
        while size > self.maxsize:
            x = self.deque.popleft()
            size -= len(x)
But after running the main program, calling subprogram.logwrapper.trace on termination doesn't capture any of the error messages from the run, only the initialization message, so I am looking for a better way to access this information.
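For illustration, one simpler arrangement (a sketch, under the assumption that the child's messages only need to appear in the same terminal, not be captured): let the child log to stderr and launch it without redirecting its output. second_window.py is a placeholder name:

import logging
import subprocess
import sys

# In second_window.py (the child), configure logging to stderr as usual:
#     logging.basicConfig(level=logging.DEBUG,
#                         format="[child %(process)d] %(levelname)s %(name)s: %(message)s")

# In the parent, start the child WITHOUT capturing stdout/stderr, so the child's
# log records land in the same terminal as the parent's own logging output.
logging.basicConfig(level=logging.DEBUG, format="[parent] %(levelname)s %(name)s: %(message)s")
logging.getLogger(__name__).info("starting child window")

proc = subprocess.Popen([sys.executable, "second_window.py"])  # no stdout=/stderr= redirection
proc.wait()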
I'm new to Python and trying to create a wrapper over logging so I can reuse the changes needed for formatting etc.
I've written my wrapper class in the following way:
import logging
import sys
from datetime import datetime


class CustomLogger:
    """This is custom logger class"""

    _format_spec = f"[%(name)-24s | %(asctime)s | %(levelname)s ] (%(filename)-32s : %(lineno)-4d) ==> %(message)s"
    _date_format_spec = f"%Y-%m-%d # %I:%M:%S %p"

    def __init__(self, name, level=logging.DEBUG, format_spec=None):
        """"""
        self.name = name
        self.level = level
        self.format_spec = format_spec if format_spec else CustomLogger._format_spec
        # Complete logging configuration.
        self.logger = self.get_logger(self.name, self.level)

    def get_file_handler(self, name, level):
        """This is a method to get a file handler"""
        today = datetime.now().strftime(format="%Y-%m-%d")
        file_handler = logging.FileHandler("{}-{}.log".format(name, today))
        file_handler.setLevel(level)
        file_handler.setFormatter(logging.Formatter(self.format_spec,
                                                    datefmt=CustomLogger._date_format_spec))
        return file_handler

    def get_stream_handler(self, level):
        """This is a method to get a stream handler"""
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(level)
        stream_handler.setFormatter(logging.Formatter(self.format_spec,
                                                      datefmt=CustomLogger._date_format_spec))
        return stream_handler

    def get_logger(self, name, level):
        """This is a method to get a logger"""
        logger = logging.getLogger(name)
        logger.addHandler(self.get_file_handler(name, level))
        # logger.addHandler(self.get_stream_handler(level))
        return logger

    def info(self, msg):
        """info message logger method"""
        self.logger.info(msg)

    def error(self, msg):
        """error message logger method"""
        self.logger.error(msg)

    def debug(self, msg):
        """debug message logger method"""
        self.logger.debug(msg)

    def warn(self, msg):
        """warning message logger method"""
        self.logger.warn(msg)

    def critical(self, msg):
        """critical message logger method"""
        self.logger.critical(msg)

    def exception(self, msg):
        """exception message logger method"""
        self.logger.exception(msg)
But when I try to use my CustomLogger, nothing goes into the log file.
def main():
    """This main function"""
    logger = CustomLogger(name="debug", level=logging.DEBUG)
    logger.info("Called main")


if __name__ == "__main__":
    main()
If I do a similar thing without the class/function wrapper, it works. I'm not sure where I'm going wrong. Any pointer will help.
Further update on the question
After making this (custom_logger.py) into a package and using it in the actual application (app.py), I'm noticing it always prints custom_logger.py as the filename, not app.py.
How to fix this? I'm ok with rewriting the CustomLogger class if required.
I missed calling setLevel() on the logger. After doing that, the problem is resolved. I also added the pid to the file handler's file name to avoid future issues in a multi-process environment.
Let me know if there's anything I can do better here with respect to any other potential issues.
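To make that update concrete, here is a minimal sketch of both changes, assuming Python 3.8+ for the stacklevel argument; get_fixed_logger and FixedCustomLogger are hypothetical names, not part of the original class:

import logging
import os


def get_fixed_logger(name, level=logging.DEBUG):
    """Hypothetical helper showing the fix outside the class."""
    logger = logging.getLogger(name)
    logger.setLevel(level)  # the missing call; without it the root's default WARNING level applies
    # pid in the file name keeps two processes from writing to the same file
    handler = logging.FileHandler("{}-{}.log".format(name, os.getpid()))
    handler.setLevel(level)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(message)s"))
    logger.addHandler(handler)
    return logger


class FixedCustomLogger:
    """Minimal version of the wrapper showing both fixes."""

    def __init__(self, name, level=logging.DEBUG):
        self.logger = get_fixed_logger(name, level)

    def info(self, msg):
        # stacklevel=2 (Python 3.8+) makes the record report the caller of this
        # method, i.e. app.py, rather than this wrapper module
        self.logger.info(msg, stacklevel=2)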
I've added logs to a Python 2 application using the logging module.
Now I want to add a closing statement at the end, dependent on the worst thing logged.
If the worst thing logged had the INFO level or lower, print "SUCCESS!"
If the worst thing logged had the WARNING level, write "SUCCESS!, with warnings. Please check the logs"
If the worst thing logged had the ERROR level, write "FAILURE".
Is there a way to get this information from the logger? Some built-in method I'm missing, like logging.getWorseLevelLogSoFar?
My current plan is to replace all log calls (logging.info et al) with calls to wrapper functions in a class that also keeps track of that information.
I also considered somehow releasing the log file, reading and parsing it, then appending to it. This seems worse than my current plan.
Are there other options? This doesn't seem like a unique problem.
I'm using the root logger and would prefer to continue using it, but can change to a named logger if that's necessary for the solution.
As you said yourself, I think writing a wrapper function would be the neatest and fastest approach. The catch is that you need a global variable if you're not working within a class:
worst_log_lvl = logging.NOTSET  # module-level; `global x = value` is not valid syntax


def write_log(logger, lvl, msg):
    global worst_log_lvl
    logger.log(lvl, msg)
    if lvl > worst_log_lvl:
        worst_log_lvl = lvl
or make worst_log_lvl a member of a custom class that emulates the interface of logging.Logger and that you use instead of the actual logger:
class CustomLoggerWrapper(object):
    def __init__(self):
        # setup of your custom logger
        self.worst_log_lvl = logging.NOTSET

    def debug(self):
        pass

    # repeat for other functions like info() etc.
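A slightly fuller sketch of that second option, assuming the wrapper simply forwards to a real logger it owns (the names below are placeholders, not an existing API):

import logging


class CustomLoggerWrapper(object):
    """Forwards to a real logger and remembers the worst level used."""

    def __init__(self, name=None):
        self._logger = logging.getLogger(name)
        self.worst_log_lvl = logging.NOTSET

    def _log(self, lvl, msg, *args, **kwargs):
        self.worst_log_lvl = max(self.worst_log_lvl, lvl)
        self._logger.log(lvl, msg, *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        self._log(logging.DEBUG, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        self._log(logging.INFO, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        self._log(logging.WARNING, msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        self._log(logging.ERROR, msg, *args, **kwargs)

At the end of the run you compare worst_log_lvl against logging.WARNING and logging.ERROR to pick the closing message.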
As you're only using the root logger, you could attach a filter to it which keeps track of the level:
import argparse
import logging
import random

LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']


class LevelTrackingFilter(logging.Filter):
    def __init__(self):
        self.level = logging.NOTSET

    def filter(self, record):
        self.level = max(self.level, record.levelno)
        return True


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('maxlevel', metavar='MAXLEVEL', default='WARNING',
                        choices=LEVELS,
                        nargs='?', help='Set maximum level to log')
    options = parser.parse_args()
    maxlevel = getattr(logging, options.maxlevel)
    logger = logging.getLogger()
    logger.addHandler(logging.NullHandler())  # needs Python 2.7
    filt = LevelTrackingFilter()
    logger.addFilter(filt)
    for i in range(100):
        level = getattr(logging, random.choice(LEVELS))
        if level > maxlevel:
            continue
        logger.log(level, 'message')
    if filt.level <= logging.INFO:
        print('SUCCESS!')
    elif filt.level == logging.WARNING:
        print('SUCCESS, with warnings. Please check the logs.')
    else:
        print('FAILURE')


if __name__ == '__main__':
    main()
There's a "good" way to get this done automatically by using context filters.
TL;DR I've built a package that has the following context filter baked in. You can install it with pip install ofunctions.logger_utils and then use it like this:
from ofunctions import logger_utils
logger = logger_utils.logger_get_logger(log_file='somepath', console=True)
logger.error("Oh no!")
logger.info("Anyway...")
# Now get the worst called loglevel (result is equivalent to logging.ERROR level in this case)
worst_level = logger_utils.get_worst_logger_level(logger)
Here's the long solution which explains what happens under the hood:
Let's build a context filter class that can be injected into logging:
class ContextFilterWorstLevel(logging.Filter):
    """
    This class records the worst loglevel that was called by the logger
    Allows to change default logging output or record events
    """

    def __init__(self):
        self._worst_level = logging.INFO
        if sys.version_info[0] < 3:
            super(ContextFilterWorstLevel, self).__init__()
        else:
            super().__init__()

    @property
    def worst_level(self):
        """
        Returns worst log level called
        """
        return self._worst_level

    @worst_level.setter
    def worst_level(self, value):
        # type: (int) -> None
        if isinstance(value, int):
            self._worst_level = value

    def filter(self, record):
        # type: (logging.LogRecord) -> bool
        """
        A filter can change the default log output
        This one simply records the worst log level called
        """
        # Examples
        # record.msg = f'{record.msg}'.encode('ascii', errors='backslashreplace')
        # When using this filter, something can be added to logging.Formatter like '%(something)s'
        # record.something = 'value'
        if record.levelno > self.worst_level:
            self.worst_level = record.levelno
        return True
Now inject this filter into your logger instance:
logger = logging.getLogger()
logger.addFilter(ContextFilterWorstLevel())
logger.warning("One does not simply inject a filter into logging")
Now we can iterate over the attached filters and extract the worst log level called, like this:
for flt in logger.filters:
    if isinstance(flt, ContextFilterWorstLevel):
        print(flt.worst_level)
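For convenience, that lookup can be wrapped in a small helper (a sketch; presumably close to what the packaged get_worst_logger_level call does, but the function below is just illustrative):

def get_worst_level(logger):
    """Return the highest level seen by a ContextFilterWorstLevel on this logger."""
    for flt in logger.filters:
        if isinstance(flt, ContextFilterWorstLevel):
            return flt.worst_level
    return logging.NOTSET  # no tracking filter attached


if get_worst_level(logging.getLogger()) >= logging.ERROR:
    print("FAILURE")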
I use a LoggerAdapter to make my Python logging output Linux TIDs instead of the long unique IDs. But this way I don't modify an existing logger; I create a new object:
new_logger = logging.LoggerAdapter(
    logger=logging.getLogger('mylogger'),
    extra=my_tid_extractor())
Now I want this LoggerAdapter be used by certain modules. As long as I know a global variable being used as logger I can do something like this:
somemodule.logger = new_logger
But this is not nice - it works only in a couple of cases and you need to know the logger variables used by the modules.
Do you know a way to make a LoggerAdapter available globally, e.g. by calling something like
logging.setLogger('mylogger', new_logger)
Or alternatively: is there some other way to make Python logging output Linux thread IDs like those printed by ps?
Alternatively, you can implement a custom logger class and make it the default in the logging module.
Here is an example:
import logging
import ctypes

SYS_gettid = 186
libc = ctypes.cdll.LoadLibrary('libc.so.6')

FORMAT = '%(asctime)-15s [thread=%(tid)s] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=FORMAT)


def my_tid_extractor():
    tid = libc.syscall(SYS_gettid)
    return {'tid': tid}


class CustomLogger(logging.Logger):
    def _log(self, level, msg, args, exc_info=None, extra=None):
        if extra is None:
            extra = my_tid_extractor()
        super(CustomLogger, self)._log(level, msg, args, exc_info, extra)


logging.setLoggerClass(CustomLogger)

logger = logging.getLogger('test')
logger.debug('test')
Output sample:
2015-01-20 19:24:09,782 [thread=5017] test
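As a side note, if Python 3.8+ is available, the same Linux thread id can be attached without subclassing Logger, using threading.get_native_id() in a filter placed on the handler (a sketch, not part of the answer above):

import logging
import threading


class TidFilter(logging.Filter):
    """Stamp each record with the OS-level thread id (the value ps -eLf shows)."""

    def filter(self, record):
        record.tid = threading.get_native_id()  # Python 3.8+
        return True


handler = logging.StreamHandler()
handler.addFilter(TidFilter())  # handler filters see every record this handler emits
handler.setFormatter(logging.Formatter('%(asctime)-15s [thread=%(tid)s] %(message)s'))
logging.basicConfig(level=logging.DEBUG, handlers=[handler])

logging.getLogger('test').debug('test')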
I think you need to override the LoggerAdapter.process() method, because the default LoggerAdapter.process() will not modify the message. Here is an example:
import logging
import random

logging.basicConfig(level=logging.DEBUG)  # needed so the DEBUG record below is actually emitted

L = logging.getLogger('name')


class myLogger(logging.LoggerAdapter):
    def process(self, msg, kwargs):
        return '(%d),%s' % (self.extra['name1'](1, 1000), msg), kwargs


# put the randint function object in the adapter's extra dict
LA = myLogger(L, {'name1': random.randint})

# now, do some logging
LA.debug('some_loging_messsage')

Output:
DEBUG:name:(167),some_loging_messsage
I had a similar problem. My solution might be a bit more generic than the accepted one.
I’ve also used a custom logger class, but I did a generic extension that allows me to register adapters after it’s instantiated.
class AdaptedLogger(logging.Logger):
    """A logger that allows you to register adapters on an instance."""

    def __init__(self, name):
        """Create a new logger instance."""
        super().__init__(name)
        self.adapters = []

    def _log(self, level, msg, *args, **kwargs):
        """Let adapters modify the message and keyword arguments."""
        for adapter in self.adapters:
            msg, kwargs = adapter.process(msg, kwargs)
        return super()._log(level, msg, *args, **kwargs)
To make your logger use the class, you have to instantiate it before it is used elsewhere. For example:
original_class = logging.getLoggerClass()
logging.setLoggerClass(AdaptedLogger)
logcrm_logger = logging.getLogger("test")
logging.setLoggerClass(original_class)
Then you can register adapters on the instance at any time later on.
logger = logging.getLogger("test")
adapter = logging.LoggerAdapter(logger, extra=my_tid_extractor())
logger.adapters.append(adapter)
Actually the “adapters” can be any objects now, as long as they have a process() method with a signature compatible with logging.LoggerAdapter.process().
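For instance, a plain object like the following could be appended to logger.adapters; this is a hypothetical sketch that reuses the my_tid_extractor() helper from the question and assumes the AdaptedLogger setup above:

import logging


class TidAdapter:
    """Not a LoggerAdapter subclass; just something with a compatible process()."""

    def process(self, msg, kwargs):
        extra = kwargs.get("extra") or {}
        extra.update(my_tid_extractor())  # e.g. {'tid': 1234}
        kwargs["extra"] = extra
        return msg, kwargs


logger = logging.getLogger("test")  # an AdaptedLogger instance (see above)
logger.adapters.append(TidAdapter())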
I have the following class:
class Log(object):
    # class new
    # __new__ is used instead of __init__ because __new__ is able to return
    # a value (where __init__ can't)
    def __new__(self, name, consolelevel, filelevel):
        formatter = logging.Formatter('%(asctime)s %(levelname)s: %(name)s: %(message)s')

        # Create consolehandler and set formatting (level is set in the ROOT)
        consolehandler = StreamHandler()
        consolehandler.setFormatter(formatter)

        # Create filehandler, set level and set formatting
        filehandler = FileHandler(name + '.log')
        filehandler.setLevel(filelevel)
        filehandler.setFormatter(formatter)

        # Create the root logger, add console and file logger. Set the rootlevel == consolelevel.
        self.root = logging.getLogger(name)
        # causing me problems....
        self.root.setLevel(consolelevel)
        self.root.addHandler(consolehandler)
        self.root.addHandler(filehandler)
        self.root.propagate = True
        return self.root

    # Close the logger object
    def close():
        # to be implemented
        pass
I use this class to log to the console and to a file (depending on the set level). The problem is that the logger's own level seems to take precedence over the handlers' levels. Is there a way to disable this? For now I set the root level to the same level as the console level, but this does not work...
Any advice?
Thanks in advance and with best regards,
JR
A problem that I can see in your code is that it will add more handlers whenever you instantiate the Log class. You probably do not want this.
Keep in mind that getLogger always returns the same instance when called with the same argument; basically it implements the singleton pattern.
Hence when you later call addHandler, it will add a new handler every time.
The way to deal with logging is to create a logger at the module level and use it.
Also I'd avoid using __new__. In your case you can use a simple function. And note that your Log.close method won't work, because your __new__ method does not return a Log instance, and thus the returned logger doesn't have that method.
Regarding the level of the logger, I don't understand why you do not set the level on the consolehandler but on the whole logger.
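To illustrate that last point, here is a minimal module-level sketch of the usual pattern: set the logger itself to the lowest level you ever need and let each handler filter with its own level (names and file paths are placeholders):

import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)  # let everything through; the handlers decide what to keep

console = logging.StreamHandler()
console.setLevel(logging.INFO)       # console shows INFO and above

logfile = logging.FileHandler("app.log")
logfile.setLevel(logging.DEBUG)      # file keeps the full detail

formatter = logging.Formatter('%(asctime)s %(levelname)s: %(name)s: %(message)s')
console.setFormatter(formatter)
logfile.setFormatter(formatter)

logger.addHandler(console)
logger.addHandler(logfile)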
This is a simplified version of the module I am making. The module contains a few classes that all need logging functionality. Each class logs to a different file, and it should also be possible to change the handler levels between classes (e.g. gamepad class: console DEBUG and file handler INFO; MQTT class: console INFO and file handler DEBUG).
Therefore I thought that setting up a log class would be the easiest way. Please bear in mind that I usually do electronics, now combined with Python, so my skills are pretty basic...
#!/bin/env python2.7
from __future__ import division
from operator import *
import logging
from logging import FileHandler
from logging import StreamHandler
import pygame
import threading
from pygame.locals import *
import mosquitto
import time
from time import sleep
import sys
class ConsoleFileLogger(object):
    # class constructor
    def __init__(self, filename, loggername, rootlevel, consolelevel, filelevel):
        # logger levels: DEBUG, INFO, WARNING, ERROR, CRITICAL
        # define a root logger and set its ROOT logging level
        logger = logging.getLogger(loggername)
        logger.setLevel(rootlevel)

        # define a Handler which writes messages or higher to the sys.stderr (Console)
        self.console = logging.StreamHandler()
        # set the logging level
        self.console.setLevel(consolelevel)

        # define a Handler which writes messages to a logfile
        self.logfile = logging.FileHandler(filename + '.log')
        # set the logging level
        self.logfile.setLevel(filelevel)

        # create formatter and add it to the handlers
        formatter = logging.Formatter('%(asctime)s %(levelname)s: %(name)s: %(message)s')
        self.console.setFormatter(formatter)
        self.logfile.setFormatter(formatter)

        # add the handlers to the root logger
        logger.addHandler(self.console)
        logger.addHandler(self.logfile)
        self._logger = logger

    # return the configured logger instance
    def set(self):
        return self._logger

    # Stop and remove the ConsoleFileLogger object
    def remove(self):
        self._logger.removeHandler(self.console)
        self._logger.removeHandler(self.logfile)
        self.logfile.close()
class Gamepad():
    # class constructor
    def __init__(self, mqttgamepad):
        self.logger = ConsoleFileLogger('BaseLogFiles/Gamepad', 'Gamepad', logging.INFO, logging.INFO, logging.INFO).set()
        if joystickcount == 0:
            self.logger.error('No gamepad connected')
        elif joystickcount == 1:
            self.gamepad = pygame.joystick.Joystick(0)
            self.gamepad.init()
            self.logger.debug('Joystick name %s', self.gamepad.get_name())
            self.logger.debug('nb of axes = %s', self.gamepad.get_numaxes())
            self.logger.debug('nb of balls = %s', self.gamepad.get_numballs())
            self.logger.debug('nb of buttons = %s', self.gamepad.get_numbuttons())
            self.logger.debug('nb of mini joysticks = %s', self.gamepad.get_numhats())
        elif joystickcount > 1:
            self.logger.error('only one gamepad is allowed')

    def run(self):
        self.logger.debug('gamepad running')


class MQTTClient():
    def __init__(self, clientname):
        self.logger = ConsoleFileLogger('BaseLogFiles/MQTT/Pub', clientname, logging.DEBUG, logging.DEBUG, logging.DEBUG).set()
        self.logger.debug('test')

    def run(self):
        self.logger.info('Connection MQTT Sub OK')
def main():
    logger = ConsoleFileLogger('BaseLogFiles/logControlCenterMain', 'ControlCenterMain', logging.DEBUG, logging.DEBUG, logging.DEBUG).set()

    mqttclient = MQTTClient("MQTTClient")
    mqttclient.connect()

    gamepad = Gamepad(mqttclient)
    if gamepad.initialized():
        gamepadthread = threading.Thread(target=gamepad.run)
        gamepadthread.start()

    mqtttpubhread = threading.Thread(target=mqttclient.run)
    mqtttpubhread.start()

    logger.info('BaseMain started')

    # Monitor the running program for a KeyboardInterrupt exception
    # If this is the case all threads and other methods can be closed the correct way :)
    while 1:
        try:
            sleep(1)
        except KeyboardInterrupt:
            logger.info('Ctrl-C pressed')
            gamepad.stop()
            mqttclient.stop()
            logger.info('BaseMain stopped')
            sys.exit(0)


if __name__ == '__main__':
    main()