Call a function every time the logging module in Python is called - python

I would like to call a function every time I do any sort of logging with the logging module in Python.
As can be seen below, instead of calling copyfiles at each instance of logging, I would like to see if there is a way to inject copyfiles into the FileHandler somehow, or even via a wrapper.
import sys
import shutil
import logging
from datetime import date

logger = logging.getLogger('')
sh = logging.StreamHandler(sys.stdout)
logfile = 'logs/log-run-{}.log'.format(date.today().strftime('%d-%m-%Y'))
fh = logging.FileHandler(logfile)
formatter = logging.Formatter('[%(asctime)s] %(levelname)s '
                              '[%(filename)s.%(funcName)s:%(lineno)d] '
                              '%(message)s',
                              datefmt='%a, %d %b %Y %H:%M:%S')
logger.setLevel(logging.INFO)
sh.setFormatter(formatter)
fh.setFormatter(formatter)
logger.addHandler(sh)
logger.addHandler(fh)

LOGPATH = sys.argv[1]

def copyfiles():
    """
    Copy files to Azure blob storage
    """
    fh.flush()
    shutil.copy(logfile, LOGPATH)

logging.info('test')
copyfiles()
logging.info('foobar')
copyfiles()
I tried digging into invoking copyfiles each time logging is called, but I ended up nowhere.
In case you are wondering why I am copying files from logging, this is why.

Currently this is what I could think of:
Override the flush() of FileHandler by inheriting from it in a utility class, like the FlushCopyFileHandler class shown in the code below.
Instead of using FileHandler, use the FlushCopyFileHandler class, and all you have to do is call this overridden flush().
"""
from logging import FileHandler
from shutil
class FlushCopyFileHandler(FileHandler):
# These arguments are passed along to the parent class FileHandler.
def __init__(self, *args, **kwargs):
super(FlushCopyFileHandler, self).__init__(filename, *args, **kwargs)
self.copy_destination = "some default destination path" # can also be set from args.
# When this flush() is called it will call the flush() of FileHandler class.
# After that it will call the shutil.copy() method.
def flush(self):
super().flush()
shutil.copy(self.filename, self.copy_destination)
# Use this to change destination path later on.
def set_copy_path(self, destination):
self.copy_destination = destination
"""

Related

Always Reference Logger Object in Main

Is it possible for a library of code to obtain a reference to a logger object created by client code that uses a unique name?
The Python advanced logging tutorial says the following:
A good convention to use when naming loggers is to use a module-level logger, in each module which uses logging, named as follows:
logger = logging.getLogger(__name__)
This means that logger names track the package/module hierarchy, and it's intuitively obvious where events are logged just from the logger name.
Every module in my library does:
LOG = logging.getLogger(__name__)
This works fine when client code does something like:
logger = logging.getLogger()
But it breaks (I get no log output to the registered handlers from the logging object in main) when client code does something like:
logger = logging.getLogger('some.unique.path')
Because I am packaging my code as a library to be used with a lot of different clients, I want to have the most extensible logging. That is, I want my module level logging to reference the same logger object as main whenever possible, whether the client code is using a named logger or not.
Here is an example program to reproduce on your end. Imagine test.py is my library of code that I want to always reference any logger created in main.py.
Example Output
% python3 main.py
in main
%
Desired Output
% python3 main.py
in main
hi
%
main.py
import logging
from test import somefunc

LOG = logging.getLogger('some.unique.path')
LOG.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
LOG.addHandler(ch)

def main():
    LOG.info('in main')
    somefunc()

if __name__ == '__main__':
    main()
test.py
import logging

LOG = logging.getLogger(__name__)

def somefunc():
    LOG.info('hi')
This is the approach I would take:
Create a separate logging utility. I have attached the code for it below.
Import this logger utility (or the class or function it provides) wherever needed.
I updated your code assuming the logger utility linked above, and tested it locally on my machine.
Code for main.py
from logger import get_logger
from test import somefunc

LOG = get_logger()

def main():
    LOG.info("in main")
    somefunc()

if __name__ == "__main__":
    main()
Code for test.py
from logger import get_logger

LOG = get_logger()

def somefunc():
    LOG.info("hi")
Code for logger.py - Attaching the code here too
import logging
from logging.handlers import RotatingFileHandler

bytes_type = bytes
unicode_type = str
basestring_type = str

DEFAULT_LOGGER = "default"
INTERNAL_LOGGER_ATTR = "internal"
CUSTOM_LOGLEVEL = "customloglevel"

logger = None
_loglevel = logging.DEBUG
_logfile = None
_formatter = None

def get_logger(
    name=None,
    logfile=None,
    level=logging.DEBUG,
    maxBytes=0,
    backupCount=0,
    fileLoglevel=None,
):
    _logger = logging.getLogger(name or __name__)
    _logger.propagate = False
    _logger.setLevel(level)

    # Reconfigure existing handlers
    has_stream_handler = False
    for handler in list(_logger.handlers):
        if isinstance(handler, logging.StreamHandler):
            has_stream_handler = True
        if isinstance(handler, logging.FileHandler) and hasattr(
            handler, INTERNAL_LOGGER_ATTR
        ):
            # Internal FileHandler needs to be removed and re-set up to be able
            # to set a new logfile.
            _logger.removeHandler(handler)
            continue
        # Reconfigure handler
        handler.setLevel(level)

    if not has_stream_handler:
        stream_handler = logging.StreamHandler()
        setattr(stream_handler, INTERNAL_LOGGER_ATTR, True)
        stream_handler.setLevel(level)
        _logger.addHandler(stream_handler)

    if logfile:
        rotating_filehandler = RotatingFileHandler(
            filename=logfile, maxBytes=maxBytes, backupCount=backupCount
        )
        setattr(rotating_filehandler, INTERNAL_LOGGER_ATTR, True)
        rotating_filehandler.setLevel(fileLoglevel or level)
        _logger.addHandler(rotating_filehandler)

    return _logger
def setup_default_logger(
    logfile=None, level=logging.DEBUG, formatter=None, maxBytes=0, backupCount=0
):
    global logger
    logger = get_logger(
        name=DEFAULT_LOGGER,
        logfile=logfile,
        level=level,
        maxBytes=maxBytes,
        backupCount=backupCount,
    )
    # get_logger() takes no formatter argument, so apply it to the handlers here.
    if formatter:
        for handler in logger.handlers:
            handler.setFormatter(formatter)
    return logger
def reset_default_logger():
    """
    Resets the internal default logger to the initial configuration.
    """
    global logger
    global _loglevel
    global _logfile
    global _formatter
    _loglevel = logging.DEBUG
    _logfile = None
    _formatter = None
    logger = get_logger(name=DEFAULT_LOGGER, logfile=_logfile, level=_loglevel)

# Initially set up the default logger
reset_default_logger()

def loglevel(level=logging.DEBUG, update_custom_handlers=False):
    """
    Set the minimum loglevel for the default logger.

    Reconfigures only the internal handlers of the default logger (e.g. stream and logfile).
    Update the loglevel for custom handlers by using `update_custom_handlers=True`.

    :param int level: Minimum logging-level (default: `logging.DEBUG`).
    :param bool update_custom_handlers: to also update custom handlers attached to this logger, set `update_custom_handlers` to `True`.
    """
    logger.setLevel(level)

    # Reconfigure existing internal handlers
    for handler in list(logger.handlers):
        if hasattr(handler, INTERNAL_LOGGER_ATTR) or update_custom_handlers:
            # Don't update the loglevel if this handler uses a custom one
            if hasattr(handler, CUSTOM_LOGLEVEL):
                continue
            # Update the loglevel for all default handlers
            handler.setLevel(level)

    global _loglevel
    _loglevel = level

def formatter(formatter, update_custom_handlers=False):
    """
    Set the formatter for all handlers of the default logger.

    :param Formatter formatter: default uses the internal LogFormatter.
    :param bool update_custom_handlers: to also update custom handlers attached to this logger, set `update_custom_handlers` to `True`.
    """
    for handler in list(logger.handlers):
        if hasattr(handler, INTERNAL_LOGGER_ATTR) or update_custom_handlers:
            handler.setFormatter(formatter)

    global _formatter
    _formatter = formatter

def logfile(
    filename,
    formatter=None,
    mode="a",
    maxBytes=0,
    backupCount=0,
    encoding=None,
    loglevel=None,
):
    """
    Function to handle the rotating FileHandler.

    :param filename: filename the logs are collected in
    :param mode: file mode
    :param maxBytes: value for roll-over at a pre-determined size; if zero, rollover never occurs
    :param backupCount: if non-zero, the system saves old logfiles by appending extensions
    :param encoding: if not None, open the file with that encoding
    :param loglevel: loglevel to set
    """
    # Step 1: If an internal RotatingFileHandler already exists, remove it
    for handler in list(logger.handlers):
        if isinstance(handler, RotatingFileHandler) and hasattr(
            handler, INTERNAL_LOGGER_ATTR
        ):
            logger.removeHandler(handler)

    # Step 2: If wanted, add the RotatingFileHandler now
    if filename:
        rotating_filehandler = RotatingFileHandler(
            filename,
            mode=mode,
            maxBytes=maxBytes,
            backupCount=backupCount,
            encoding=encoding,
        )

        # Set internal attributes on this handler
        setattr(rotating_filehandler, INTERNAL_LOGGER_ATTR, True)
        if loglevel:
            setattr(rotating_filehandler, CUSTOM_LOGLEVEL, True)

        # Configure the handler and add it to the logger
        rotating_filehandler.setLevel(loglevel or _loglevel)
        logger.addHandler(rotating_filehandler)
Output:
in main
hi
Note:
Do dive deep into the Logger Utility linked to understand all the internal details.
Use logging.basicConfig instead of manually trying to configure a logger. Loggers inherit their configuration from their parent.
import logging
from test import somefunc

LOG = logging.getLogger('some.unique.path')

def main():
    LOG.info('in main')
    somefunc()

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    main()
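Both records then propagate up to the handler that basicConfig() installs on the root logger, so with basicConfig's default format the run prints:
INFO:some.unique.path:in main
INFO:test:hi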

Extending Logger Class - Base Class Method "getChild()" not behaving as expected

I enclose the code sample below. I extended the Logger class to try to make it more user-friendly for our purposes by adding a SumoLogic handler that can be easily utilised to send log messages to SumoLogic. This functionality works as expected and is not the issue. An abbreviated but functional form of my extended class can be seen below:
import sys
import logging

class CustomLogger(logging.getLoggerClass()):
    def __init__(self, logger_name, log_level="INFO"):
        MSG_FORMAT = '%(asctime)s %(levelname)s %(name)s: %(message)s'
        DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
        formatter = logging.Formatter(MSG_FORMAT, DATETIME_FORMAT)

        # Initialise logger instance.
        super(CustomLogger, self).__init__(logger_name)
        self.handlers = []
        self.setLevel(log_level)

        # Create the stdout handler
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(log_level)
        stream_handler.setFormatter(formatter)
        self.addHandler(stream_handler)
        self.debug('Initialised "{}" Logger'.format(logger_name))

    def add_sumologic_handler(self, **kwargs):
        """
        Call this method with a kwargs dictionary to add a Sumologic handler to your logger.
        This is done in the following way:
        ... Comments removed ...
        """
        self.debug(f"Attempting to add Sumologic Handler with the following endpoint settings:\n{kwargs}")
        # ... SumoLogic handler addition code here...

    def remove_sumologic_handler(self):
        """
        Call this method to turn off Sumologic logging, if it has already been activated.
        """
        self.debug("Attempting to remove Sumologic Handler.")
        # ... SumoLogic handler removal code here...
I have found that when I call other methods from the base Logger class, they behave contrary to my expectations. By way of example, I often use the logger.getChild() method in script functions to identify the function that currently has execution focus, when writing log messages. When I call it, I expect it to take my logger and append the given child name as a postfix to my base logger name when writing log messages. However, that is not what is occurring. Instead, my new child logger is created, but with a base log level of WARNING, meaning it is not inheriting the log level of my parent logger.
Here is a test case that features the expected behaviour, that does not use my CustomLogger() class:
import sys
import logging

def initialise_logger(logger_name, log_level="INFO"):
    MSG_FORMAT = '%(asctime)s %(levelname)s %(name)s: %(message)s'
    DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
    formatter = logging.Formatter(MSG_FORMAT, DATETIME_FORMAT)

    # Create the stdout handler
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(log_level)
    stream_handler.setFormatter(formatter)

    # Create the logger
    logger = logging.getLogger(logger_name)
    logger.setLevel(log_level)
    logger.addHandler(stream_handler)
    return logger

logger1 = initialise_logger("expected.logger", "INFO")
logger1.info("Test Message 1")
logger1a = logger1.getChild("child")
logger1a.info("Expected log message - note logger name hierarchy and inherited log level of INFO.")
... and here is an example using my CustomLogger() class extension of Logger:
logger1 = CustomLogger("expected.logger", "INFO")
logger1.info("Test Message 1")
logger1a = logger1.getChild("child")
logger1a.info("Expected log message is not output by the child logger, meaning the parent log level has not been inherited by the getChild() call.")
logger1a.warning("Now a log message is output by the child logger.")
assert isinstance(logger1, CustomLogger), "logger1 is NOT an instance of CustomLogger"
assert isinstance(logger1a, CustomLogger), "logger1a is NOT an instance of CustomLogger"
assert isinstance(logger1a, logging.getLoggerClass()), "logger1a is NOT an instance of Logger"
The above shows that in the case of my CustomLogger class instance, a call to the base Logger class's getChild() method returns an instance of Logger rather than CustomLogger, contrary to my expectation that the child would inherit the parent CustomLogger class, along with its settings.
Is anybody able to explain what is going wrong for me here, or provide a workaround, please?
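A likely explanation, judging from the stock logging internals rather than from anything shown above: getChild() delegates to the module-level logging.Manager, which constructs new loggers with whatever class was registered via logging.setLoggerClass() (plain Logger by default). Since logger1 was constructed directly rather than through getLogger(), the manager also has no record of it, so the child it creates is parented to the root logger, whose default level is WARNING. A minimal sketch of a workaround under those assumptions:

import logging

logging.setLoggerClass(CustomLogger)            # manager now builds CustomLogger instances
logger1 = logging.getLogger("expected.logger")  # registered with the manager
logger1a = logger1.getChild("child")            # a CustomLogger, parented to logger1

Note that CustomLogger.__init__ then runs for every child logger too, so the per-instance handler setup may need a guard against duplicate output.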

Logging across multiple modules with always changing filenames

I have a logging function with a hardcoded logfile name (LOG_FILE):
setup_logger.py
import logging
import sys

FORMATTER = logging.Formatter("%(levelname)s - %(asctime)s - %(name)s - %(message)s")
LOG_FILE = "my_app.log"

def get_console_handler():
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(FORMATTER)
    return console_handler

def get_file_handler():
    file_handler = logging.FileHandler(LOG_FILE)
    file_handler.setFormatter(FORMATTER)
    return file_handler

def get_logger(logger_name):
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)  # better to have too much log than not enough
    logger.addHandler(get_console_handler())
    logger.addHandler(get_file_handler())
    # with this pattern, it's rarely necessary to propagate the error up to parent
    logger.propagate = False
    return logger
I use this in various modules this way:
main.py
from _Core import setup_logger as log

def main(incoming_feed_id: int, type: str) -> None:
    logger = log.get_logger(__name__)
    # ...rest of my code

database.py

from _Core import setup_logger as log

logger = log.get_logger(__name__)

class Database:
    # ...rest of my code

etl.py

import _Core.database as db
from _Core import setup_logger as log

logger = log.get_logger(__name__)

class ETL:
    # ...rest of my code
What I want to achieve is to always change the logfile's path and name on each run based on arguments passed to the main() function in main.py.
Simplified example:
If main() receives the following arguments: incoming_feed_id = 1, type = simple_load, the logfile's name should be 1simple_load.log.
I am not sure what the best practice for this is. What I came up with is probably the worst thing to do: add a log_file parameter to the get_logger() function in setup_logger.py, so I can set a filename from main() in main.py. But in that case I would need to pass the parameter from main to the other modules as well, which I do not think I should do, as for example the Database class is not even used in main.py.
I don't know enough about your application to be sure this'll work for you, but you can just configure the root logger in main() by calling get_logger('', filename_based_on_cmdline_args); anything logged through the other loggers is then passed to the root logger's handlers for processing, as long as the configured logger levels allow it. The way you're doing it now opens multiple handlers pointing at the same file, which seems sub-optimal. The other modules can just use logging.getLogger(__name__) rather than log.get_logger(__name__).
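A minimal sketch of that suggestion, assuming get_logger() in setup_logger.py is given an optional log_file parameter (a hypothetical extension, not part of the module above):

main.py

import logging
from _Core import setup_logger as log

def main(incoming_feed_id: int, type: str) -> None:
    # '' names the root logger; handlers attached to it receive records
    # propagated from every logging.getLogger(__name__) in the other modules.
    # log_file is the hypothetical new parameter.
    log.get_logger('', log_file=f'{incoming_feed_id}{type}.log')
    logger = logging.getLogger(__name__)
    logger.info('run started')  # lands in 1simple_load.log for main(1, 'simple_load')

database.py and the other modules then only need logger = logging.getLogger(__name__), with no handler setup of their own.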

How can I implement logging in python for multiple modules

Env Summary:
Hi, I have a Python project where I request some information from a storage system and display it on screen.
Env:
module executer
Module rest has a class RestThreePar
Module storagebox has a class StorageBox
Module locater has a class Locater
Problem:
I want to implement a single module/class for logging for the whole project, which needs to log to 4 different files (each module has its own log file). When I import logging separately for each module it works as expected, but whenever I try to make a separate class and import that one (like from logger import Logger) it goes wrong (I am not sure, but it looks like it writes one line multiple times to the files).
I am not a developer and don't know what to do next.
I have tried to use the Python documentation but had no success.
https://docs.python.org/3.6/howto/logging-cookbook.html
import logging
import os

class Logger:
    def __init__(self, cls_name, file):
        self.cls_name = cls_name
        current_dir = os.getcwd()
        self.file = f'{current_dir}/logs/{file}'

    def log(self):
        logger = logging.getLogger(self.cls_name)
        logger.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s,%(levelname)s,%(name)s,%(message)s')
        file_handler = logging.FileHandler(self.file)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        return logger

    def log_info(self, msg):
        logger = self.log()
        logger.info(msg)

    def log_exception(self, msg):
        logger = self.log()
        logger.exception(msg)

if __name__ == '__main__':
    pass
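The duplicated lines are most likely because log() runs for every message: logging.getLogger(self.cls_name) returns the same logger object each time, but a new FileHandler is added on every call, so each record is emitted once per accumulated handler. A minimal sketch of one fix, configuring each named logger only once (the file layout is taken from the question):

import logging
import os

def get_module_logger(cls_name, file):
    """Return a logger writing to logs/<file>, adding its handler only once."""
    logger = logging.getLogger(cls_name)
    if not logger.handlers:  # configure only on first use
        logger.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s,%(levelname)s,%(name)s,%(message)s')
        file_handler = logging.FileHandler(os.path.join(os.getcwd(), 'logs', file))
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger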

logging error in python

I have been writing simple scripts and I am trying to use a logger to generate a log for each function in the scripts.
1) Based on the function name I create a logger file handler and I try to write logs using that handler. I also delete the previously existing file with the same name.
2) At the end of the function I close the handler.
My problems are:
1) Even though I close the handler, the next time I run the same function I get an error that the file I am trying to delete (as part of setting up the logger file handler) is still being used.
2) Also, the logger prints everything to the console, which I don't want; I just want it to write everything to the file.
Here are the logger functions:
def setLogger(path):
    """
    #purpose: Initializes basic logging directory and file
    """
    LOG_FILENAME = path + "\\" + "log.txt"
    #logging.basicConfig(filename=LOG_FILENAME,
    #                    format='%(levelname)s %(asctime)s %(message)s', level=logging.INFO
    #                    )
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    file_handler = logging.FileHandler(LOG_FILENAME)
    file_handler.setLevel(logging.INFO)
    formatter = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger

def unsetLogger(logger):
    """
    #purpose: performs a basic shutdown of logger
    """
    logger.handlers[0].close()
    logger.removeHandler(logger.handlers[0])
    logging.shutdown
The way I use them is, for example:

def fun():
    os.remove(path)
    logger = setLogger(path)
    logging.info("hi")  # this writes to the file and prints on the console as well
    unsetLogger(logger)

If I run the function fun() once, it's all good. But if I run it again, I get that can't-delete error for the log file.
Thanks in Advance.
learningNinja
After making some slight modifications, I came up with the following test to try to reproduce your error, but I don't get any errors.
import os
import logging

def setLogger(path):
    """
    #purpose: Initializes basic logging directory and file
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # Simplified log file path (I just use the full value passed in, and don't append "\log.txt")
    file_handler = logging.FileHandler(path)
    file_handler.setLevel(logging.INFO)
    formatter = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger

def unsetLogger(logger):
    """
    #purpose: performs a basic shutdown of logger
    """
    logger.handlers[0].close()
    logger.removeHandler(logger.handlers[0])
    logging.shutdown()  # note the parentheses; without them this line never actually calls it

def fun():
    try:
        # Was getting an error trying to remove a file that didn't exist on
        # first execution...
        os.remove("log.txt")
    except OSError:
        pass
    logger = setLogger("log.txt")
    logging.info("hi")
    unsetLogger(logger)

fun()
fun()
fun()
See if there is anything I'm doing differently than your actual code and maybe that might help you.
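One hedged guess from the symptoms, beyond the test above: the console output suggests your root logger has more than one handler (for example a StreamHandler left over from an earlier basicConfig() call), in which case logger.handlers[0] in unsetLogger() would close and remove that handler instead of the FileHandler, leaving the logfile open and undeletable on Windows. A variant that removes every handler explicitly:

def unsetLogger(logger):
    # Close and remove all handlers instead of assuming the file handler
    # sits at index 0 of logger.handlers.
    for handler in list(logger.handlers):
        handler.close()
        logger.removeHandler(handler)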
