import logging function from a module - python

I'm trying to import a function which initializes two different logging handlers with different levels. The problem is that for option 1 below, I'm getting the root logger, and for option 2, I can't get any logs to print to screen.
Does anybody have any thoughts or suggestions that might help?
Option 1
TestModule:
def set_logger(app_name=argv[0][:-3]):
    """Configure the root logger: DEBUG records go to test.log, INFO+ echo to the console.

    NOTE(review): app_name is accepted but never used in this variant.
    Indentation restored -- the posted snippet had it flattened.
    """
    # basicConfig attaches a DEBUG-level FileHandler ("test.log") to the root logger.
    logging.basicConfig(
        level=logging.DEBUG,
        format='[%(levelname)s][%(module)s][%(asctime)s] - %(message)s',
        filename="test.log"
    )
    # Second handler: mirror INFO and above to the terminal.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('[%(levelname)s][%(module)s][%(asctime)s] - %(message)s')
    console.setFormatter(formatter)
    # '' names the root logger, so every module's records pass through this handler.
    logging.getLogger('').addHandler(console)
Option 2 TestModule:
def set_logger(app_name=argv[0][:-3]):
    """Build a logger named *app_name* with a console handler (INFO+) and a file handler (DEBUG+).

    Indentation restored -- the posted snippet had it flattened.
    """
    formatter = logging.Formatter('[%(levelname)s][%(module)s][%(asctime)s] - %(message)s')
    # A NAMED logger, not the root: callers must fetch it by the same name.
    logger = logging.getLogger(app_name)
    stream_log = logging.StreamHandler()
    stream_log.setLevel(logging.INFO)
    stream_log.setFormatter(formatter)
    file_log = logging.FileHandler("test.log")
    file_log.setLevel(logging.DEBUG)
    file_log.setFormatter(formatter)
    logger.addHandler(stream_log)
    logger.addHandler(file_log)
    # NOTE(review): the configured logger is never returned to the caller.
in Script:
# Driver script for Option 1/2 above; colon and indentation restored.
from Module import set_logger
import logging

if __name__ == "__main__":
    set_logger()
    # Logs via the module-level logging functions, i.e. through the ROOT logger.
    logging.info("start_app")
What am I missing here?

In option 2, you are initializing a logger via logging.getLogger()
You need to return the logger in that function and call it using the returned logger,
# Colon and indentation restored; capture the logger returned by set_logger().
if __name__ == "__main__":
    logger = set_logger()
    logger.info("start_app")

Related

Separate local logger with root log python

I am using logging module in python. In my main.py file I am using two logger.
Root logger (To get logs from multiple modules in same directory)
Local logger (To log specific information)
I want the local logger's information to be separate from the root logger's. But when I create the separate logger, the local logger's output also shows up in the root logger's log.
Here is the sample of how I am doing this
# main.py
import logging
def setup_logger(filename, name=''):
    """Return a configured logger.

    With no name, configure the ROOT logger via basicConfig (append-mode file);
    with a name, build a separate logger with its own FileHandler.
    Indentation reconstructed -- the posted snippet had it flattened.
    """
    if name == '':
        logging.basicConfig(filename=filename,
                            format='%(asctime)s %(funcName)s %(levelname)s %(message)s',
                            filemode='a')
        logger = logging.getLogger()
    else:
        # Dead code kept verbatim from the question (an earlier attempt left as a string).
        """
        handler = logging.FileHandler(filename, mode = 'a')
        handler.setFormatter(logging.Formatter('%(asctime)s %(funcName)s %(levelname)s %(message)s'))
        logger = logging.getLogger(name)
        logger.addHandler(handler)
        """
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        handler = logging.FileHandler(filename)
        handler.setFormatter(formatter)
        logger = logging.getLogger(name)
        logger.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        return logger
    logger.setLevel(logging.DEBUG)
    return logger
# Root logger for shared messages; a named logger for the separate local log.
logger = setup_logger('main.log')
local_logger = setup_logger('local_log.log', 'local_log')
# other file under root log
# The "__main__." prefix makes this a CHILD logger, so its records propagate
# up toward the root logger's handlers.
logger = logging.getLogger("__main__." + __name__)
You have to stop propagation if you don't want the local loggers to send their logs to the root loggers handlers:
logger.propagate = False
This part of the documentation explains it well: https://docs.python.org/3/howto/logging.html#logging-flow

Python Custom logging handler to create each log file per key in the dictionary

I have a dictionary with some sample data like below
{"screener": "media", "anchor": "media","reader": "media"}
and I wanted to create a log file for each key in the dictionary. I'm planning to use this logging in streaming job which will get reused for every batch in the streaming. here I'm planning to use the rotating file handler per key as well.
here is my snippet
import logging
from logging.handlers import RotatingFileHandler
import time
# One shared logger; per-key handlers are attached/detached around each batch.
logger = logging.getLogger('my_logger')
logger.setLevel(logging.DEBUG)
# Keys name the per-key log files; the values are never read in this snippet.
dict_store = {"screener": "media", "anchor": "media", "reader": "media"}
# Cache of one RotatingFileHandler per key, reused across batches.
dict_log_handler = {}
def createFileHandler(name):
    """Return a RotatingFileHandler writing to file *name* (~2 GB per file, 10 backups).

    Indentation restored -- the posted snippet had it flattened.
    """
    handler = RotatingFileHandler(name, maxBytes=2000000000, backupCount=10)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    return handler
def runBatch():
    """Emit a burst of log lines per key, attaching that key's file handler only while logging.

    Relies on the module-level `logger`, `dict_store`, `dict_log_handler` and
    `createFileHandler` defined above. Indentation reconstructed from the
    flattened snippet.
    """
    print("logging batch")
    for name in dict_store.keys():
        print(name)
        if name in dict_log_handler:
            print(f"getting logger from dict_handler {name}")
            handler = dict_log_handler[name]
        else:
            handler = createFileHandler(name)
            dict_log_handler[name] = handler
        # Attach, log, detach: routes this burst to just this key's file.
        logger.addHandler(handler)
        logger.info('Hello, world!')
        logger.info('Hello, world!')
        logger.info('Hello, world!')
        logger.info('Hello, world!')
        logger.info('Hello, world!')
        logger.info('Hello, world!')
        logger.removeHandler(handler)
        # NOTE(review): original indentation was lost; the sleep is assumed to
        # sit inside the per-key loop -- confirm against the author's intent.
        time.sleep(0.1)


for i in range(0, 3):
    runBatch()
It is working as expected currently. I'm just thinking of implementing similar stuff inside the overriding or creating a custom handler (like if we pass a name, it should automatically do this stuff) and the overall expectation is it should not affect the performance.
My question is: I want to wrap this in a class and use it directly. Is that possible?
Your question is not clear about what exactly you want to do, but if the idea is to use multiple loggers, as your code shows, then you can do something like this:
logging.getLogger(name) this is the method which is used to access the specific logger, in your code you are using the same logger but using addHandler and removeHandler to switch to specific logger.
You can create multiple loggers like this:
import logging
from logging.handlers import RotatingFileHandler
dict_store = {"screener": "media", "anchor": "media", "reader": "media"}

# One logger per key, each with its own rotating file. Run this setup exactly
# once; afterwards fetch a logger anywhere with logging.getLogger(<key>).
# Indentation restored -- the posted snippet had the loop body flattened.
for name in dict_store:
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    handler = RotatingFileHandler(name, maxBytes=2000000000, backupCount=10)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
You can wrap the above code in your own logging class/method and use it as needed. Keep in mind this needs to be called only once.
Next time you can access the specific log and use the logging method:
logger = logging.getLogger(<logger_name>)
logger.debug("debug")
logger.info("info")
logger.warning("warning")
logger.error("error")
logger.critical("critical")

Python Logging - How to inherit root logger level & handler

I am a python newbie, trying to implement logging into my code. I have two modules
main.py
submodule.py
main.py
# main.py as posted in the question.
import logging
from logging.handlers import RotatingFileHandler
import submodule
# NOTE(review): the next two imports duplicate the ones above (as posted).
import logging
from logging.handlers import RotatingFileHandler
# Configures the __name__ logger, NOT the root -- this is why the submodule
# does not inherit the handler (see the answer below the question).
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
fh = RotatingFileHandler('master.log', maxBytes=2000000, backupCount=10)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.debug('DEBUG LEVEL - MAIN MODULE')
logger.info('INFO LEVEL - MAIN MODULE')
submodule.loggerCall()
submodule.py
import logging
from logging.handlers import RotatingFileHandler
def loggerCall():
    """Log one DEBUG and one INFO record via this module's own logger and file handler.

    Indentation restored -- the posted snippet had it flattened.
    """
    logger = logging.getLogger(__name__)
    # logger.setLevel(logging.DEBUG)
    fh = RotatingFileHandler('master.log', maxBytes=2000000, backupCount=10)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    logger.debug('SUBMODULE: DEBUG LOGGING MODE : ')
    logger.info('Submodule: INFO LOG')
    return
I thought that as long as I call getLogger from my submodule, it would inherit the log level & handler details from the root logger. However, in my case, I have to specify the log level and handler again in the submodule to get them to print to the same log file.
Also, If I have lots of methods, and classes inside my submodule. How can I go about it without having to define my log level & handler again.
Idea is to have a single log file with main, and sub modules printing in the same log based on the log level set in the main module.
The problem here is that you're not initializing the root logger; you're initializing the logger for your main module.
Try this for main.py:
# Answer's main.py: configure the ROOT logger so submodules inherit the handler.
import logging
from logging.handlers import RotatingFileHandler
import submodule
logger = logging.getLogger() # Gets the root logger
logger.setLevel(logging.DEBUG)
fh = RotatingFileHandler('master.log', maxBytes=2000000, backupCount=10)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
logger.debug('DEBUG LEVEL - MAIN MODULE')
logger.info('INFO LEVEL - MAIN MODULE')
submodule.loggerCall()
Then try this for submodule.py:
def loggerCall():
    """Log through a module-named logger; records propagate up to the root handlers.

    Indentation restored. NOTE(review): assumes `import logging` at the top of
    submodule.py -- the answer snippet does not show it.
    """
    logger = logging.getLogger(__name__)
    logger.debug('SUBMODULE: DEBUG LOGGING MODE : ')
    logger.info('Submodule: INFO LOG')
    return
Since you said you wanted to send log messages from all your submodules to the same place, you should initialize the root logger and then simply use the message logging methods (along with setlevel() calls, as appropriate). Because there's no explicit handler for your submodule, logging.getLogger(__name__) will traverse the tree to the root, where it will find the handler you established in main.py.

Logging to two files with different settings

I am already using a basic logging config where all messages across all modules are stored in a single file. However, I need a more complex solution now:
Two files: the first remains the same.
The second file should have some custom format.
I have been reading the docs for the module, but they are very complex for me at the moment. Loggers, handlers...
So, in short:
How to log to two files in Python 3, ie:
import logging
# ...
logging.file1.info('Write this to file 1')
logging.file2.info('Write this to file 2')
You can do something like this:
import logging
# Shared formatter used by every logger built below.
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')

def setup_logger(name, log_file, level=logging.INFO):
    """To setup as many loggers as you want.

    Returns a logger called *name* writing to *log_file* at *level*.
    Indentation restored -- the posted snippet had it flattened.
    """
    handler = logging.FileHandler(log_file)
    handler.setFormatter(formatter)
    logger = logging.getLogger(name)
    logger.setLevel(level)
    logger.addHandler(handler)
    return logger
# first file logger
logger = setup_logger('first_logger', 'first_logfile.log')
logger.info('This is just info message')

# second file logger
super_logger = setup_logger('second_logger', 'second_logfile.log')
super_logger.error('This is an error message')

def another_method():
    # using logger defined above also works here
    logger.info('Inside method')
def setup_logger(logger_name, log_file, level=logging.INFO):
    """Attach both a file handler (overwrite mode) and a console handler to *logger_name*.

    Returns None; fetch the logger afterwards with logging.getLogger(logger_name).
    Indentation restored -- the posted snippet had it flattened.
    """
    l = logging.getLogger(logger_name)
    formatter = logging.Formatter('%(message)s')
    fileHandler = logging.FileHandler(log_file, mode='w')
    fileHandler.setFormatter(formatter)
    streamHandler = logging.StreamHandler()
    streamHandler.setFormatter(formatter)
    l.setLevel(level)
    l.addHandler(fileHandler)
    l.addHandler(streamHandler)
# NOTE(review): txtName is not defined in this snippet -- it must exist in the
# caller's scope. The first filename is txtName + "txt" (no dot); presumably
# ".txt" was intended -- confirm with the author.
setup_logger('log1', txtName+"txt")
setup_logger('log2', txtName+"small.txt")
logger_1 = logging.getLogger('log1')
logger_2 = logging.getLogger('log2')
logger_1.info('111messasage 1')
logger_2.info('222ersaror foo')

Python TimedRotatingFileHandler logs to a file and stderr

I am trying to include simple logging into my application using TimedRotatingFileHandler. However I get the output both into the designated file and into the standard error. I reduced the problem to a small example:
import logging, logging.handlers
import sys
# Question code: basicConfig adds a stderr handler to the root logger, and the
# TimedRotatingFileHandler is added on top -- hence output in BOTH places.
# Indentation of the loop restored.
logging.basicConfig(format='%(asctime)s %(message)s')
loghandler = logging.handlers.TimedRotatingFileHandler("logfile", when="midnight")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(loghandler)

for k in range(5):
    logger.info("Line %d" % k)
I get 5 log lines both in my 'logfile' and this program's stderr. What am I doing wrong?
This is the way you can have the print just on the log files and not to stdout/stderr:
import logging
from logging.handlers import TimedRotatingFileHandler
# Answer code: use a NAMED logger and skip basicConfig, so no stderr handler is
# ever attached -- output goes only to the rotating file. Loop indentation restored.
logHandler = TimedRotatingFileHandler("logfile", when="midnight")
logFormatter = logging.Formatter('%(asctime)s %(message)s')
logHandler.setFormatter(logFormatter)
logger = logging.getLogger('MyLogger')
logger.addHandler(logHandler)
logger.setLevel(logging.INFO)

for k in range(5):
    logger.info("Line %d" % k)
logging.basicConfig sets up a handler that prints to standard error.
logger.addHandler(loghandler) sets up a TimedRotatingFileHandler.
Do you wish to squelch output to standard error?
If so, simply remove the call to logging.basicConfig.

Categories