How can I write different data into two different logs? - python

I created two separate logs with two different names (windows). One is a rotating log and the other not. How can I prevent the data that I write to the rotating log from logging to the non-rotating log?
I am not very log savvy.
importing module
import logging
from logging.handlers import RotatingFileHandler
def main():
def normalLogging(fn1):
global normalLogger
#Create and configure logger
logging.basicConfig(filename=fn1, format='%(asctime)s %(message)s', filemode='w')
#Creating an object
normalLogger=logging.getLogger()
#Setting the threshold of logger to DEBUG
normalLogger.setLevel(logging.DEBUG)
def rotatingLog(fn2):
global rotatingLogger
# start the roll-screen logger too
rotatingLogger = logging.getLogger(fn2)
rotatingLogger.setLevel(logging.DEBUG)
# add a rotating logger
handlerScreen = RotatingFileHandler(fn2, maxBytes=1000, backupCount=3)
rotatingLogger.addHandler(handlerScreen)
def normalTest():
#Test messages to the rotating log
normalLogger.debug("normalLogger.debug - Harmless debug Message")
normalLogger.info("normalLogger.info - Just an information")
normalLogger.warning("normalLogger.warning - Its a Warning")
normalLogger.error("normalLogger.error - Did you try to divide by zero")
normalLogger.critical("normalLogger.critical - Internet is down")
def rotatorTest():
for i in range(1, 100):
#Test messages to the rotating log
rotatingLogger.debug("rotatingLogger.debug %s - Harmless debug Message" % i)
rotatingLogger.info("rotatingLogger.info %s - Just an information" % i)
rotatingLogger.warning("rotatingLogger.warning %s - Its a Warning" % i)
rotatingLogger.error("rotatingLogger.error %s - Did you try to divide by zero" % i)
rotatingLogger.critical("rotatingLogger.critical %s - Internet is down" % i)
# fn2 = "rotatorLog"
rotatingLog("rotatorLog")
rotatorTest()
# fn1 = "normalLog"
normalLogging("normalLog")
normalTest()
rotatorTest()
if __name__ == '__main__':
main()
The rotating log maintains it's own unique data, but the normal log has data from the rotating log. I expected that the data would be unique to each log since I write to them separately, but that's not the case.

All you need to do is rotatingLogger.propagate = False to stop it's logs being sent to the root logger which is used for normalLogger. basicConfig configures the root logger and logging.getLogger without a name returns the root logger.

First: use name for normal logger - ie. logging.getLogger(fn1)
Second: use FileHandler instead of basicConfig for normal logger.
import logging
from logging.handlers import RotatingFileHandler
from logging import FileHandler
def main():
def normalLogging(fn1):
global normalLogger
global normalLogger
normalLogger = logging.getLogger(fn1)
normalLogger.setLevel(logging.DEBUG)
handlerScreen = FileHandler(fn1)
handlerScreen.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
normalLogger.addHandler(handlerScreen)
def rotatingLog(fn2):
global rotatingLogger
rotatingLogger = logging.getLogger(fn2)
rotatingLogger.setLevel(logging.DEBUG)
handlerScreen = RotatingFileHandler(fn2, maxBytes=1000, backupCount=3)
handlerScreen.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
rotatingLogger.addHandler(handlerScreen)
# ... rest ...
As I remember basicConfig creates logger which later is used by all loggers so they use the same settings.

Related

Create two separate log file from one handler with different levels

I have a big project, I want to add logging to it with the python logging module.
What I want is: Log all levels (Debug and above) in main.log, and other log file takes a certain level (Which I want is Critical and error) from main.log and log it in sub.log.
So main.log will log every level (Debug and above),
And sub.log log Critical and error levels only.
what is the best way to do that, What comes to my mind is:
Do a function that will walk all over the main.log and search if there is any (Critical or Error level) in line, copy it and paste it in sub.log (but I am afraid this will be not efficient since I have maybe hundreds of lines)
This is logger.py
# logging.py
import logging
from datetime import datetime
import time
FORMAT = '[%(levelname)s]: %(asctime)s : %(filename)s :%(lineno)s: %(funcName)s() =>: %(message)s'
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
PATH = '../../../scripts/utils/log_modules/logs/'
LOGFILE = f"logs_{datetime.now().strftime('%d%m%Y_%H%M%S')}.log"
def log(name, file_name=LOGFILE, level=logging.DEBUG):
file_name = PATH + file_name
logger = logging.getLogger(name)
logger.setLevel(level)
formatter = logging.Formatter(FORMAT, TIME_FORMAT)
file_h = logging.FileHandler(file_name)
file_h.setFormatter(formatter)
logger.addHandler(file_h)
return logger
# two files separate
and here I call the function and create an object, and start logging:
module2.py:
# From random module.py
from logger import *
add_log = log(__name__)
# **I don't** want to make other object from the function just one.
def foo():
# do something
add_log.info("done correctly") # This will be logged in Main log Only
add_log.erorr("There is an error") # This will be logged in Main.log And Sub.log TOO
if __name__ == '__main__':
foo()
You should add a second handler to your logger, and have the logging machinery to handle everything:
def log(name, file_name=LOGFILE, level=logging.DEBUG):
file_name = PATH + file_name
logger = logging.getLogger(name)
logger.setLevel(level)
formatter = logging.Formatter(FORMAT, TIME_FORMAT)
# build and add main handler
file_h = logging.FileHandler(file_name)
file_h.setFormatter(formatter)
logger.addHandler(file_h)
# build and add sub handler
file_h = logging.FileHandler('sub_' + file_name)
file_h.setFormatter(formatter)
file_h.set_level(logging.ERROR)
logger.addHandler(file_h)
return logger

Logging level to be changed in the middle of a scripts

I have an application which continues running, the application won't be stopped, it will idle when there are no instructions by a client, and listen to the external event and react if there is any instruction input by the user at anytime
There is a configuration file which can be changed while the application is running, and the logging level is one of them. myLoggingLevel is the param below.
Is there anyway to change the logging level while the application is running?
I am using ConfigParser.RawConfigParser() for the configuration change.
import time
import logging
import datetime
def getLogger(loggerName='myLoggerName', logLevel='INFO', log_path='C:/logs/'):
class Formatter(logging.Formatter):
def formatTime(self, record, datefmt=None):
return (datetime.datetime.utcnow()).strftime('%H:%M:%S')
logLevel = logLevel.upper()
levels = {'DEBUG' : logging.DEBUG,
'INFO' : logging.INFO,
'WARNING' : logging.WARNING,
'ERROR' : logging.ERROR,
'CRITICAL' : logging.CRITICAL}
today = datetime.datetime.utcnow().strftime('%Y-%m-%d')
full_log_path = log_path + '%s.%s.log' % (loggerName, today)
logger = logging.getLogger(loggerName+'.'+today)
if not len(logger.handlers):
logger.setLevel(levels[logLevel])
fh = logging.FileHandler(full_log_path)
formatter = Formatter('%(asctime)s.%(msecs)03d | %(message)s', datefmt='%H:%M:%S')
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.propagate = False
logger.info('loggerName: %s' % loggerName)
return logger
def run(myLoggingLevel):
while True:
log = getLogger(loggerName='testLogLevel', logLevel=myLoggingLevel)
log.debug('I am in debug')
log.info('I am in info')
time.sleep(3)
run(myLoggingLevel='debug')
In order to change the logging level during the execution, you can delete your logger and create a new one based on the new specs in your configuration file. You can create your own watchdog for instance that tracks whether your configuration file has changed or not and update accordingly the logger level as suggested. Or you can create an event for that via threading.Event. You may want to have a look at the threading module.
Finally i did a function, run it on timeout or on event e.g. a button click to modify the logging level.

pythonic way to setup logging with a module involved

I wrote a simple bot.py that defines a logger. The logger worked fine.
But then I needed to keepalive the bot.py script, so I wrote a keepalive.py that is called hourly and uses a sockets to identify when bot.py died, and restart it.
But I don't see the right/pythonic way to setup and share the logger between keepalive.py and bot.py.
I experimented with trying to pass the logger instance to bot.py when it's called from the keepalive.py, and otherwise creating it (if bot.py is run directly), but it would require me pass around the logger as a parameter to all the functions I call (example, the demo() function). This is gross.
Whats the best way to setup a logger once, and then use it everywhere else without doing too much passing around, etc?
bot.py
import logging
def demo():
#this won't work unless I pass in the logger as a parameter
#logger.info("demonstrate the Inner demo() called from Inner main works")
pass
def main(logger=None):
if logger is None:
# Enable logging
logging.basicConfig(
filename='log_experiment.txt',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
logger.info("I created logger instance inside Inner Main()")
else:
logger.info("I am using the logger passed to Inner main() as a parameter")
demo()
if __name__ == "__main__":
main()
keepalive.py
import logging
import socket
import sys
# Enable logging
logging.basicConfig(
filename='log_experiment.txt',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
logger.info("I created logger instance inside keepalive")
lock_socket = None # we want to keep the socket open until the very end of
# our script so we use a global variable to avoid going
# out of scope and being garbage-collected
def is_lock_free():
global lock_socket
lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
lock_id = "straits.keepalive" # this should be unique. using your username as a prefix is a convention
lock_socket.bind('\0' + lock_id)
logging.debug("Acquired lock %r" % (lock_id,))
return True
except socket.error:
# socket already locked, task must already be running
logging.info("Failed to acquire lock %r" % (lock_id,))
return False
if not is_lock_free():
print("alive")
sys.exit()
print("**RESTART REQUIRED")
# then, either include the rest of your script below,
# or import it, if it's in a separate file:
import inner as bot
bot.main(logger)

Python - Logger over multiple files

I created a module named log.py where a function defines how the log will be registered. Here is the atomic code:
import logging
import time
def set_up_log():
"""
Create a logging file.
"""
#
# Create the parent logger.
#
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
#
# Create a file as handler.
#
file_handler = logging.FileHandler('report\\activity.log')
file_handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(filename)s - %(name)s - % (levelname)4s - %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
#
# Start recording.
#
logger.info('______ STARTS RECORDING _______')
if __name__=='__main__':
set_up_log()
A second module named read_file.py is using this log.py to record potential error.
import logging
import log
log.set_up_log()
logger = logging.getLogger(__name__)
def read_bb_file(input_file):
"""
Input_file must be the path.
Open the source_name and read the content. Return the result.
"""
content = list()
logger.info('noi')
try:
file = open(input_file, 'r')
except IOError, e:
logger.error(e)
else:
for line in file:
str = line.rstrip('\n\r')
content.append(str)
file.close()
return content
if __name__ == "__main__":
logger.info("begin execution")
c = read_bb_file('textatraiter.out')
logger.info("end execution")
In the command prompt lauching read_file.py, I get this error:
No handlers could be found for logger "__main__"
My result in the file is the following
2014-05-12 13:32:58,690 - log.py - log - INFO - ______ STARTS RECORDING _______
I read lots of topics here and on Py Doc but it seems I did not understand them properly since I have this error.
I add I would like to keep the log settlement appart in a function and not define it explicitely in my main method.
You have 2 distinct loggers and you're only configuring one.
The first is the one you make in log.py and set up correctly. Its name however will be log, because you have imported this module from read_file.py.
The second logger, the one you're hoping is the same as the first, is the one you assign to the variable logger in read_file.py. Its name will be __main__ because you're calling this module from the command line. You're not configuring this logger.
What you could do is to add a parameter to set_up_log to pass the name of the logger in, e.g.
def set_up_log(logname):
logger = logging.getLogger(logname)
That way, you will set the handlers and formatters for the correct logging instance.
Organizing your logs in a hierarchy is the way logging was intended to be used by Vinay Sajip, the original author of the module. So your modules would only log to a logging instance with the fully qualified name, as given by __name__. Then your application code could set up the loggers, which is what you're trying to accomplish with your set_up_log function. You just need to remember to pass it the relevant name, that's all. I found this reference very useful at the time.

Nothing is written to file when I override `log_message()` in `SimpleHTTPRequestHandler`

I have the following code for a webserver. I have a problem that nothing is printed in httpd.log- and I'm clueless why. The file gets created and if I add a print()statement to log_message() it is printed out, but nothing ever is written to the file.
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
import logging
from logging.handlers import RotatingFileHandler
class MyHandler(SimpleHTTPRequestHandler):
def __init__(self, *args):
self.logger = logging.getLogger("httpd")
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh = RotatingFileHandler("httpd.log", mode='wa',
maxBytes=1 << 20, backupCount=2)
fh.setFormatter(formatter)
fh.setLevel(logging.INFO)
self.logger.addHandler(fh)
self.logger.info("Creating HTTP Request Handler")
super(SimpleHTTPRequestHandler, self).__init__(*args)
def log_message(self, format, *args):
self.logger.info("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def main():
server = HTTPServer(('', 80), MyHandler)
server.serve_forever()
if __name__ == '__main__':
main()
This is on Python 3.1.3
A logger can have more than one handler. Different handlers may have different log levels. The logger has its own log level which is unaffected by handlers and passes messages with appropriate log levels to the handlers to do with as they see fit.
So while your handler was interested in INFO level messages your logger was ignoring them.
The default log level of WARNING is inherited from the root logger.
I needed to call self.logger.setLevel(logging.INFO) in addition to calling fh.setLevel()
If someone explains why, I'll accept that answer :)
open(LOGFILE, "w+").write("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), format%args))
This is something you may be interested.

Categories