Python logging does not work in real time - python

I have a logging widget in Tkinter (ScrolledText) with a TextHandler class that handles log records and prints them in the widget:
class TextHandler(logging.Handler):
    def __init__(self, text):
        # run the regular Handler __init__
        logging.Handler.__init__(self)
        # Store a reference to the Text it will log to
        self.text = text

    def emit(self, record):
        msg = self.format(record)
        def append():
            self.text.configure(state='normal')
            self.text.insert(Tkinter.END, msg + '\n')
            self.text.configure(state='disabled')
            # Autoscroll to the bottom
            self.text.yview(Tkinter.END)
        self.text.after(0, append)
st = ScrolledText.ScrolledText(self, width=190, height=9, state='disabled')
st.configure(font='TkFixedFont')
st.place(x=0, y=539)

text_handler = TextHandler(st)

# Logging configuration
logging.basicConfig(filename='test.log',
                    level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')

# Add the handler to the root logger
self.logger = logging.getLogger()
self.logger.addHandler(text_handler)
I then call logging.info(msg) to log messages, but there is a problem. Everything works well, except that when the call happens right before some processing (for example, some work on lists), my log messages only appear after that processing finishes:
logging.info("message")
print "message"
for topic in news:
    ...
The print statement works fine here, but the logging message only shows up in the widget after the loop ends.
So ... what is the problem?

You should not use after(0, ...). I'm not sure if it's the only problem, but it's definitely one problem. You are starving the event handler -- the "idle" queue will never empty, so it doesn't have the chance to service normal events. You have, in effect, created an infinite loop.
You should give a small non-zero interval, which will help this problem.
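For example, a minimal sketch of the emit method from the question, with the only change being a small non-zero delay (10 ms here is an arbitrary choice):
def emit(self, record):
    msg = self.format(record)
    def append():
        self.text.configure(state='normal')
        self.text.insert(Tkinter.END, msg + '\n')
        self.text.configure(state='disabled')
        self.text.yview(Tkinter.END)
    # Schedule on the Tk event loop with a small non-zero delay
    # so normal events still get a chance to be serviced.
    self.text.after(10, append)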

Related

Python polling library show message for each iteration

A polling method is implemented and it runs every second to check the request status. Is it possible to add a log message for each iteration of the polling?
result = poll(
    lambda: getSomething(),
    timeout=100,
    step=1,
    check_success=IsPollingSuccessfull
)
I need something like,
Waiting for the response + time
Waiting for the response + time
Waiting for the response + time
Waiting for the response + time
EDIT:
I want to print the log to the console.
Have you considered Python's logging module? Here is the documentation.
You can create a logger instance that saves all messages to a file. Then you can use it everywhere in your code and log anything you'd like at different logging levels.
Here is how I create and use the logger:
# Method to create an instance of a logger
import logging

def set_logger(context, file_name, verbose=False):
    logger = logging.getLogger(context)
    logger.setLevel(logging.DEBUG if verbose else logging.INFO)
    formatter = logging.Formatter(
        '[%(asctime)s][%(levelname)s]:' + context + ':[%(filename).30s:%(funcName).30s:%(lineno)3d]:%(message)s',
        datefmt='%Y-%m-%d %H:%M:%S\x1b[0m')
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG if verbose else logging.INFO)
    console_handler.setFormatter(formatter)
    logger.handlers = []
    logger.addHandler(console_handler)
    # path_to_save_logger is a placeholder for the directory where the log file should go
    file_handler = logging.FileHandler(path_to_save_logger + file_name)
    file_handler.setLevel(logging.DEBUG if verbose else logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger
Then create the instance and use it:
from termcolor import colored
my_logger = set_logger(colored('GENERAL', 'yellow'), "/tmp/my_logger.txt", verbose=True)
my_logger.info("this is an info message")
my_logger.debug("this is a debug message")
.....
EDIT: assuming you're using polling2.poll()
You can add a logger to your poll() call - documentation
import logging

poll(lambda: getSomething(),
     timeout=100,
     step=1,
     check_success=IsPollingSuccessful,
     log=logging.DEBUG)
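Since the log argument takes a standard logging level, the messages go through the logging module, which means you also need a console handler configured for them to appear. A minimal sketch, assuming nothing else has configured logging yet:
import logging

# Send DEBUG-and-above records to the console so the per-iteration
# messages produced by poll(log=logging.DEBUG) are actually visible.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s')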

python-daemon and logging: set logging level interactively

I have a python-daemon process that logs to a file via a ThreadedTCPServer (inspired by the cookbook example: https://docs.python.org/2/howto/logging-cookbook.html#sending-and-receiving-logging-events-across-a-network, since I will have many such processes writing to the same file). I control the spawning of the daemon process using subprocess.Popen from an ipython console, and this is how the application will be run. I am able to write to the log file from both the main ipython process and the daemon process, but I am unable to change the level of both by simply setting the level of the root logger in ipython. Is this something that should be possible? Or will it require custom functionality to set the logging level of the daemon separately?
Edit: As requested, here is an attempt to provide a pseudo-code example of what I am trying to achieve. I hope that this is a sufficient description.
daemon_script.py
import logging
import daemon
from other_module import function_to_run_as_daemon

class Daemon(object):
    def __init__(self):
        self.daemon_name = __name__
        logging.basicConfig()  # <--- required, or I don't get any log messages
        self.logger = logging.getLogger(self.daemon_name)
        self.logger.debug("Created logger successfully")

    def run(self):
        with daemon.DaemonContext(files_preserve=[self.logger.handlers[0].stream]):
            self.logger.debug("Daemonised successfully - about to enter function")
            function_to_run_as_daemon()

if __name__ == "__main__":
    d = Daemon()
    d.run()
Then in ipython I would run something like:
>>> import logging
>>> rootlogger = logging.getLogger()
>>> rootlogger.info( "test" )
INFO:root:"test"
>>> subprocess.Popen( ["python" , "daemon_script.py"] )
DEBUG:__main__:"Created logger successfully"
DEBUG:__main__:"Daemonised successfully - about to enter function"
# now I'm finished debugging and testing, I want to reduce the level for all the loggers by changing the level of the handler
# Note that I also tried changing the level of the root handler, but saw no change
>>> rootlogger.handlers[0].setLevel(logging.INFO)
>>> rootlogger.info( "test" )
INFO:root:"test"
>>> print( rootlogger.debug("test") )
None
>>> subprocess.Popen( ["python" , "daemon_script.py"] )
DEBUG:__main__:"Created logger successfully"
DEBUG:__main__:"Daemonised successfully - about to enter function"
I think that I may not be approaching this correctly, but it's not clear to me what would work better. Any advice would be appreciated.
The logger you create in your daemon won't be the same object as the logger you made in ipython, because the daemon runs in a separate process. You can confirm this by printing both logger objects (or their id()); they are distinct.
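For example, a quick, purely illustrative check you could run in both processes:
import logging

root = logging.getLogger()
# Run this in ipython and again inside daemon_script.py: each process
# reports a different id(), because the daemon is a separate interpreter
# with its own logging state.
print("%s at id %d" % (root, id(root)))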
I think a better pattern would be to pass whether you want to be in "debug" mode or not when you run the daemon. In other words, call Popen like this:
subprocess.Popen( ["python" , "daemon_script.py", "debug"] )
It's up to you: you could pass a string meaning "debug mode is on" as above, or you could pass the log level constant that means "debug", e.g.:
subprocess.Popen( ["python" , "daemon_script.py", "10"] )
(https://docs.python.org/2/library/logging.html#levels)
Then, in the daemon's __init__ function, use sys.argv (for example) to get that argument and use it:
...
import sys

def __init__(self):
    self.daemon_name = __name__
    logging.basicConfig()  # <--- required, or I don't get any log messages
    log_level = int(sys.argv[1])  # Probably don't actually just blindly convert it without error handling
    self.logger = logging.getLogger(self.daemon_name)
    self.logger.setLevel(log_level)
...
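So launching the daemon in debug mode from ipython would then look something like this (just a usage sketch of the approach above):
import logging
import subprocess

# Pass logging.DEBUG (10) while testing; switch to logging.INFO (20)
# once you are finished debugging.
subprocess.Popen(["python", "daemon_script.py", str(logging.DEBUG)])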

Take an action each time a log is rotated in python

I want to log a line describing the log format at the top of each log file created by the logger.
Currently I'm using a separate logger process which runs all the time. It receives information from a queue and writes it to the log. A lot of modules pass information to this logging queue.
My current sample code is:
import logging
import time
from logging.handlers import TimedRotatingFileHandler as rotate

def info_log(log_queue):
    logger = logging.getLogger("My Log")
    logger.setLevel(logging.INFO)
    handler = rotate("log/info.log", when="D", interval=30, backupCount=13)
    logger.addHandler(handler)
    desc_string = "yyyy/mm/dd-HH:MM:SS \t name \t country \n"
    logger.info(desc_string)
    while True:
        result = log_queue.get().split("#")
        logger.info(result[0] + "\t" + result[1] + "\t" + result[2] + "\n")
Each time the log is rotated, I want desc_string to be written first to the new log file. How can I do that?
Or, in other words, how can I know in the program when a log is rotated?
Maybe you can simply override the doRollover method from TimedRotatingFileHandler?
class CustomFileHandler(TimedRotatingFileHandler):
    def doRollover(self):
        super().doRollover()
        self.stream.write(desc_string)

# to use it
handler = CustomFileHandler("log/info.log", when="D", interval=30, backupCount=13)
logger.addHandler(handler)
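If you prefer not to rely on a module-level desc_string, here is a slightly fuller sketch; the desc_string constructor argument is my own addition for illustration, not part of the original snippet:
from logging.handlers import TimedRotatingFileHandler

class CustomFileHandler(TimedRotatingFileHandler):
    def __init__(self, filename, desc_string, **kwargs):
        self.desc_string = desc_string
        TimedRotatingFileHandler.__init__(self, filename, **kwargs)

    def doRollover(self):
        # Let the base class switch to the new file first,
        # then write the description line at the top of it.
        TimedRotatingFileHandler.doRollover(self)
        self.stream.write(self.desc_string)

desc_string = "yyyy/mm/dd-HH:MM:SS \t name \t country \n"
handler = CustomFileHandler("log/info.log", desc_string,
                            when="D", interval=30, backupCount=13)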

redirect subprocess log to wxpython txt ctrl

I would like to capture the log output from a Python-based subprocess. Here is part of my code. How do I redirect my log output to this text ctrl as well?
Here is mytest.py:
import logging

log = logging.getLogger('test')

class MyTestClass():
    def TestFunction(self):
        log.info("start function")
        # runs for 5 - 10 mins and has lots of log statements
        print "some stuff"
        log.info("after Test Function")
        # for now
        return a, b
        #sys.exit(2)

if __name__ == "__main__":
    myApp = MyTestClass()
    myApp.TestFunction()
I am doing something of this sort in my main GUI:
class WxLog(logging.Handler):
    def __init__(self, ctrl):
        logging.Handler.__init__(self)
        self.ctrl = ctrl

    def emit(self, record):
        if self.ctrl:
            self.ctrl.AppendText(self.format(record) + "\n")
and in my GUI:
self.log = wx.TextCtrl(self, -1, "", style=wx.TE_MULTILINE| wx.TE_RICH2)
#logging.basicConfig(level=logging.INFO)
self.logr = logging.getLogger('')
self.logr.setLevel(logging.INFO)
hdlr = WxLog(self.log)
hdlr.setFormatter(logging.Formatter('%(message)s '))
self.logr.addHandler(hdlr)
#snip
prog = os.path.join(mydir, "mytest.py")
params = [sys.executable, prog]

# Start the subprocess
outmode = subprocess.PIPE
errmode = subprocess.STDOUT
self._proc = subprocess.Popen(params,
                              stdout=outmode,
                              stderr=errmode,
                              shell=True)

# Read from stdout while there is output from the process
while self._proc.poll() is None:
    txt = self._proc.stdout.readline()
    print txt
    # also direct log to txt ctrl

txt = 'Return code was ' + str(self._proc.returncode) + '\n'
# direct
self.logr.info("On end ")
You can try following the suggestion in this post.
Update: You can set the logger in the subprocess to use a SocketHandler and set up a socket server in the GUI to listen for messages from the subprocess, using the technique in the linked-to post to actually make things appear in the GUI. A working socket server is included in the logging documentation.
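A minimal sketch of the subprocess side of that idea, assuming the default port used in the cookbook example:
import logging
import logging.handlers

# In mytest.py: send every record from the subprocess to a socket server
# (e.g. the cookbook's LogRecordSocketReceiver) running in the GUI process.
socket_handler = logging.handlers.SocketHandler(
    'localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
logging.getLogger('').addHandler(socket_handler)
logging.getLogger('').setLevel(logging.INFO)

log = logging.getLogger('test')
log.info("start function")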
I wrote an article about how I redirect a few things like ping and traceroute using subprocess to my TextCtrl widget here: http://www.blog.pythonlibrary.org/2010/06/05/python-running-ping-traceroute-and-more/
That might help you figure it out. Here's a more generic article that doesn't use subprocess: http://www.blog.pythonlibrary.org/2009/01/01/wxpython-redirecting-stdout-stderr/
I haven't tried redirecting with the logging module yet, but that may be something I'll do in the future.

Running Python-script in thread and redirecting std.out/std.err to wx.TextCtrl in GUI

I'm trying to write a GUI that reads in settings for a Python script, then generates the script and runs it. The script can take dozens of minutes to run, so in order not to block the GUI and frustrate the user I'm running it in a separate thread. Before I did this I used a separate class to redirect the stdout and stderr of the program to a TextCtrl. This worked fine, except that the GUI got blocked during execution.
Running the script from the thread with the redirection class still blocks the GUI. In order not to block the GUI I need to turn the redirection off; all stdout/stderr output from both the script and the GUI then goes to the console.
Here is the class that does the redirection and how I call it:
# For redirecting stdout/stderr to txtctrl.
class RedirectText(object):
    def __init__(self, aWxTextCtrl):
        self.out = aWxTextCtrl

    def write(self, string):
        self.out.WriteText(string)

self.redir = RedirectText(self.bottom_text)
sys.stdout = self.redir
sys.stderr = self.redir
sys.stdin = self.redir
I've tried using some kind of a communication class from the thread to the GUI without success. That is, the GUI still gets blocked.
Does anyone have hints or a solution for this problem, that is, how to get the stdout/stderr from the script into the GUI without blocking the GUI?
Yeah. From the thread, use wx.CallAfter to send the text to the GUI in a thread-safe way. Then it can take the text and display it. Another way to do it would be to use subprocess and communicate with that. There's an example of that here:
http://www.blog.pythonlibrary.org/2010/06/05/python-running-ping-traceroute-and-more/
There are also some methods listed in the comments of this article:
http://www.blog.pythonlibrary.org/2009/01/01/wxpython-redirecting-stdout-stderr/
Unfortunately, my commenting system at that time didn't do a good job with indentation.
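A minimal sketch of the wx.CallAfter idea, reusing the RedirectText class from the question with only the write method changed (the class name here is just for illustration):
import wx

class ThreadSafeRedirectText(object):
    def __init__(self, aWxTextCtrl):
        self.out = aWxTextCtrl

    def write(self, string):
        # wx widgets may only be updated from the main thread;
        # wx.CallAfter queues the call onto the GUI event loop.
        wx.CallAfter(self.out.WriteText, string)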
Another solution I have used with success is to use Python logging instead of stdout/stderr. To do that, you write a subclass that extends logging.Handler to customize the font and the text color presented in a wx.TextCtrl in your wx application:
import logging
import wx
from logging import Handler

class WxHandler(Handler):
    def __init__(self, logCtrl):
        """
        Initialize the handler.
        logCtrl = an instance of wx.TextCtrl
        """
        self.logCtrl = logCtrl
        Handler.__init__(self)

    def flush(self):
        pass

    def emit(self, record):
        """
        Emit a record.
        If a formatter is specified, it is used to format the record.
        The record is then written to the text control with a trailing
        newline and coloured according to the record's level.
        """
        try:
            lastPos = self.logCtrl.GetLastPosition()
            msg = self.format(record)
            self.logCtrl.WriteText(msg)
            self.logCtrl.WriteText('\r\n')
            f = wx.Font(10, wx.MODERN, wx.NORMAL, wx.NORMAL, False, u'Arial', wx.FONTENCODING_ISO8859_1)
            if record.levelno == logging.INFO:
                textColour = wx.Colour(0, 0, 205)
            elif record.levelno == logging.WARN:
                textColour = wx.Colour(250, 128, 114)
            elif record.levelno >= logging.ERROR:
                textColour = wx.Colour(220, 20, 60)
            else:
                textColour = wx.Colour(0, 0, 0)
            self.logCtrl.SetStyle(lastPos, lastPos + len(msg), wx.TextAttr(textColour, wx.NullColour, f))
        except Exception:
            self.handleError(record)
In order to configure the logger:
def configureWxLogger(logCtrl, loggingLevel):
    """
    Wx Logger config
    """
    logger = logging.getLogger()
    logger.setLevel(loggingLevel)
    ch = WxHandler(logCtrl)
    formatter = logging.Formatter("%(asctime)-20s - %(levelname)-8s - %(message)s")
    formatter.datefmt = '%d/%m/%Y-%H:%M:%S'
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    return logger
And, finally, to bind the text control to the log output:
self.logCtrl = wx.TextCtrl(self, -1, "", size=(600, 200), style=wx.TE_MULTILINE|wx.TE_RICH2)
wxLoggingHelper.configureWxLogger(self.logCtrl, logging.DEBUG)
