I am trying to create logs for errors. This is the logger I am using:
import logging
import os

def create_log(source):
    logging.basicConfig(filename="logs/" + source + ".log",
                        format='%(asctime)s::%(levelname)s::%(message)s',
                        filemode='a')
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

def info_logger(message):
    logger.info(message)

def error_logger(message):
    print(message)
    logger.error(message)
I am calling this logger in a for loop where I am doing some operation and trying to create logs for each iteration:
for i in data["source_id"]:
    # --Some task here--
    log_file_name = str(source_dict["source_id"]) + "_" + source_dict["source_name"] + "_" + str(datetime.today().strftime("%Y-%m-%d_%H_%M_%S"))
    create_log(log_file_name)
For the first iteration, the log file is created. But for all other iterations, the same log file keeps getting appended to. I want to create a separate log file for each iteration. Any idea how I can do that?
You can try this:
import logging

debug = logging.FileHandler("debug.log")
debug.setLevel(logging.DEBUG)

error = logging.FileHandler("error.log")
error.setLevel(logging.ERROR)

warning = logging.FileHandler("warning.log")
warning.setLevel(logging.WARNING)

console = logging.StreamHandler()

logging.basicConfig(  # noqa
    level=logging.INFO,
    format="[%(asctime)s]:%(levelname)s %(name)s :%(module)s/%(funcName)s,%(lineno)d: %(message)s",
    handlers=[debug, error, warning, console]
)

logger = logging.getLogger()
logger.debug("This is debug")      # suppressed: below the root INFO level, reaches no handler
logger.error("This is error")      # written to debug.log, error.log, warning.log and the console
logger.warning("This is warn")     # written to debug.log, warning.log and the console
I'm trying to create a logging mechanism inside a Databricks Python notebook. I tried using the code below to achieve this:
import logging

def create_logger(name, log_path=None):
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s - %(levelname)-8s - %(message)s")

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    if log_path is not None:
        file_handler = logging.FileHandler(log_path)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    return logger
However, whenever I try to call the function as below:
from datetime import date, datetime

current_date = date.today()
current_timestamp = datetime.strftime(datetime.now(), "%Y%m%d%H%M%S")
name = "temp_logs"
log_path = f"abfss://{storageContainer}#{storageAccount}.dfs.core.windows.net/{target_dir}/logs/{current_date}/{name}_{current_timestamp}.txt"
logger = create_logger(name=name, log_path=log_path)
This gives the following error:
[Errno 2] No such file or directory: '/databricks/driver/abfss:/temp-ontainer#teststorage.dfs.core.windows.net/test/logs/2021-09-13/temp_logs_20210913101150.txt'
Is there a way to handle this (without using a mount point location)?
You can try using BlobStorageRotatingFileHandler, importing it as below:
from azure_storage_logging.handlers import BlobStorageRotatingFileHandler
See the Python package documentation for azure-storage-logging; it provides functionality to send output from the standard Python logging APIs to Microsoft Azure Storage.
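For the blob case specifically, here is a minimal sketch. The parameter names (filename, account_name, account_key, maxBytes, container) follow the azure-storage-logging README as I recall it, so verify them against the package version you install; the account values are placeholders:

import logging
from azure_storage_logging.handlers import BlobStorageRotatingFileHandler

logger = logging.getLogger('blob_example')
# Placeholder credentials; verify the parameter names against the
# azure-storage-logging release you install.
handler = BlobStorageRotatingFileHandler(
    filename='temp_logs.txt',            # local file, shipped to blob storage on rotation
    account_name='mystorageaccountname',
    account_key='mystorageaccountkey',
    maxBytes=1024 * 1024,                # rotate after ~1 MB
    container='logs')
handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info('this record ends up in the logs container')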
Sample code (this one uses the package's TableStorageHandler) below:
import logging
from azure_storage_logging.handlers import TableStorageHandler

# configure the handler and add it to the logger
logger = logging.getLogger('example')
handler = TableStorageHandler(account_name='mystorageaccountname',
                              account_key='mystorageaccountkey',
                              extra_properties=('%(hostname)s',
                                                '%(levelname)s'))
logger.addHandler(handler)

# output log messages
logger.info('info message')
logger.warning('warning message')
logger.error('error message')
Use the handlers above to record your messages to Azure Storage.
As for the "directory not found" error, you need to check the path. Please have a look at the documentation.
I would like to have:
- a main.log file capturing all logs at DEBUG level and above from main and imported modules
- the console showing only ERROR level logs from main and its imported submodules.
Note: I may have no control over the error-handling logs of the imported submodules.
Here is the main.py code for this:
# main.py importing a submodule
import logging
import submodule
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# log to console
c_handler = logging.StreamHandler()
console_format = logging.Formatter("[%(levelname)s] %(message)s")
c_handler.setFormatter(console_format)
c_handler.setLevel = logging.INFO
logger.addHandler(c_handler)
logger.error("This is an error!!! Logged to console")
# log to file from main
logfile = "./logging/main.log"
f_handler = logging.FileHandler(filename=logfile)
f_format = logging.Formatter("%(asctime)s: %(name)-18s [%(levelname)-8s] %(message)s")
f_handler.setFormatter(f_format)
f_handler.setLevel = logging.DEBUG
logger.addHandler(f_handler)
logger.debug("This is a debug error. Not logged to console, but should log to file")
... and the submodule.py code ...
# submodule.py
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
# log to console
c_handler = logging.StreamHandler()
c_handler.setFormatter(formatter)
logger.addHandler(c_handler)
logger.info("This is an info message from submodule, should be recorded in main.log!")
logger.debug("This is a debug message from submodule, also should be recorded in main.log!!")
When I run main.py:
"[ERROR] This is an error!!! Logged to console" shows up correctly in the console.
But...
Console also shows...
INFO:submodule:This is an info message from submodule, should be recorded in main.log!
[DEBUG] This is a debug error. Not logged to console, but should log to file
The main.log file only shows the single line "yy-mm-dd hh:mm:ss: __main__ [DEBUG   ] This is a debug error. Not logged to console, but should log to file". It does not show the logs from submodule.py.
I'd appreciate knowing:
Where am I going wrong?
What would be the code correction needed?
EDIT: Based on @Dan D.'s suggestion I changed submodule.py as follows:
# submodule.py
import logging

logger = logging.getLogger(__name__)

def logsomething():
    logger.info("This is an info message from submodule, should be recorded in main.log!")
    logger.debug("This is a debug message from submodule, also should be recorded in main.log!!")
... and the program logs to console and file appropriately.
Q. If I want to change the message format for submodule.py only, can this be done through main.py?
Your submodule should just be:
import logging
logger = logging.getLogger(__name__)
logger.info("This is an info message from submodule, should be recorded in main.log!")
logger.debug("This is a debug message from submodule, also should be recorded in main.log!!")
Then your main module should be:
# main.py importing a submodule
import logging
logger = logging.getLogger(__name__)
# log to console
c_handler = logging.StreamHandler()
console_format = logging.Formatter("[%(levelname)s] %(message)s")
c_handler.setFormatter(console_format)
c_handler.setLevel(logging.INFO)
logging.getLogger().addHandler(c_handler)
# log to file from main
logfile = "./logging/main.log"
f_handler = logging.FileHandler(filename=logfile)
f_format = logging.Formatter("%(asctime)s: %(name)-18s [%(levelname)-8s] %(message)s")
f_handler.setFormatter(f_format)
f_handler.setLevel(logging.DEBUG)
logging.getLogger().addHandler(f_handler)
logging.getLogger().setLevel(logging.DEBUG)
import submodule
logger.error("This is an error!!! Logged to console")
logger.debug("This is a debug error. Not logged to console, but should log to file")
Edit: The handlers have to be added before the code in the submodule runs. To effect this, the import submodule was moved after the code that sets up the handlers.
Normally, modules shouldn't have any top-level logging calls; then all the imports can be done at the top, and the callables that use logging are called indirectly by the code in the if __name__ == "__main__": block after it sets up logging.
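As for the follow-up question in the edit (changing the message format for submodule.py only, from main.py): yes; since all handler setup lives in main.py, you can attach a dedicated handler to the submodule's named logger. A minimal sketch, assuming the submodule's logger is named "submodule" (its __name__):

# In main.py, after the root handlers are set up:
sub_handler = logging.StreamHandler()
sub_handler.setFormatter(logging.Formatter("SUBMODULE>> %(levelname)s: %(message)s"))
sub_logger = logging.getLogger("submodule")  # the name the submodule gets via __name__
sub_logger.addHandler(sub_handler)
sub_logger.propagate = False  # keep its records out of the root handlers

Note the trade-off: with propagate = False the submodule's records no longer reach the root handlers, so they also stop appearing in main.log; leave propagation on if you only want an additional format rather than a replacement.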
I am trying to print logs using the logging module in Python.
Following is the code I am keeping at the top of the file:
if __name__ == '__main__':
    LOG_FILENAME = '/home/akash/exdion-pdf-extracter/doc/epod.log'
    logging.basicConfig(
        filename=LOG_FILENAME,
        level=logging.DEBUG,
    )
There are different files with function calls from one another. I have used the following line to write a line to the log:
@staticmethod
def initiate_pdf_processing(ct_doc, pt_doc, feature, startAndEndKeyList):
    logging.info("testing logger")
    ...
There are multiple instances of logger calls similar to the one above. But I don't receive the logger output in the designated file. The code base is large. However, a few errors generated by the code do get printed in the log file.
Use the code bit below outside of the main namespace. This way, you define the logger and create the log file at module level, and you can call the logger anywhere in the code. The code bit below is how I usually set up logging.
import os
import logging

logfile = '<your_file_name>.log'
if os.path.isfile(logfile):
    os.remove(logfile)

file_handler = logging.FileHandler(logfile)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(
    '%(asctime)s %(pathname)s [%(process)d]: %(levelname)s:: %(message)s'))

logger = logging.getLogger('wbs-server-log')
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
The issue might be that you have to initialize logging above the if __name__ == '__main__' block. That way, logging will be initialized when you import this file as a module.
Suggestion for initializing logging:
import logging
log = logging.getLogger(PACKAGE_NAME)
stream_handler = logging.StreamHandler(stream=open(LOG_FILE_NAME, 'a'))
stream_handler.setLevel(logging.DEBUG)
log.addHandler(stream_handler)
log.debug('your message here')
After this you can tweak log message formatting with logging.Formatter.
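For example, a minimal sketch continuing from the snippet above:

formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
stream_handler.setFormatter(formatter)  # stream_handler from the snippet above
log.debug('your message here')  # now prefixed with timestamp, logger name and level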
Apparently, I shouldn't be using ScrapyFileLogObserver anymore (http://doc.scrapy.org/en/1.0/topics/logging.html). But I still want to be able to save my log messages to a file, and I still want all the standard Scrapy console information to be saved to the file too.
From reading up on how to use the logging module, this is the code that I have tried to use:
class BlahSpider(CrawlSpider):
    name = 'blah'
    allowed_domains = ['blah.com']
    start_urls = ['https://www.blah.com/blahblahblah']
    rules = (
        Rule(SgmlLinkExtractor(allow=r'whatever'), callback='parse_item', follow=True),
    )

    def __init__(self):
        CrawlSpider.__init__(self)
        self.logger = logging.getLogger()
        self.logger.setLevel(logging.DEBUG)
        logging.basicConfig(filename='debug_log.txt', filemode='w',
                            format='%(asctime)s %(levelname)s: %(message)s',
                            level=logging.DEBUG)
        console = logging.StreamHandler()
        console.setLevel(logging.DEBUG)
        simple_format = logging.Formatter('%(levelname)s: %(message)s')
        console.setFormatter(simple_format)
        self.logger.addHandler(console)
        self.logger.info("Something")

    def parse_item(self, response):
        i = BlahItem()
        return i
It runs fine, and it saves the "Something" to the file. However, all of the stuff that I see in the command prompt window, all of the stuff that used to be saved to the file when I used ScrapyFileLogObserver, is not saved now.
I thought that my "console" handler with "logging.StreamHandler()" was supposed to deal with that, but this is just what I had read and I don't really understand how it works.
Can anyone point out what I am missing or where I have gone wrong?
Thank you.
I think the problem is that you've used both basicConfig and addHandler.
Configure two handlers separately:
self.logger = logging.getLogger()
self.logger.setLevel(logging.DEBUG)
logFormatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
# file handler
fileHandler = logging.FileHandler("debug_log.txt")
fileHandler.setLevel(logging.DEBUG)
fileHandler.setFormatter(logFormatter)
self.logger.addHandler(fileHandler)
# console handler
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
consoleHandler.setFormatter(logFormatter)
self.logger.addHandler(consoleHandler)
See also:
logger configuration to log to file and print to stdout
You can log all Scrapy output to a file by first disabling the root handler in scrapy.utils.log.configure_logging and then adding your own log handler.
In the settings.py file of the Scrapy project, add the following code:
import logging
from logging.handlers import RotatingFileHandler
from scrapy.utils.log import configure_logging
LOG_ENABLED = False
# Disable default Scrapy log settings.
configure_logging(install_root_handler=False)
# Define your logging settings.
log_file = '/tmp/logs/CRAWLER_logs.log'
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
rotating_file_log = RotatingFileHandler(log_file, maxBytes=10485760, backupCount=1)
rotating_file_log.setLevel(logging.DEBUG)
rotating_file_log.setFormatter(formatter)
root_logger.addHandler(rotating_file_log)
You can also customize the log level (DEBUG to INFO, for example) and the formatter as required.
Hope this helps!
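With that in place, spider code needs no extra setup; records from the built-in self.logger propagate to the root logger and land in the rotating file. For illustration, a hypothetical spider:

import scrapy

class ExampleSpider(scrapy.Spider):
    name = "example"
    start_urls = ["https://example.com"]  # hypothetical URL

    def parse(self, response):
        # Propagates to the root logger configured in settings.py
        self.logger.info("Parsed %s", response.url)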
I have been writing simple scripts and I am trying to use a logger to generate a log for each function in the scripts.
1) Based on the function name, I create a logger file handler and write logs using that handler. I also delete any previously existing file with the same name.
2) At the end of the function, I close the handler.
My problems are:
1) Even though I close the handler, the next time I run the same function I get an error that the file I am trying to delete (as part of setting up the logger file handler) is still being used.
2) Also, the logger prints everything to the console, which I don't want; I just want it to write everything to the file.
Here are the logger functions:
def setLogger(path):
    """
    #purpose: Initializes basic logging directory and file
    """
    LOG_FILENAME = path + "\\" + "log.txt"
    #logging.basicConfig(filename=LOG_FILENAME,
    #                    format='%(levelname)s %(asctime)s %(message)s', level=logging.INFO
    #                    )
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    file_handler = logging.FileHandler(LOG_FILENAME)
    file_handler.setLevel(logging.INFO)
    formatter = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger

def unsetLogger(logger):
    """
    #purpose: performs a basic shutdown of logger
    """
    logger.handlers[0].close()
    logger.removeHandler(logger.handlers[0])
    logging.shutdown()  # note: needs the parentheses; a bare "logging.shutdown" is a no-op
The way I use them is, for example:
def fun():
    os.remove(path)
    logger = setLogger(path)
    logging.info("hi")  # this writes to file and prints on the console as well
    unsetLogger(logger)
If I run the function fun() once, it's all good. But if I run it again, I get the "can't delete" error for the log file.
Thanks in advance.
learningNinja
After making some slight modifications, I came up with the following test to try to reproduce your error, but I don't get any errors.
import os
import logging

def setLogger(path):
    """
    #purpose: Initializes basic logging directory and file
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # Simplified log file path (I just use the full value passed in, and don't append "\log.txt")
    file_handler = logging.FileHandler(path)
    file_handler.setLevel(logging.INFO)
    formatter = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger

def unsetLogger(logger):
    """
    #purpose: performs a basic shutdown of logger
    """
    logger.handlers[0].close()
    logger.removeHandler(logger.handlers[0])
    logging.shutdown()  # note: called with parentheses; the original bare reference is a no-op

def fun():
    try:
        # Was getting an error trying to remove a file that didn't exist on
        # first execution...
        os.remove("log.txt")
    except:
        pass
    logger = setLogger("log.txt")
    logging.info("hi")
    unsetLogger(logger)
fun()
fun()
fun()
See if there is anything I'm doing differently than your actual code and maybe that might help you.
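One difference worth checking on Windows (where deleting a file that any handler still has open raises exactly the described error): if your real code ever adds a second handler, or an exception skips unsetLogger, a handler can survive into the next run of fun(). A defensive variant of fun() that closes every root handler before deleting the file:

def fun():
    root = logging.getLogger()
    # Close and detach every handler so nothing still holds log.txt open
    # (Windows refuses to delete a file that is open).
    for handler in root.handlers[:]:
        handler.close()
        root.removeHandler(handler)
    if os.path.exists("log.txt"):
        os.remove("log.txt")
    logger = setLogger("log.txt")
    logging.info("hi")
    unsetLogger(logger)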