My Stack:
Google App Engine Standard
Python (2.7)
Goal:
To create named logs in Google Stackdriver Logging (https://console.cloud.google.com/logs/viewer)
Docs - Stackdriver Logging:
https://google-cloud-python.readthedocs.io/en/latest/logging/usage.html
Code:
from google.cloud import logging as stack_logging
from google.cloud.logging.resource import Resource
import threading
import webapp2

class StackdriverLogging:
    def __init__(self, resource=Resource(type='project', labels={'project_id': 'project_id'}), project_id='project_id'):
        self.resource = resource
        self.client = stack_logging.Client(project=project_id)

    def delete_logger(self, logger_name):
        logger = self.client.logger(logger_name)
        logger.delete()

    def async_log(self, logger_name, sev, msg):
        t = threading.Thread(target=self.log, args=(logger_name, sev, msg,))
        t.start()

    def log(self, logger_name, sev, msg):
        logger = self.client.logger(logger_name)
        if isinstance(msg, str):
            logger.log_text(msg, severity=sev, resource=self.resource)
        elif isinstance(msg, dict):
            logger.log_struct(msg, severity=sev, resource=self.resource)

class hLog(webapp2.RequestHandler):
    def get(self):
        stackdriver_logger = StackdriverLogging()
        stackdriver_logger.async_log("my_new_log", "WARNING", msg="Hello")
        stackdriver_logger.async_log("my_new_log", "INFO", msg="world")
ERROR:
Found 1 RPC request(s) without matching response
If this is not possible in Google App Engine Standard (Python), is there any way to get the following code to work:
from google.cloud import logging
client = logging.Client()
# client = logging.Client.from_service_account_json('credentials.json')
logger = client.logger("my_new_log")
logger.log_text("hello world")
If credentials are required, I'd like to use the project's service account.
Any help would be appreciated. Thank you.
I usually tie the Python logging module directly into Google Stackdriver Logging.
To do this, I create a log_helper module:
from google.cloud import logging as gc_logging
import logging
logging_client = gc_logging.Client()
logging_client.setup_logging(logging.INFO)
from logging import *
Which I then import into other files like this:
import log_helper as logging
After that, you can use the module just as you would the default Python logging module.
To create named logs with the default Python logging module, use different loggers for different namespaces:
import log_helper as logging
test_logger = logging.getLogger('test')
test_logger.setLevel(logging.INFO)
test_logger.info('is the name of this logger')
Output:
INFO:test:is the name of this logger
Related
I would like to send LogRecord attributes (like processName, threadName, exc_info, etc.) as part of the logging information sent to GCP, in JSON format. How can I do that? I am using the Python code below to send logs to GCP (with a jsonPayload).
(thanks to @ianyoung for the code)
import logging
import google.cloud.logging
import json
client = google.cloud.logging.Client()
client.setup_logging()
logger = logging.getLogger('test')
data_dict = {"hello": "world"}
logging.info(json.dumps(data_dict))
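One possible approach (a sketch, not code from this thread): the Cloud Logging handler picks up a json_fields attribute on each record (the same extra={"json_fields": ...} mechanism shown further down in this thread), so a logging.Filter can copy LogRecord attributes into it. RecordAttributeFilter is a hypothetical name:
import logging
import google.cloud.logging

client = google.cloud.logging.Client()
client.setup_logging()

class RecordAttributeFilter(logging.Filter):
    # Hypothetical filter: copy selected LogRecord attributes into
    # json_fields so they land in the entry's jsonPayload.
    def filter(self, record):
        record.json_fields = {
            "processName": record.processName,
            "threadName": record.threadName,
        }
        return True

logger = logging.getLogger('test')
logger.addFilter(RecordAttributeFilter())
logger.info('hello world')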
I'm currently managing my API using Celery tasks and a Kubernetes cluster on Google Cloud Platform.
Celery automatically logs the input and output of each task. This is something I want, but I would also like to use google-cloud-logging so that the input and output are logged as a jsonPayload.
For all other logs I use the following:
from google.cloud.logging.handlers import CloudLoggingHandler
from google.cloud.logging_v2.handlers import setup_logging
# Imports the Cloud Logging client library
import google.cloud.logging
# Instantiates a client
client = google.cloud.logging.Client()
handler = CloudLoggingHandler(client)
setup_logging(handler)
import logging
logger = logging.getLogger(__name__)
data_dict = {"my": "data"}
logger.info("this is an example", extra={"json_fields": data_dict})
And I use Celery with the following template:
app = Celery(**my_params)
@app.task
def task_test(data):
    # Update dictionary with new data
    data["key1"] = "value1"
    return data
...
detection_task = celery.signature('tasks.task_test', args=([[{"hello": "world"}]]))
r = detection_task.apply_async()
data = r.get()
Here's an example of a log I receive from Celery:
The blurred part in the screenshot corresponds to the dict/JSON I would like to have in a jsonPayload instead of a textPayload.
(Also note that this log is marked as ERROR on GCP but as INFO by Celery.)
Any idea how I could connect Python's built-in logging, the Celery logger, and the GCP logger?
To connect your Python logger with GCP Logger:
import logging
import google.cloud.logging
from google.cloud.logging.handlers import CloudLoggingHandler, setup_logging
client = google.cloud.logging.Client()
handler = CloudLoggingHandler(client, name="your_log_name")
cloud_logger = logging.getLogger('cloudLogger')
cloud_logger.setLevel(logging.INFO)  # configure cloud_logger as needed
cloud_logger.addHandler(handler)
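At this point, anything logged through cloud_logger is sent to Cloud Logging under your_log_name; a quick check (the message text is illustrative):
cloud_logger.info('This message goes to Cloud Logging under your_log_name')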
To connect this logger to Celery's logger:
def initialize_log(logger=None, loglevel=logging.DEBUG, **kwargs):
    logger = logging.getLogger('celery')
    handler = CloudLoggingHandler(client, name="your_log_name")  # the GCP handler defined above
    handler.setLevel(loglevel)
    logger.addHandler(handler)
    return logger
from celery.signals import after_setup_task_logger
after_setup_task_logger.connect(initialize_log)
from celery.signals import after_setup_logger
after_setup_logger.connect(initialize_log)
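Putting it together, messages logged inside a task then flow up the Celery logger tree to the CloudLoggingHandler attached above. A minimal sketch (the broker URL and the json_fields payload are assumptions):
from celery import Celery
from celery.utils.log import get_task_logger

app = Celery('tasks', broker='redis://localhost:6379/0')  # broker URL is an assumption
logger = get_task_logger(__name__)  # a child of the 'celery' logger tree

@app.task
def task_test(data):
    # Propagates up to the 'celery' logger, where the CloudLoggingHandler
    # attached in initialize_log turns json_fields into a jsonPayload.
    logger.info("task input", extra={"json_fields": {"input": data}})
    data["key1"] = "value1"
    return data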
I tried the code below, from https://learn.microsoft.com/en-us/azure/azure-monitor/app/opencensus-python, to capture an exception:
import logging

from opencensus.ext.azure.log_exporter import AzureLogHandler

logger = logging.getLogger(__name__)
# TODO: replace the all-zero GUID with your instrumentation key.
logger.addHandler(AzureLogHandler(
    connection_string='InstrumentationKey=00000000-0000-0000-0000-000000000000')
)
properties = {'custom_dimensions': {'key_1': 'value_1', 'key_2': 'value_2'}}
# Use properties in exception logs
try:
    result = 1 / 0  # generate a ZeroDivisionError
except Exception:
    logger.exception('Captured an exception.', extra=properties)
It works: I can capture the exception.
However, is there an easy way to capture exceptions automatically in a Python Flask app?
When I try the code below, it only gives me a request record, not the exception.
app = Flask(__name__)
app.logger.addHandler(file_handler)
handler = AzureEventHandler(
connection_string="InstrumentationKey={}".format(app.config['APPINSIGHTS_INSTRUMENTATIONKEY']))
handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
handler.setLevel(logging.ERROR)
app.logger.addHandler(handler)
Thank you for your help.
Your code should look like the code below. For more details, you can check the official sample code:
Flask "To-Do" Sample Application
import logging
import sys
from flask import Flask
sys.path.append('..')
from config import Config
from flask_sqlalchemy import SQLAlchemy
from opencensus.ext.azure import metrics_exporter
from opencensus.ext.azure.log_exporter import AzureLogHandler
from opencensus.ext.flask.flask_middleware import FlaskMiddleware
from opencensus.trace import config_integration
logger = logging.getLogger(__name__)
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
# Import here to avoid circular imports
from app import routes # noqa isort:skip
# Trace integrations for sqlalchemy library
config_integration.trace_integrations(['sqlalchemy'])
# Trace integrations for requests library
config_integration.trace_integrations(['requests'])
# FlaskMiddleware will track requests for the Flask application and send
# request/dependency telemetry to Azure Monitor
middleware = FlaskMiddleware(app)
# Processor function for changing the role name of the app
def callback_function(envelope):
envelope.tags['ai.cloud.role'] = "To-Do App"
return True
# Adds the telemetry processor to the trace exporter
middleware.exporter.add_telemetry_processor(callback_function)
# Exporter for metrics, will send metrics data
exporter = metrics_exporter.new_metrics_exporter(
enable_standard_metrics=False,
connection_string='InstrumentationKey=' + Config.INSTRUMENTATION_KEY)
# Exporter for logs, will send logging data
logger.addHandler(
AzureLogHandler(
connection_string='InstrumentationKey=' + Config.INSTRUMENTATION_KEY
)
)
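# (Not part of the official sample; an assumed addition.) One way to capture
# unhandled exceptions automatically: a Flask error handler that routes them
# through the AzureLogHandler registered above. logger.exception records the
# active traceback, since Flask invokes the handler while the exception is
# still being handled.
@app.errorhandler(Exception)
def handle_exception(error):
    logger.exception('Unhandled exception')
    return 'Internal Server Error', 500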
if __name__ == '__main__':
app.run(host='localhost', port=5000, threaded=True, debug=True)
I'm struggling to find out why my log entries are being duplicated in Cloud Logging.
I use a custom dummy handler that does nothing, and I'm also using a named logger.
Here's my code:
import google.cloud.logging
import logging
class MyLogHandler(logging.StreamHandler):
def emit(self, record):
pass
# Setting Up App Engine's Logging
client = google.cloud.logging.Client()
client.get_default_handler()
client.setup_logging()
# Setting Up my custom logger/handler
my_handler = MyLogHandler()
logging.getLogger('my_logger').addHandler(my_handler)
logging.getLogger('my_logger').setLevel(logging.DEBUG)
logging.getLogger('my_logger').debug('Why this message is being duplicated?') # please note that i'm logging into 'my_logger' logger, I'm not using root logger for this message
In the first place, I think this message shouldn't even show up in Cloud Logging, because I'm using a named logger called 'my_logger' and Cloud Logging is attached to the root logger only, but anyway...
The above code is imported into my app.py which bootstraps a Flask app on app engine.
Here is a screenshot of the issue:
This guy has a similar issue: Duplicate log entries with Google Cloud Stackdriver logging of Python code on Kubernetes Engine
I tried every workaround suggested in that topic, but none of them worked either.
Is there something I'm missing here? Thanks in advance.
import logging

from google.cloud.logging.handlers import AppEngineHandler

root_logger = logging.getLogger()
# keep only the GCP App Engine handlers, to prevent logs from also being written to STDERR
root_logger.handlers = [handler for handler in root_logger.handlers
                        if isinstance(handler, AppEngineHandler)]
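Separately, the reason the 'my_logger' message reaches Cloud Logging at all is that named loggers propagate their records to the root logger by default. If you want 'my_logger' to bypass the root handlers entirely, you can disable propagation (a general Python logging sketch, not specific to the GCP handlers):
import logging

my_logger = logging.getLogger('my_logger')
my_logger.propagate = False  # records no longer bubble up to the root handlers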
I am trying to add some monitoring to a simple REST web service built with Flask and MongoEngine, and I have run into what I think is a gap in my understanding of how imports and MongoEngine work in Flask applications.
I'm following pymongo's documentation on monitoring : https://pymongo.readthedocs.io/en/3.7.2/api/pymongo/monitoring.html
I defined the following CommandListener in a separate file:
import logging
from pymongo import monitoring
log = logging.getLogger('my_logger')
class CommandLogger(monitoring.CommandListener):
def started(self, event):
log.debug("Command {0.command_name} with request id "
"{0.request_id} started on server "
"{0.connection_id}".format(event))
monitoring.register(CommandLogger())
I made an application_builder.py file to create my Flask app; the code looks something like this:
from flask_restful import Api
from flask import Flask
from command_logger import CommandLogger # <----
from db import initialize_db
from routes import initialize_routes
def create_app():
app = Flask(__name__)
api = Api(app)
initialize_db(app)
initialize_routes(api)
return app
The monitoring only seems to work if I import CommandLogger in application_builder.py. I'd like to understand what is going on here: how does the import affect the monitoring registration?
I'd also like to extract monitoring.register(CommandLogger()) into a function and call it at a later stage in my code, something like def register(): monitoring.register(CommandLogger()).
But this doesn't seem to work; registration only works when it is in the same file as the CommandLogger class...
From MongoEngine's docs, it seems important that the listener gets registered before connecting MongoEngine:
To use pymongo.monitoring with MongoEngine, you need to make sure that
you are registering the listeners before establishing the database
connection (i.e calling connect)
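A minimal ordering sketch based on that note (the database name is a placeholder):
from pymongo import monitoring
from mongoengine import connect

from command_logger import CommandLogger

monitoring.register(CommandLogger())  # register listeners first...
connect('my_database')                # ...then establish the connection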
This worked for me. I'm just initializing/registering it the same way as I did the other modules, to avoid circular imports.
# admin/logger.py
import logging
from pymongo import monitoring
log = logging.getLogger()
log.setLevel(logging.DEBUG)
logging.basicConfig(level=logging.DEBUG)
class CommandLogger(monitoring.CommandListener):
# def methods...
class ServerLogger(monitoring.ServerListener):
# def methods
class HeartbeatLogger(monitoring.ServerHeartbeatListener):
    # def methods
class TopologyLogger(monitoring.TopologyListener):
    # def methods
def initialize_logger():
monitoring.register(CommandLogger())
monitoring.register(ServerLogger())
monitoring.register(HeartbeatLogger())
monitoring.register(TopologyLogger())
# /app.py
from flask import Flask
from admin.toolbar import initialize_debugtoolbar
from admin.admin import initialize_admin
from admin.views import initialize_views
from admin.logger import initialize_logger
from database.db import initialize_db
from flask_restful import Api
from resources.errors import errors
app = Flask(__name__)
# imports requiring app
from resources.routes import initialize_routes
api = Api(app, errors=errors)
# Logger before db
initialize_logger()
# Database and Routes
initialize_db(app)
initialize_routes(api)
# Admin and Development
initialize_admin(app)
initialize_views()
initialize_debugtoolbar(app)
# /run.py
from app import app
app.run(debug=True)
then in any module...
from admin.logger import log
from db.models import User
# inside some class/view/queryset or however your objects are written...
log.info('Saving an item through MongoEngine...')
User(name='Foo').save()
What I'm trying to figure out now is how to integrate Flask-DebugToolbar's logging panel with the monitoring messages from these listeners...