I'm having a few problems using the HTTPHandler of the Python logging module to push messages to a custom Django app. I have a separate daemon as part of my infrastructure, and I would like it to push its logs to Django so that I have everything in one place.
I'm using:
Ubuntu 10.04
Django 1.2.4
PostgreSQL 8.4
python 2.6.5
This is the model
from django.db import models

# Create your models here.
class application(models.Model):
    app_name = models.CharField(max_length=20)
    description = models.CharField(max_length=500, null=True)
    date = models.DateField()

    def __unicode__(self):
        return "%s logs - %s" % (self.app_name, self.date.strftime("%d-%m-%Y"))


class log_entry(models.Model):
    application = models.ForeignKey(application)
    thread_name = models.CharField(max_length=200, null=True)
    name = models.CharField(max_length=200, null=True)
    thread = models.CharField(max_length=50, null=True)
    created = models.FloatField(null=True)
    process = models.IntegerField(null=True)
    args = models.CharField(max_length=200, null=True)
    module = models.CharField(max_length=256, null=True)
    filename = models.CharField(max_length=256, null=True)
    levelno = models.IntegerField(null=True)
    msg = models.CharField(max_length=4096, null=True)
    pathname = models.CharField(max_length=1024, null=True)
    lineno = models.IntegerField(null=True)
    exc_text = models.CharField(max_length=200, null=True)
    exc_info = models.CharField(max_length=200, null=True)
    func_name = models.CharField(max_length=200, null=True)
    relative_created = models.FloatField(null=True)
    levelname = models.CharField(max_length=10, null=True)
    msecs = models.FloatField(null=True)

    def __unicode__(self):
        return self.levelname + " - " + self.msg
This is the view
# Create your views here.
from django.shortcuts import render_to_response, get_list_or_404, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.views.decorators.csrf import csrf_protect, csrf_exempt
from inthebackgroundSite.log.models import log_entry, application
import datetime

@csrf_exempt
def log(request):
    print request.POST
    for element in request.POST:
        print '%s : %s' % (element, request.POST[element])
    data = request.POST
    today = datetime.date.today()
    print today

    app = application.objects.filter(app_name__iexact=request.POST["name"], date__iexact=today)
    if not app:
        print "didn't find a matching application. adding one now.."
        print data["name"]
        print today
        app = application.objects.create(app_name=data["name"],
                                         description=None,
                                         date=today)
        app.save()
        if not app:
            print "after save you can't get at it!"
        newApplication = app

    print app
    print "found application"

    newEntry = log_entry.objects.create(application=app,
                                        thread_name=data["threadName"],
                                        name=data["name"],
                                        thread=data["thread"],
                                        created=data["created"],
                                        process=data["process"],
                                        args="'" + data["args"] + "'",
                                        module=data["module"],
                                        filename=data["filename"],
                                        levelno=data["levelno"],
                                        msg=data["msg"],
                                        pathname=data["pathname"],
                                        lineno=data["lineno"],
                                        exc_text=data["exc_text"],
                                        exc_info=data["exc_info"],
                                        func_name=data["funcName"],
                                        relative_created=data["relativeCreated"],
                                        levelname=data["levelname"],
                                        msecs=data["msecs"],
                                        )
    print newEntry
    #newEntry.save()
    return HttpResponse("OK")
And this is the call in the Python code that sends the message:
import os
import logging
import logging.handlers
import time

if __name__ == '__main__':
    formatter = logging.Formatter("%(name)s %(levelno)s %(levelname)s %(pathname)s %(filename)s%(module)s %(funcName)s %(lineno)d %(created)f %(asctime)s %(msecs)d %(thread)d %(threadName)s %(process)d %(processName)s %(message)s ")
    log = logging.getLogger("ShoutGen")
    #logLevel = "debug"
    #log.setLevel(logLevel)
    http = logging.handlers.HTTPHandler("192.168.0.5:9000", "/log/", "POST")
    http.setFormatter(formatter)
    log.addHandler(http)

    log.critical("Finished MountGen init")
    time.sleep(20)
    http.close()
Now the first time I send a message, with empty tables, it works fine: a new app row gets created and a new log entry gets created. But the second time I call it, I get
<QueryDict: {u'msecs': [u'224.281072617'], u'args': [u'()'], u'name': [u'ShoutGen'], u'thread': [u'140445579720448'], u'created': [u'1299046203.22'], u'process': [u'16172'], u'threadName': [u'MainThread'], u'module': [u'logtest'], u'filename': [u'logtest.py'], u'levelno': [u'50'], u'processName': [u'MainProcess'], u'pathname': [u'logtest.py'], u'lineno': [u'19'], u'exc_text': [u'None'], u'exc_info': [u'None'], u'funcName': [u'<module>'], u'relativeCreated': [u'7.23600387573'], u'levelname': [u'CRITICAL'], u'msg': [u'Finished MountGen init']}>
msecs : 224.281072617
args : ()
name : ShoutGen
thread : 140445579720448
created : 1299046203.22
process : 16172
threadName : MainThread
module : logtest
filename : logtest.py
levelno : 50
processName : MainProcess
pathname : logtest.py
lineno : 19
exc_text : None
exc_info : None
funcName : <module>
relativeCreated : 7.23600387573
levelname : CRITICAL
msg : Finished MountGen init
2011-03-02
[sql] SELECT ...
FROM "log_application"
WHERE (UPPER("log_application"."date"::text) = UPPER(2011-03-02)
AND UPPER("log_application"."app_name"::text) = UPPER(ShoutGen))
[sql] (5.10ms) Found 1 matching rows
[<application: ShoutGen logs - 02-03-2011>]
found application
[sql] SELECT ...
FROM "log_log_entry" LIMIT 21
[sql] (4.05ms) Found 2 matching rows
[sql] (9.14ms) 2 queries with 0 duplicates
[profile] Total time to render was 0.44s
Traceback (most recent call last):
File "/usr/local/lib/python2.6/dist-packages/django/core/servers/basehttp.py", line 281, in run
self.finish_response()
File "/usr/local/lib/python2.6/dist-packages/django/core/servers/basehttp.py", line 321, in finish_response
self.write(data)
File "/usr/local/lib/python2.6/dist-packages/django/core/servers/basehttp.py", line 417, in write
self._write(data)
File "/usr/lib/python2.6/socket.py", line 300, in write
self.flush()
File "/usr/lib/python2.6/socket.py", line 286, in flush
self._sock.sendall(buffer)
error: [Errno 32] Broken pipe
and no extra rows are inserted into the log_log_entry table, so I don't really know why this is happening at this point.
I've looked around and apparently the Broken pipe traceback isn't a problem, just something that browsers do. But I'm not using a browser so I'm not sure what the issue is.
It may be that the exception is causing a transaction to roll back and undo your changes. Are you using TransactionMiddleware? You could try the transaction.autocommit decorator on your view.
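A minimal sketch of that suggestion, assuming Django 1.2's standard transaction decorators and the view above (the view body is elided here):

from django.db import transaction
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt

@csrf_exempt
@transaction.autocommit   # commit each ORM write immediately, independent of TransactionMiddleware
def log(request):
    # ... create the application / log_entry rows exactly as in the view above ...
    return HttpResponse("OK")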
If the "broken pipe" error keeps happening, it's worth finding out why. The HTTPHandler does a normal POST and waits for the response ("OK" from your view) in its emit() call, and it shouldn't break the connection until after this.
You could try doing an equivalent post to your view from a test script, using httplib and urllib as HTTPHandler itself does. Basically, just urlencode a dict for the POST data, as if it were a LogRecord's dict.
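For example, a throwaway Python 2 test script along these lines (just a sketch; the field values are copied from the QueryDict dump above, and the host/port are the ones your daemon uses):

import httplib
import urllib

# Roughly what HTTPHandler sends: the LogRecord's attribute dict, form-encoded.
record = {
    'name': 'ShoutGen', 'levelno': 50, 'levelname': 'CRITICAL',
    'msg': 'Finished MountGen init', 'args': '()',
    'exc_text': 'None', 'exc_info': 'None',
    'pathname': 'logtest.py', 'filename': 'logtest.py', 'module': 'logtest',
    'funcName': '<module>', 'lineno': 19,
    'created': 1299046203.22, 'msecs': 224.281, 'relativeCreated': 7.236,
    'thread': 140445579720448, 'threadName': 'MainThread',
    'process': 16172, 'processName': 'MainProcess',
}

conn = httplib.HTTPConnection("192.168.0.5", 9000)
conn.request("POST", "/log/", urllib.urlencode(record),
             {"Content-type": "application/x-www-form-urlencoded"})
response = conn.getresponse()
print response.status, response.read()   # expect 200 and "OK"
conn.close()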
Related
When I try to run my Python code in Lambda, passing the handler as function.module, I get the error below. Any suggestions on how I could resolve this?
The file test_client_visitor below is triggered to call client_visitor and send an email to the clients accordingly. When I run the Python file test_client_visitor locally, the email is triggered successfully, but in Lambda I am facing this issue.
file_name: test_client_visitor
import unittest

import jsonpickle

function = __import__('client_visitor')
handler = function.scan_clients


class TestFunction(unittest.TestCase):
    def test_function(self):
        file = open('event.json', 'rb')
        try:
            ba = bytearray(file.read())
            event = jsonpickle.decode(ba)
            print('## EVENT')
            print(jsonpickle.encode(event))
            context = {'requestid': '1234'}
            result = handler(event, context)
            print(result)
            self.assertTrue(result, 'Emails could not be sent!')
        finally:
            file.close()


if __name__ == '__main__':
    unittest.main()
file_name: client_visitor.py
import datetime
import boto3
from aws_ses import send_bulk_templated_email
# boto3.set_stream_logger('botocore', level='DEBUG')
from mongodb import get_mongo_db
def process_clients(clients, developers, clients_to_be_notified, days):
if not clients:
pass
check_date = datetime.datetime.now() + datetime.timedelta(days)
for client in clients:
client_id_ = client['client_id']
if 'developer_id' in client:
developers[client_id_] = client['developer_id']
else:
if 'secrets' in client:
secrets = client['secrets']
for secret in secrets:
if 'not_on_or_after' in secret and secret['not_on_or_after'] < check_date.timestamp():
clients_to_be_notified.append({'client_id': client_id_,
'expiration_date': datetime.datetime.fromtimestamp(
secret['not_on_or_after']).strftime('%m/%d/%Y')})
print("adding client to notify List", client_id_, ":", client['sort'])
def notify_clients(clients_to_be_notified, developers):
developer_id_list = []
for client_secret in clients_to_be_notified:
developer_id_list.append(developers[client_secret['client_id']])
if developer_id_list:
db = get_mongo_db()
if db:
users = list(db.users.find({'guid': {'$in': developer_id_list}}, {'email', 'guid'}))
need_to_send_email = False
for user in users:
for client_secret in clients_to_be_notified:
if developers[client_secret['client_id']] == user['guid']:
client_secret['email'] = user['email']
need_to_send_email = True
break
if need_to_send_email:
return send_bulk_templated_email(clients_to_be_notified)
else:
return False
return True
def scan_clients(event, context):
local = False
if 'local' in event:
local = event['local'] == 'True'
if local:
dynamodb = boto3.resource('dynamodb', endpoint_url="http://localhost:8000")
else:
dynamodb = boto3.resource('dynamodb')
days = 30
if 'days' in event:
days = int(event['days'])
print(f"Scanning Clients with {days} or less to secret expiration")
table = dynamodb.Table('****')
scan_kwargs = {
'ProjectionExpression': 'client_id, sort, developer_id, secrets, approved'
}
test = False
if 'test' in event:
test = event['test'] == 'True'
done = False
start_key = None
developers = {}
clients_to_be_notified = []
if test:
developers['idm-portal1'] = '***'
clients_to_be_notified = [{'client_id': 'idm-portal1', 'expiration_date': '04/17/2021'}]
while not done:
if start_key:
scan_kwargs['ExclusiveStartKey'] = start_key
response = table.scan(**scan_kwargs)
process_clients(response.get('Items', []), developers, clients_to_be_notified, days)
start_key = response.get('LastEvaluatedKey', None)
done = start_key is None
print("total developers ", len(developers), " total clients_to_be_notified ", len(clients_to_be_notified))
return notify_clients(clients_to_be_notified, developers)
if __name__ == '__main__':
scan_clients(event={'days': 30, 'local': False, 'test': True}, context=None)
Response
{
"errorMessage": "Unable to import module 'test_client_visitor': No module named 'test_client_visitor'",
"errorType": "Runtime.ImportModuleError",
"stackTrace": []
}
Your file must be named test_client_visitor.py. The way Lambda runs the code is by importing the main file and calling the handler function. See the AWS docs on setting up a handler for Python.
The reason you didn't run into this issue locally is, I assume, because you are calling Python directly on the command line (python test_client_visitor). When you import a module in Python, the file has to end with the .py extension.
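For illustration, assuming the function's handler setting is test_client_visitor.handler (a hypothetical value; substitute whatever you configured), the pieces have to line up like this:

# Handler setting in the Lambda console / template: "test_client_visitor.handler"
# -> Lambda imports test_client_visitor (so test_client_visitor.py must sit at the
#    root of the deployment zip) and then calls its module-level "handler" callable.

# test_client_visitor.py, at the zip root
function = __import__('client_visitor')   # client_visitor.py must also be at the zip root
handler = function.scan_clients           # invoked by Lambda as handler(event, context)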
I was able to fix this issue with the right packaging of the contents into the zip, avoiding the creation of an extra folder, with the command below.
Command:
cd folder; zip -r ../filename.zip *
Thank you everyone for your inputs.
I use PyCharm to write a Python 3 web app project using the Tornado web framework.
The listing service has been built already. I need to build the remaining two components: the user service and the public API layer. The implementation of the listing service can serve as a good starting point to learn more about how to structure a web application using the Tornado web framework.
I am required to use Tornado's built-in framework for HTTP requests.
The error occurs at listen (app.listen(options.port)) when I try to run the program:
Traceback (most recent call last):
File "D:/Bill/python/Tornado/99-python-exercise-master/listing_service.py", line 203, in <module>
app.listen(options.port)
File "C:\Program Files\Python38\lib\site-packages\tornado\web.py", line 2116, in listen
server.listen(port, address)
File "C:\Program Files\Python38\lib\site-packages\tornado\tcpserver.py", line 152, in listen
self.add_sockets(sockets)
File "C:\Program Files\Python38\lib\site-packages\tornado\tcpserver.py", line 165, in add_sockets
self._handlers[sock.fileno()] = add_accept_handler(
File "C:\Program Files\Python38\lib\site-packages\tornado\netutil.py", line 279, in add_accept_handler
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
File "C:\Program Files\Python38\lib\site-packages\tornado\platform\asyncio.py", line 100, in add_handler
self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
File "C:\Program Files\Python38\lib\asyncio\events.py", line 501, in add_reader
raise NotImplementedError
NotImplementedError
code:
import tornado.web
import tornado.log
import tornado.options
import sqlite3
import logging
import json
import time
class App(tornado.web.Application):
def __init__(self, handlers, **kwargs):
super().__init__(handlers, **kwargs)
# Initialising db connection
self.db = sqlite3.connect("listings.db")
self.db.row_factory = sqlite3.Row
self.init_db()
def init_db(self):
cursor = self.db.cursor()
# Create table
cursor.execute(
"CREATE TABLE IF NOT EXISTS 'listings' ("
+ "id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,"
+ "user_id INTEGER NOT NULL,"
+ "listing_type TEXT NOT NULL,"
+ "price INTEGER NOT NULL,"
+ "created_at INTEGER NOT NULL,"
+ "updated_at INTEGER NOT NULL"
+ ");"
)
self.db.commit()
class BaseHandler(tornado.web.RequestHandler):
def write_json(self, obj, status_code=200):
self.set_header("Content-Type", "application/json")
self.set_status(status_code)
self.write(json.dumps(obj))
# /listings
class ListingsHandler(BaseHandler):
@tornado.gen.coroutine
def get(self):
# Parsing pagination params
page_num = self.get_argument("page_num", 1)
page_size = self.get_argument("page_size", 10)
try:
page_num = int(page_num)
except:
logging.exception("Error while parsing page_num: {}".format(page_num))
self.write_json({"result": False, "errors": "invalid page_num"}, status_code=400)
return
try:
page_size = int(page_size)
except:
logging.exception("Error while parsing page_size: {}".format(page_size))
self.write_json({"result": False, "errors": "invalid page_size"}, status_code=400)
return
# Parsing user_id param
user_id = self.get_argument("user_id", None)
if user_id is not None:
try:
user_id = int(user_id)
except:
self.write_json({"result": False, "errors": "invalid user_id"}, status_code=400)
return
# Building select statement
select_stmt = "SELECT * FROM listings"
# Adding user_id filter clause if param is specified
if user_id is not None:
select_stmt += " WHERE user_id=?"
# Order by and pagination
limit = page_size
offset = (page_num - 1) * page_size
select_stmt += " ORDER BY created_at DESC LIMIT ? OFFSET ?"
# Fetching listings from db
if user_id is not None:
args = (user_id, limit, offset)
else:
args = (limit, offset)
cursor = self.application.db.cursor()
results = cursor.execute(select_stmt, args)
listings = []
for row in results:
fields = ["id", "user_id", "listing_type", "price", "created_at", "updated_at"]
listing = {
field: row[field] for field in fields
}
listings.append(listing)
self.write_json({"result": True, "listings": listings})
@tornado.gen.coroutine
def post(self):
# Collecting required params
user_id = self.get_argument("user_id")
listing_type = self.get_argument("listing_type")
price = self.get_argument("price")
# Validating inputs
errors = []
user_id_val = self._validate_user_id(user_id, errors)
listing_type_val = self._validate_listing_type(listing_type, errors)
price_val = self._validate_price(price, errors)
time_now = int(time.time() * 1e6) # Converting current time to microseconds
# End if we have any validation errors
if len(errors) > 0:
self.write_json({"result": False, "errors": errors}, status_code=400)
return
# Proceed to store the listing in our db
cursor = self.application.db.cursor()
cursor.execute(
"INSERT INTO 'listings' "
+ "('user_id', 'listing_type', 'price', 'created_at', 'updated_at') "
+ "VALUES (?, ?, ?, ?, ?)",
(user_id_val, listing_type_val, price_val, time_now, time_now)
)
self.application.db.commit()
# Error out if we fail to retrieve the newly created listing
if cursor.lastrowid is None:
self.write_json({"result": False, "errors": ["Error while adding listing to db"]}, status_code=500)
return
listing = dict(
id=cursor.lastrowid,
user_id=user_id_val,
listing_type=listing_type_val,
price=price_val,
created_at=time_now,
updated_at=time_now
)
self.write_json({"result": True, "listing": listing})
def _validate_user_id(self, user_id, errors):
try:
user_id = int(user_id)
return user_id
except Exception as e:
logging.exception("Error while converting user_id to int: {}".format(user_id))
errors.append("invalid user_id")
return None
def _validate_listing_type(self, listing_type, errors):
if listing_type not in {"rent", "sale"}:
errors.append("invalid listing_type. Supported values: 'rent', 'sale'")
return None
else:
return listing_type
def _validate_price(self, price, errors):
# Convert string to int
try:
price = int(price)
except Exception as e:
logging.exception("Error while converting price to int: {}".format(price))
errors.append("invalid price. Must be an integer")
return None
if price < 1:
errors.append("price must be greater than 0")
return None
else:
return price
# /listings/ping
class PingHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def get(self):
self.write("pong!")
def make_app(options):
return App([
(r"/listings/ping", PingHandler),
(r"/listings", ListingsHandler),
], debug=options.debug)
if __name__ == "__main__":
# Define settings/options for the web app
# Specify the port number to start the web app on (default value is port 6000)
tornado.options.define("port", default=6000)
# Specify whether the app should run in debug mode
# Debug mode restarts the app automatically on file changes
tornado.options.define("debug", default=True)
# Read settings/options from command line
tornado.options.parse_command_line()
# Access the settings defined
options = tornado.options.options
# Create web app
app = make_app(options)
app.listen(options.port)
logging.info("Starting listing service. PORT: {}, DEBUG: {}".format(options.port, options.debug))
# Start event loop
tornado.ioloop.IOLoop.instance().start()
How to fix this problem?
Python 3.8 made a backwards-incompatible change to the asyncio package used by Tornado. Applications that use Tornado on Windows with Python 3.8 must call asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) at the beginning of their main file/function. (as documented on the home page of tornadoweb.org)
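A minimal sketch of that workaround applied to the listing service above (it assumes the imports and make_app() from the question's file; the sys.platform guard keeps the same script runnable on other platforms):

import sys
import asyncio

if __name__ == "__main__":
    # Must run before Tornado creates its IOLoop (i.e. before app.listen()).
    if sys.platform == "win32":
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

    tornado.options.define("port", default=6000)
    tornado.options.define("debug", default=True)
    tornado.options.parse_command_line()
    options = tornado.options.options

    app = make_app(options)
    app.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()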
We have about 5,000 objects of class Domain in Google App Engine, and we want to export the list of domains to CSV. Each domain is linked to an object of class DomainStateData:
class DomainStateData(db.Expando, ExpandoEntity):
    plan = db.ReferenceProperty(Plan)
    plan_expiration = db.DateTimeProperty()
    trial_expiration = db.DateTimeProperty()
    date_created = db.DateTimeProperty(auto_now_add=True, indexed=True)
    last_modified = db.DateTimeProperty(auto_now=True)


class Domain(db.Expando, ExpandoEntity, SocialIconsEntity):
    """
    Domain Model
    """
    domain = db.StringProperty(required=True)
    ...
    _state_data = db.ReferenceProperty(DomainStateData)

    @property
    def state_data(self):
        try:
            if not self._state_data:
                # try to get it, if not, build it
                sd = DomainStateData.get_by_key_name(self.key().name())
                if not sd:
                    sd = DomainStateData(key_name=self.key().name()).put()
                self._state_data = sd
                self.put()
                return self._state_data
            else:
                return self._state_data
        except ReferencePropertyResolveError:
            self._state_data = DomainStateData(key_name=self.key().name()).put()
            self.put()
            return self._state_data
I wrote code that exports 100 domains to CSV (it takes 5 seconds), but if I try to fetch all 5,000 domains I hit the timeout, which is 60 seconds. Is it possible to fetch all the DomainStateData objects together without a timeout? Here is my code that exports the domains to CSV:
import sys
sys.path.insert(0, 'libs')
import webapp2
import datetime
import csv
from models import Domain
class ExportAllDomainsToCsvHandler(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/csv'
self.response.headers['Content-Disposition'] = 'attachment; filename="All Domains [{0}].csv"'.format(str(datetime.date.today()))
writer = csv.writer(self.response.out)
writer.writerow(["Domain", "Current state", "Plan expiration date", "Trial expiration date", "Current oauth user"])
all_domains = Domain.all().fetch(100)
all_domains.sort(key=lambda domain: (0 if domain.state_data.plan_expiration is None else 1, domain.state_data.plan_expiration, 0 if domain.state_data.trial_expiration is None else 1, domain.state_data.trial_expiration, domain.domain))
for domain in all_domains:
if (domain.state_data.plan_expiration is None):
domain_plan_expiration = "No plan expiration date"
else:
domain_plan_expiration = domain.state_data.plan_expiration.strftime('%Y-%m-%d')
if (domain.state_data.trial_expiration is None):
domain_trial_expiration = "No trial expiration date"
else:
domain_trial_expiration = domain.state_data.trial_expiration.strftime('%Y-%m-%d')
writer.writerow([domain.domain, domain.cur_state.name, domain_plan_expiration, domain_trial_expiration, domain.admin])
app = webapp2.WSGIApplication([
("/csv/export_all_domains_to_csv", ExportAllDomainsToCsvHandler)
], debug=True)
OK, I found a solution. I fetched all the DomainStateData objects directly from the database, and now it takes 35 seconds to create the CSV with all the domains. Here is my code; I didn't change the models:
import sys
sys.path.insert(0, 'libs')
import webapp2
import datetime
import csv
from models import DomainStateData, Domain
class ExportAllDomainsToCsvHandler(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'text/csv'
self.response.headers['Content-Disposition'] = 'attachment; filename="All Domains [{0}].csv"'.format(str(datetime.date.today()))
writer = csv.writer(self.response.out)
writer.writerow(["Domain", "Current state", "Plan expiration date", "Trial expiration date", "Current oauth user"])
all_domain_state_data_dict = dict()
all_domain_state_data = DomainStateData.all().fetch(1000000)
all_domains = Domain.all().fetch(1000000)
for domain_state_data in all_domain_state_data:
all_domain_state_data_dict[domain_state_data.key().name()] = domain_state_data
for domain in all_domains:
if (domain.key().name() in all_domain_state_data_dict):
domain.__state_data = all_domain_state_data_dict[domain.key().name()]
all_domains.sort(key=lambda domain: (0 if domain.__state_data.plan_expiration is None else 1, domain.__state_data.plan_expiration, 0 if domain.__state_data.trial_expiration is None else 1, domain.__state_data.trial_expiration, domain.domain))
for domain in all_domains:
if (domain.__state_data.plan_expiration is None):
domain_plan_expiration = "No plan expiration date"
else:
domain_plan_expiration = domain.__state_data.plan_expiration.strftime('%Y-%m-%d')
if (domain.__state_data.trial_expiration is None):
domain_trial_expiration = "No trial expiration date"
else:
domain_trial_expiration = domain.__state_data.trial_expiration.strftime('%Y-%m-%d')
writer.writerow([domain.domain, domain.cur_state.name, domain_plan_expiration, domain_trial_expiration, domain.admin])
app = webapp2.WSGIApplication([
("/csv/export_all_domains_to_csv", ExportAllDomainsToCsvHandler)
], debug=True)
Short Description
Using a local (sqlite3) database I can save and extract information; however, when using my production server (PostgreSQL 8.4 on a different machine), Django throws a "can't adapt type" error. Can someone help point me in a good direction to start debugging this?
Background
The Python project uses Django 1.3 as an ORM to multiple databases (production and debug). The multiple-database connection works in all cases but this one. In searching SO and Google, most of these errors were fixed by re-running syncdb. I have done this on both my production and debug servers and still get the same error. After looking at the models, accessors and mutators, I cannot see what is different about this function. I have included the suspect functions, my model, and the error message. Any help would be greatly appreciated. I'll be glad to post any other, non-sensitive, information needed.
System Information
* Postgresql 8.4 - Ubuntu Server
* Django Project (Client / Users)- Windows XP (as shown), Mac OSX, Windows 7 (64bit)
* Python 2.7.1
* Psycopg2
* Django 1.3
Models.py
from django.db import models
# Create your models here.
class Card_Test(models.Model):
name = models.TextField(max_length=100)
description = models.TextField(max_length=200)
units = models.TextField(max_length=500)
result_tags = models.TextField(max_length=500)
def __unicode__(self):
return self.name
class Status_Type(models.Model):
status = models.CharField(max_length=25)
def __unicode__(self):
return self.status
class Card_Test_List(models.Model):
card_id = models.ForeignKey('Card')
card_test_id = models.ForeignKey('Card_Test')
card_test_sub_id = models.PositiveIntegerField()
status = models.ForeignKey('Status_Type')
result = models.TextField()
ran_on = models.DateField()
ran_by = models.CharField(max_length=50)
run_number = models.PositiveIntegerField()
def __unicode__(self):
return self.card_test_id.__unicode__()
class Card_Type(models.Model):
card_type = models.CharField(max_length=25)
def __unicode__(self):
return self.card_type
class Card(models.Model):
serial_number = models.CharField(max_length=25)
card_type = models.ForeignKey('Card_Type')
status = models.ForeignKey('Status_Type')
card_tests = models.ManyToManyField('Card_Test', through='Card_Test_List')
def __unicode__(self):
return self.serial_number
def print_all_cards(self):
print Card.objects.all()
class System_Test_Type(models.Model):
test_type = models.CharField(max_length=25)
def __unicode__(self):
return self.test_type
class System_Test(models.Model):
name = models.CharField(max_length=100)
description = models.CharField(max_length=100)
units = models.CharField(max_length=100)
def __unicode__(self):
return self.name
class System_Test_List(models.Model):
chassis_id = models.ForeignKey('Chassis')
system_test_id = models.ForeignKey('System_Test')
result = models.CharField(max_length=25)
ran_on = models.DateField()
ran_by = models.CharField(max_length=50)
test_type = models.ForeignKey('System_Test_Type')
def __unicode__(self):
return self.system_test_id.__unicode__()
class Chassis(models.Model):
serial_number = models.CharField(max_length=25)
slot_311 = models.ForeignKey(Card, related_name='slot1_card')
slot_175 = models.ForeignKey(Card, related_name='slot2_card')
slot_345 = models.ForeignKey(Card, related_name='slot3_card')
slot_346 = models.ForeignKey(Card, related_name='slot4_card')
slot_344 = models.ForeignKey(Card, related_name='slot5_card')
slot_178 = models.ForeignKey(Card, related_name='slot6_card')
backplane_serial = models.TextField(max_length=25)
site_location = models.TextField(max_length=25)
status_env_70c = models.ForeignKey('Status_Type', related_name='70C')
status_env_10c = models.ForeignKey('Status_Type', related_name='10C')
status_assembly = models.ForeignKey('Status_Type', related_name='assembly')
status_final = models.ForeignKey('Status_Type', related_name='final')
system_test = models.ManyToManyField(System_Test, through='System_Test_List')
def __unicode__(self):
return self.serial_number
Settings.py Excerpt
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'production', # Or path to database file if using sqlite3.
'USER': 'tester', # Not used with sqlite3.
'PASSWORD': 'xxxxxx', # Not used with sqlite3.
'HOST': '10.10.100.30', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
},
'debug': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': curdir + '/mux_db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
Accessors / Mutators
Works on both the debug and production databases:
def get_chassis_by_sn(chassis_id):
try:
return Chassis.objects.using(get_database()).filter(serial_number__iexact=chassis_id)
except Chassis.DoesNotExist:
return []
Only works on the debug database. See the inline comments for the line it fails on:
def modify_chassis_sn(current, new):
#chassis = Chassis.objects.using(get_database()).filter(serial_number__iexact=current)
try:
chassis = Chassis.objects.using(get_database()).filter(serial_number__iexact=current)
except Chassis.DoesNotExist:
err.Handle_MUX_Error('Unknown Chassis / Serial Number')
return False
chassis = chassis[0] # <--- Fails here
# Update the serial number
chassis.serial_number = new
chassis.save(using=get_database())
return True
def modify_chassis_record(sn, slots=None, bp_sn=None, site_loc=None ):
#chassis = Chassis.objects.using(get_database()).filter(serial_number__iexact=sn)
try:
c = Chassis.objects.using(get_database()).filter(serial_number__iexact=sn)
print len(c)
except Chassis.DoesNotExist:
err.Handle_MUX_Error('Unknown Chassis / Serial Number')
return False
chassis = c[0] # <--- Fails here
# Update the List of Cards (if provided)
if slots != None:
if len(slots) != 6:
err.Handle_MUX_Error('You must enter 6 serial numbers!')
return False
index = 0
db_slots = [None]*6
for slot in slots:
s = get_card_by_sn(slot)
if len(s) == 0:
err.Handle_MUX_Error('Slot ' + str(index) + ' serial number does not exist!')
return False
db_slots[index] = s[0]
index +=1
chassis.slot_311 = db_slots[0]
chassis.slot_175 = db_slots[1]
chassis.slot_345 = db_slots[2]
chassis.slot_346 = db_slots[3]
chassis.slot_344 = db_slots[4]
chassis.slot_178 = db_slots[5]
# Update the Backplane Serial Number (if provided)
if bp_sn != None:
chassis.backplane_serial = bp_sn
# Update the site Location (if provided)
if site_loc != None:
chassis.site_location = site_loc
# Save all changes to the chassis
print chassis
chassis.save(using=get_database())
return True
Error Message
Traceback (most recent call last):
File "C:\Documents and Settings\User\Desktop\Python\exe\wx_gui\Assembly_Panel.py", line 276, in SaveChassis
mux_api.modify_chassis.run(active_chassis, new_chassis_id, bp_sn=backplane_sn)
File "C:\Documents and Settings\User\Desktop\Python\exe\api_rehabilitation_suite\modify_chassis.py", line 10, in run
modify_status = db.modify_chassis_record(sn, slots=slots, bp_sn=bp_sn, site_loc=site_loc )
File "C:\Documents and Settings\User\Desktop\Python\mux_test_data\db_driver.py", line 136, in modify_chassis_record
print len(c)
File "C:\Python27\lib\site-packages\django\db\models\query.py", line 82, in __len__
self._result_cache = list(self.iterator())
File "C:\Python27\lib\site-packages\django\db\models\query.py", line 273, in iterator
for row in compiler.results_iter():
File "C:\Python27\lib\site-packages\django\db\models\sql\compiler.py", line 680, in results_iter
for rows in self.execute_sql(MULTI):
File "C:\Python27\lib\site-packages\django\db\models\sql\compiler.py", line 735, in execute_sql
cursor.execute(sql, params)
File "C:\Python27\lib\site-packages\django\db\backends\util.py", line 34, in execute
return self.cursor.execute(sql, params)
File "C:\Python27\lib\site-packages\django\db\backends\postgresql_psycopg2\base.py", line 44, in execute
return self.cursor.execute(query, args)
django.db.utils.DatabaseError: can't adapt type 'Chassis'
It turns out that sqlite3 ignores, or tries to convert, the value to a string when it is used in a Django filter. From the GUI I was passing in the actual Chassis object rather than the serial number. PostgreSQL threw the error when trying to use a Chassis object as a serial number, while sqlite3 ignored it and cast it to a string (which I had defined in Django as the serial number).
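A sketch of the resulting fix, using the accessors above (active_chassis here stands for the Chassis instance the GUI had been passing in):

# Works on both backends: filter on the string field, not the model instance.
chassis_qs = Chassis.objects.using(get_database()).filter(
    serial_number__iexact=active_chassis.serial_number)

# What was happening before: PostgreSQL raises "can't adapt type 'Chassis'",
# while sqlite3 silently casts the object to a string.
# chassis_qs = Chassis.objects.using(get_database()).filter(serial_number__iexact=active_chassis)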
I hope this discovery might help someone else when trying to debug this type of error.
Thanks for all who looked into this.
I'm seeking a way to let the Python logging module log to a database and fall back to the file system when the DB is down.
So basically two things: how to make the logger log to a database, and how to make it fall back to file logging when the DB is down.
I recently managed to write my own database logger in Python. Since I couldn't find any example, I thought I'd post mine here. It works with MS SQL.
The database table could look like this:
CREATE TABLE [db_name].[log](
[id] [bigint] IDENTITY(1,1) NOT NULL,
[log_level] [int] NULL,
[log_levelname] [char](32) NULL,
[log] [char](2048) NOT NULL,
[created_at] [datetime2](7) NOT NULL,
[created_by] [char](32) NOT NULL
) ON [PRIMARY]
The class itself:
class LogDBHandler(logging.Handler):
'''
Customized logging handler that puts logs to the database.
pymssql required
'''
def __init__(self, sql_conn, sql_cursor, db_tbl_log):
logging.Handler.__init__(self)
self.sql_cursor = sql_cursor
self.sql_conn = sql_conn
self.db_tbl_log = db_tbl_log
def emit(self, record):
# Set current time
tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
# Clear the log message so it can be put to db via sql (escape quotes)
self.log_msg = record.msg
self.log_msg = self.log_msg.strip()
self.log_msg = self.log_msg.replace('\'', '\'\'')
# Make the SQL insert
sql = 'INSERT INTO ' + self.db_tbl_log + ' (log_level, ' + \
'log_levelname, log, created_at, created_by) ' + \
'VALUES (' + \
'' + str(record.levelno) + ', ' + \
'\'' + str(record.levelname) + '\', ' + \
'\'' + str(self.log_msg) + '\', ' + \
'(convert(datetime2(7), \'' + tm + '\')), ' + \
'\'' + str(record.name) + '\')'
try:
self.sql_cursor.execute(sql)
self.sql_conn.commit()
# If error - print it out on screen. Since DB is not working - there's
# no point making a log about it to the database :)
except pymssql.Error as e:
print sql
print 'CRITICAL DB ERROR! Logging to database not possible!'
And usage example:
import pymssql
import time
import logging
db_server = 'servername'
db_user = 'db_user'
db_password = 'db_pass'
db_dbname = 'db_name'
db_tbl_log = 'log'
log_file_path = 'C:\\Users\\Yourname\\Desktop\\test_log.txt'
log_error_level = 'DEBUG' # LOG error level (file)
log_to_db = True # LOG to database?
class LogDBHandler(logging.Handler):
[...]
# Main settings for the database logging use
if (log_to_db):
# Make the connection to database for the logger
log_conn = pymssql.connect(db_server, db_user, db_password, db_dbname, 30)
log_cursor = log_conn.cursor()
logdb = LogDBHandler(log_conn, log_cursor, db_tbl_log)
# Set logger
logging.basicConfig(filename=log_file_path)
# Set db handler for root logger
if (log_to_db):
logging.getLogger('').addHandler(logdb)
# Register MY_LOGGER
log = logging.getLogger('MY_LOGGER')
log.setLevel(log_error_level)
# Example variable
test_var = 'This is test message'
# Log the variable contents as an error
log.error('This error occurred: %s' % test_var)
The above will log both to the database and to the file. If the file is not needed, skip the logging.basicConfig(filename=log_file_path) line. Everything logged using 'log' will be logged as MY_LOGGER. If some external error appears (i.e. in an imported module or similar), the error will appear as 'root', since the 'root' logger is also active and is using the database handler.
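If you would rather keep other modules' 'root' messages out of the database, a small variation (just a sketch, reusing the logdb handler from above) is to attach the DB handler to MY_LOGGER only:

# File logging stays on the root logger via basicConfig()
logging.basicConfig(filename=log_file_path)

# DB handler only on MY_LOGGER; its records still propagate up to the root file handler
log = logging.getLogger('MY_LOGGER')
log.setLevel(log_error_level)
log.addHandler(logdb)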
Write yourself a handler that directs the logs to the database in question. When it fails, you can remove it from the handler list of the logger. There are many ways to deal with the failure-modes.
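As a sketch of that idea (insert_into_db() is a placeholder for whatever database code you write), the handler can simply unhook itself from the root logger after the first failure, leaving the remaining file handler in charge:

import logging

class FragileDBHandler(logging.Handler):
    def emit(self, record):
        try:
            insert_into_db(self.format(record))        # placeholder: your DB insert
        except Exception:
            logging.getLogger().removeHandler(self)    # drop this handler; the file handler keeps logging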
Python logging to a database with a backup logger
Problem
I had the same problem when I ran a Django project on a server, since sometimes you need to check the logs remotely.
Solution
First, you need a handler for the logger that inserts logs into the database. Before that, and since my SQL is not good, an ORM is needed; I chose SQLAlchemy.
model:
# models.py
from sqlalchemy import Column, Integer, String, DateTime, Text
from sqlalchemy.ext.declarative import declarative_base
import datetime
base = declarative_base()
class Log(base):
__tablename__ = "log"
id = Column(Integer, primary_key=True, autoincrement=True)
time = Column(DateTime, nullable=False, default=datetime.datetime.now)
level_name = Column(String(10), nullable=True)
module = Column(String(200), nullable=True)
thread_name = Column(String(200), nullable=True)
file_name = Column(String(200), nullable=True)
func_name = Column(String(200), nullable=True)
line_no = Column(Integer, nullable=True)
process_name = Column(String(200), nullable=True)
message = Column(Text)
last_line = Column(Text)
This is the CRUD class for insertion into the database:
#crud.py
import sqlalchemy
from .models import base
from traceback import print_exc
class Crud:
def __init__(self, connection_string=f'sqlite:///log_db.sqlite3',
encoding='utf-8',
pool_size=10,
max_overflow=20,
pool_recycle=3600):
self.connection_string = connection_string
self.encoding = encoding
self.pool_size = pool_size
self.max_overflow = max_overflow
self.pool_recycle = pool_recycle
self.engine = None
self.session = None
def initiate(self):
self.create_engine()
self.create_session()
self.create_tables()
def create_engine(self):
self.engine = sqlalchemy.create_engine(self.connection_string)
def create_session(self):
self.session = sqlalchemy.orm.Session(bind=self.engine)
def create_tables(self):
base.metadata.create_all(self.engine)
def insert(self, instances):
try:
self.session.add(instances)
self.session.commit()
self.session.flush()
except:
self.session.rollback()
raise
def __del__(self):
self.close_session()
self.close_all_connections()
def close_session(self):
try:
self.session.close()
except:
print_exc()
else:
self.session = None
def close_all_connections(self):
try:
self.engine.dispose()
except:
print_exc()
else:
self.engine = None
The handler:
# handler.py
from logging import Handler, getLogger
from traceback import print_exc
from .crud import Crud
from .models import Log
my_crud = Crud(
connection_string=<connection string to reach your db>,
encoding='utf-8',
pool_size=10,
max_overflow=20,
pool_recycle=3600)
my_crud.initiate()
class DBHandler(Handler):
backup_logger = None
def __init__(self, level=0, backup_logger_name=None):
super().__init__(level)
if backup_logger_name:
self.backup_logger = getLogger(backup_logger_name)
def emit(self, record):
try:
message = self.format(record)
try:
last_line = message.rsplit('\n', 1)[-1]
except:
last_line = None
try:
new_log = Log(module=record.module,
thread_name=record.threadName,
file_name=record.filename,
func_name=record.funcName,
level_name=record.levelname,
line_no=record.lineno,
process_name=record.processName,
message=message,
last_line=last_line)
# raise
my_crud.insert(instances=new_log)
except:
if self.backup_logger:
try:
getattr(self.backup_logger, record.levelname.lower())(record.message)
except:
print_exc()
else:
print_exc()
except:
print_exc()
Test to check the logger:
# test.py
from logging import basicConfig, getLogger, DEBUG, FileHandler, Formatter
from .handlers import DBHandler
basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S',
level=DEBUG)
format = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
backup_logger = getLogger('backup_logger')
file_handler = FileHandler('file.log')
file_handler.setLevel(DEBUG)
file_handler.setFormatter(format)
backup_logger.addHandler(file_handler)
db_logger = getLogger('logger')
db_handler = DBHandler(backup_logger_name='backup_logger')
db_handler.setLevel(DEBUG)
db_handler.setFormatter(format)
db_logger.addHandler(db_handler)
if __name__ == "__main__":
db_logger.debug('debug: hello world!')
db_logger.info('info: hello world!')
db_logger.warning('warning: hello world!')
db_logger.error('error: hello world!')
db_logger.critical('critical: hello world!!!!')
You can see that the handler accepts a backup logger that it can use when the database insertion fails.
A good improvement would be logging to the database in a separate thread.
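For example, one way to do that with the standard library (a sketch, reusing the DBHandler defined above; QueueHandler/QueueListener are available since Python 3.2): records are put on a queue in the calling thread, and the DB insert happens on the listener's worker thread.

import logging
import queue
from logging.handlers import QueueHandler, QueueListener

log_queue = queue.Queue(-1)                          # unbounded queue
db_handler = DBHandler(backup_logger_name='backup_logger')   # the handler defined above
listener = QueueListener(log_queue, db_handler)      # worker thread drives DBHandler.emit()
listener.start()

db_logger = logging.getLogger('logger')
db_logger.addHandler(QueueHandler(log_queue))        # callers only pay the cost of a queue put

# ... at shutdown:
# listener.stop()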
I am digging this up again.
There is a solution with SqlAlchemy (Pyramid is NOT required for this recipe):
https://docs.pylonsproject.org/projects/pyramid-cookbook/en/latest/logging/sqlalchemy_logger.html
And you could improve logging by adding extra fields; here is a guide: https://stackoverflow.com/a/17558764/1115187
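The mechanism behind that guide, in short: pass an extra dict when logging, and the attributes appear on the LogRecord, where a handler like the one above can map them to columns. A tiny sketch (the field names are just examples):

import logging

logger = logging.getLogger('app')
logger.error('payment failed', extra={'user_id': 42, 'request_id': 'abc-123'})

# Inside a handler/formatter these show up as record.user_id and record.request_id,
# so they can be written to dedicated columns of the log table.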
Fallback to FS
Not sure that this is 100% correct, but you could have 2 handlers:
database handler (write to DB)
file handler (write to file or stream)
Just wrap the DB commit in a try-except. But be aware: the file will contain ALL log entries, not only the entries for which the DB save failed.
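If you only want the failed entries on disk instead of a full duplicate, one variant (a sketch; save_to_db() is a placeholder for your own commit code) is to give the DB handler a private file handler that it only uses when the commit raises:

import logging

class DBWithFileFallbackHandler(logging.Handler):
    def __init__(self, fallback_path='db_fallback.log'):
        super().__init__()
        self._fallback = logging.FileHandler(fallback_path, delay=True)  # file only created on first failure

    def emit(self, record):
        try:
            save_to_db(self.format(record))    # placeholder: your DB commit
        except Exception:
            self._fallback.emit(record)        # only records that failed the DB write hit the file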
Old question, but dropping this here for others. If you want to use Python logging, you can add two handlers. One for writing to file: a rotating file handler. This is robust, and works regardless of whether the DB is up or not.
The other one can write to another service/module, like a pymongo integration.
Look up logging.config for how to set up your handlers from code or JSON.
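For instance, a dictConfig sketch along those lines: the rotating file handler is standard library, while myapp.handlers.MongoHandler is a hypothetical handler class you would write yourself around pymongo.

import logging.config

logging.config.dictConfig({
    'version': 1,
    'handlers': {
        'file': {
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': 'app.log',
            'maxBytes': 10 * 1024 * 1024,   # rotate at 10 MB
            'backupCount': 5,
        },
        'db': {
            '()': 'myapp.handlers.MongoHandler',   # hypothetical custom handler (e.g. pymongo-backed)
        },
    },
    'root': {'level': 'INFO', 'handlers': ['file', 'db']},
})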