I wrote a small CLI todo app using docopt, but when I run it with python t.py I get the exception below at the end, even though everything seems to work fine. When I pass a command to the app I get no exception at all. One more thing: if I remove the __del__ method no exception appears, but I think we need to close the SQLite db connection. Any suggestions?
Exception AttributeError: "'Todo' object has no attribute 'db'" in <bound method Todo.__del__ of <__main__.Todo object at 0x1038dac50>> ignored
App code:
"""t, a unix command-line todo application
Usage:
t add <task>
t check <id>
t uncheck <id>
t clear
t ls [--all]
t -h | --help
t --version
Commands:
add Add a new task
check Check a new task as done
uncheck Uncheck a task as done
clear Refresh the database
ls List all tasks
Options:
-h --help Show this screen.
--version Show version.
--all List all tasks
"""
import sqlite3
import os
import datetime
from docopt import docopt
from termcolor import colored
from prettytable import PrettyTable
SMILEY = "\xF0\x9F\x98\x83" # Smiley emoji
GRIN = "\xF0\x9F\x98\x81" # Grin face emoji
def echo(msg, err=False):
"""
    A simple function for printing to the terminal with colors and emojis
"""
if err:
print colored(msg + " " + GRIN, "red")
else:
print colored(msg + " " + SMILEY, "cyan")
class Todo(object):
def __init__(self):
"""
Set up the db and docopt upon creation of object
"""
self.arg = docopt(__doc__, version=0.10)
# Create a path to store the database file
db_path = os.path.expanduser("~/")
self.db_path = db_path + "/" + ".t-db"
self.init_db()
def init_db(self):
self.db = sqlite3.connect(self.db_path)
self.cursor = self.db.cursor()
self.cursor.execute('''
CREATE TABLE IF NOT EXISTS todo(id INTEGER PRIMARY KEY, task TEXT,
done INT, date_added TEXT, date_completed TEXT)
''')
self.db.commit()
def run(self):
"""
        Parse the args using docopt and route to the respective methods
"""
if self.arg['add']:
self.add_task()
elif self.arg['check']:
self.check_task()
elif self.arg['uncheck']:
self.uncheck_task()
elif self.arg['clear']:
self.clear_task()
else:
if self.arg['--all']:
self.list_task()
else:
self.list_pending_tasks()
def _record_exists(self, id):
"""
Checks if the record exists in the db
"""
self.cursor.execute('''
SELECT * FROM todo WHERE id=?
''', (id,))
record = self.cursor.fetchone()
if record is None:
return False
return True
def _is_done(self, id):
"""
Checks if the task has already been marked as done
"""
self.cursor.execute('''
SELECT done FROM todo WHERE id=?
''', (id,))
record = self.cursor.fetchone()
if record == 0:
return False
return True
def add_task(self):
"""
Add a task todo to the db
"""
task = self.arg['<task>']
date = datetime.datetime.now()
date_now = "%s-%s-%s" % (date.day, date.month, date.year)
self.cursor.execute('''
INSERT INTO todo(task, done, date_added)
VALUES (?, ?, ?)
''', (str(task), 0, date_now))
self.db.commit()
echo("The task has been been added to the list")
def check_task(self):
"""
Mark a task as done
"""
task_id = self.arg['<id>']
date = datetime.datetime.now()
date_now = "%s-%s-%s" % (date.day, date.month, date.year)
if self._record_exists(task_id):
self.cursor.execute('''
UPDATE todo SET done=?, date_completed=? WHERE Id=?
''', (1, date_now, int(task_id)))
echo("Task %s has been marked as done" % str(task_id))
self.db.commit()
else:
echo("Task %s doesn't exist" % (str(task_id)), err=True)
def uncheck_task(self):
"""
        Mark a done task as undone
"""
task_id = self.arg['<id>']
if self._record_exists(task_id):
self.cursor.execute('''
UPDATE todo SET done=? WHERE id=?
''', (0, int(task_id)))
echo("Task %s has been unchecked" % str(task_id))
self.db.commit()
else:
echo("Task %s doesn't exist" % str(task_id), err=True)
def list_task(self):
"""
Display all tasks in a table
"""
tab = PrettyTable(["Id", "Task Todo", "Done ?", "Date Added",
"Date Completed"])
tab.align["Id"] = "l"
tab.padding_width = 1
self.cursor.execute('''
SELECT id, task, done, date_added, date_completed FROM todo
''')
records = self.cursor.fetchall()
for each_record in records:
if each_record[2] == 0:
done = "Nop"
else:
done = "Yup"
if each_record[4] is None:
status = "Pending..."
else:
status = each_record[4]
tab.add_row([each_record[0], each_record[1], done,
each_record[3], status])
print tab
def list_pending_tasks(self):
"""
Display all pending tasks in a tabular form
"""
tab = PrettyTable(["Id", "Task Todo", "Date Added"])
tab.align["Id"] = "l"
tab.padding_width = 1
self.cursor.execute('''
SELECT id, task, date_added FROM todo WHERE done=?
''', (int(0),))
records = self.cursor.fetchall()
for each_record in records:
tab.add_row([each_record[0], each_record[1], each_record[2]])
print tab
def clear_task(self):
"""
Delete the table to refresh the app
"""
self.cursor.execute('''
DROP TABLE todo
''')
self.db.commit()
def __del__(self):
self.db.close()
def main():
"""
Entry point for console script
"""
app = Todo()
app.run()
if __name__ == "__main__":
main()
My debugging session tells me that docopt immediately bails out if it can't parse the given options (in your case, for example when no options at all are given).
So in your __init__, docopt() is called before self.init_db() gets a chance to set up self.db. When docopt fails to parse the (not) given options, it immediately tries to do something like exit(1) (I'm guessing here), which in turn tears down the Todo object via the __del__ method, but at that point the self.db member variable doesn't exist yet.
So the "best" fix would probably be to set up the database before calling docopt, or to tell docopt that no options are OK as well.
Avoid the use of __del__. If you want to be sure everything is closed, I suggest you explicitly call self.db.close() in or after your run method; alternatively, register the close using the atexit module, see also this similar post.
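A sketch of the atexit variant: register the close right where the connection is created, so it runs however the program exits, and __del__ can be dropped entirely:
# at the top of the module
import atexit

# inside Todo, with __del__ removed
def init_db(self):
    self.db = sqlite3.connect(self.db_path)
    atexit.register(self.db.close)  # closes at interpreter exit, sys.exit() included
    self.cursor = self.db.cursor()
    # ... rest of init_db unchanged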
I'm working on a project very similar to this one: GitHub
I have a class:
class DBfunctions:
def __init__(self, dbname = '../example.db'):
        self.dbname = dbname
self.conn = sqlite3.connect(dbname)
def search_db(self, telegram_id):
telegram_id = (telegram_id,)
sql = 'SELECT * FROM user WHERE id = ?;'
row = self.conn.execute(sql,telegram_id)
return row
def newuser_db(self, tele_id, name, nick):
par = (tele_id, name, nick, 0)
sql = 'INSERT INTO user VALUES(?,?,?,?);'
self.conn.execute(sql, par)
self.conn.commit()
Then I have the main project:
from file import DBfunctions
db = DBfunctions()
def start(update: Update, context: CallbackContext):  # before edit: somethingtodo
flag = db.search_db(update.effective_user.id) # here problems start
if flag == None:
db.newuser_db(update.effective_user.id, update.effective_user.first_name, update.effective_user.username)
update.message.reply_text(
'Hi!',
reply_markup=markup,
)
else:
update.message.reply_text(
'Hey! Welcome back!',
reply_markup=markup,
)
def main():
    db.setup()  # this function creates the tables if they don't exist yet
dispatcher.add_handler(CommandHandler('start', start))
# other function but nothing correlated
if __name__ == '__main__':
main()
And then the error appears:
File "filefolder/file.py", line 29, in search_db
row = self.conn.execute(sql,telegram_id)
sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread. The object was created in thread id 15004 and this is thread id 11036.
I can't figure out what I can do to fix it, and I don't understand how it differs from the project I found on GitHub (linked).
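For context, the error means the sqlite3 connection was created on the main thread, while python-telegram-bot runs handlers such as start() on worker threads. A sketch of one common workaround, assuming the same DBfunctions class: pass check_same_thread=False and serialize access with a lock (the alternative is to open a fresh connection inside each method):
import sqlite3
import threading

class DBfunctions:
    def __init__(self, dbname='../example.db'):
        self.dbname = dbname
        # Allow use from the bot's worker threads; the lock serializes
        # access, since the connection object itself is not thread-safe.
        self.conn = sqlite3.connect(dbname, check_same_thread=False)
        self.lock = threading.Lock()

    def search_db(self, telegram_id):
        with self.lock:
            row = self.conn.execute(
                'SELECT * FROM user WHERE id = ?;', (telegram_id,))
            return row.fetchone()  # None when the user is unknown

    def newuser_db(self, tele_id, name, nick):
        with self.lock:
            self.conn.execute('INSERT INTO user VALUES(?,?,?,?);',
                              (tele_id, name, nick, 0))
            self.conn.commit()
Note that search_db here returns fetchone(), so the flag == None check in start() behaves as intended; the original returned the cursor object, which is never None.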
The following is a slightly censored Python 3.7 program which apparently goes to sleep (0% CPU but still there according to htop) after a couple of hours of running on Linux on my Raspberry Pi. I've programmed it to react to Ctrl+C; after going to sleep it still reacts to Ctrl+C, but I guess that still means that it's firmly asleep. It hasn't gained any memory since it started, still at 22 MB,
also I've used a profiler to check that there aren't any memory leaks whatsoever.
The cursor is at the next line and the program just doesn't react anymore...
import mysql.connector, time, signal, requests
from mysql.connector import errorcode
USER = ...
PASSWORD = ...
HOST = ...
DATABASE = ...
RUNNING = True
def signal_handler(signum, frame):
global RUNNING
signal.signal(signum, signal.SIG_IGN) # ignore additional signals
print('Received shutdown signal')
print('Treating remaining players as if they just went offline...')
RUNNING = False
def time_string(timestamp, forString = False, brackets = False):
hours = int((timestamp // 3600 + (0 if forString else 2)) % 24)
minutes = int(timestamp // 60 % 60)
seconds = int(timestamp % 60)
shours = ('0' if hours < 10 else '') + str(hours)
sminutes = ('0' if minutes < 10 else '') + str(minutes)
sseconds = ('0' if seconds < 10 else '') + str(seconds)
ret = shours+':'+sminutes+':'+sseconds
if brackets:
ret = '['+ret+']'
return ret
def main():
global USER, PASSWORD, HOST, DATABASE
start_timestamp = None
end_timestamp = None
registeredWhen = dict()
startedOnline = dict()
startedPlaying = dict()
try:
conn = mysql.connector.connect(user=USER, password=PASSWORD, host=HOST, database=DATABASE)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
conn.close()
return
conn.autocommit = True
cursor = conn.cursor(prepared=True)
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS recording_periods(
player TEXT,
start INTEGER,
end INTEGER
)
"""
)
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS online_periods(
player TEXT,
start INTEGER,
end INTEGER
)
"""
)
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS playing_periods(
player TEXT,
start INTEGER,
end INTEGER
)
"""
)
cursor.execute(
"""
CREATE TABLE IF NOT EXISTS players_to_record(
player TEXT
)
"""
)
signal.signal(signal.SIGINT, signal_handler)
last_timestamp = time.time()-1
timestamp = last_timestamp
start_timestamp = None
end_timestamp = None
while RUNNING or [] != [x for x in startedOnline if startedOnline[x] != None] or [] != [x for x in startedPlaying if startedPlaying[x] != None]:
timestamp = int(time.time())
while timestamp == last_timestamp:
time.sleep(0.01)
timestamp = int(time.time())
cursor.execute("""SELECT player FROM players_to_record""")
nplayers = set([x[0].decode().lower() for x in cursor.fetchall()])
added = set([player for player in nplayers if player not in registeredWhen])
removed = set([player for player in registeredWhen if player not in nplayers])
for player in added:
print('Registered player '+player)
registeredWhen[player] = timestamp
startedOnline[player] = None
startedPlaying[player] = None
info = []
for player in removed:
print('Unregistered player '+player)
info.append((player, registeredWhen[player], end_timestamp))
registeredWhen.pop(player)
startedOnline.pop(player)
startedPlaying.pop(player)
cursor.executemany("""INSERT INTO recording_periods VALUES (?, ?, ?)""", info)
url = 'https://lichess.org/api/users/status?ids='
for player in registeredWhen:
url = url + player +','
url = url[:-1]
try:
resp = requests.get(url)
data = resp.json()
resp.close()
ts = time_string(timestamp, brackets=True)
tsnb = time_string(timestamp, brackets=False)
ronline = []
rplaying = []
for user in data:
name_id = user['id']
name = user['name']
online = 'online' in user and user['online']
playing = 'playing' in user and user['playing']
if start_timestamp == None:
start_timestamp = timestamp
end_timestamp = timestamp
if startedOnline[name_id] == None and online and RUNNING:
startedOnline[name_id] = timestamp
print(ts,name,'went online')
elif startedOnline[name_id] != None and ((not online) or not RUNNING):
ronline.append((name_id, startedOnline[name_id], timestamp))
st = time_string(startedOnline[name_id])
dt = time_string(timestamp - startedOnline[name_id], forString=True)
print(ts,name,'went offline, was online from',st,'to',tsnb,'for',dt)
startedOnline[name_id] = None
if startedPlaying[name_id] == None and playing and RUNNING:
startedPlaying[name_id] = timestamp
print(ts,name,'started playing')
elif startedPlaying[name_id] != None and ((not playing) or not RUNNING):
rplaying.append((name_id, startedPlaying[name_id], timestamp))
st = time_string(startedPlaying[name_id])
dt = time_string(timestamp - startedPlaying[name_id], forString=True)
print(ts,name,'stopped playing, was playing from',st,'to',tsnb,'for',dt)
startedPlaying[name_id] = None
cursor.executemany("""
INSERT INTO online_periods
VALUES(?, ?, ?)
""", ronline
)
cursor.executemany("""
INSERT INTO playing_periods
VALUES(?, ?, ?)
""", rplaying
)
except Exception as ex:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print (message)
time.sleep(1)
st = time_string(start_timestamp)
et = time_string(end_timestamp)
dt = time_string(end_timestamp - start_timestamp, forString=True)
print('Recorded from',st,'to',et,'for',dt)
print('Writing recording periods to the database...')
cursor.executemany("""
INSERT INTO recording_periods
VALUES(?, ?, ?)
""", [(player, registeredWhen[player], end_timestamp) for player in registeredWhen]
)
print('Committing ...')
conn.commit()
print('Closing database connection...')
cursor.close()
conn.close()
print('Exit program...')
if __name__ == '__main__':
main()
Could it be the terminal? That it's not suited for running for long hours?
But I'd like to use a terminal.
Since my program prints when encountering an error, but the terminal doesn't show any error messages,
my guess would be that it's stuck either in the database queries or in the requests to Lichess.org.
Our router and Lichess restart from time to time; the database server is up non-stop and has no other program accessing it (which might otherwise block the program's queries). It's a MariaDB running on the same Raspberry Pi.
I've no idea how and why it gets stuck; please tell me that you guys do!
Thanks in advance!
I've figured it out now,
it's because there's no timeout in requests.get() by default.
When the router restarts while your requests.get() is running, it'll just wait forever.
Fixed it by adding timeout=10. What were the creators of that library thinking??
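For reference, the one-line change (10 seconds is just the value I picked); the broad except already in the loop catches the resulting requests.exceptions.Timeout:
# Without a timeout, requests.get() can block forever if the
# connection dies mid-request; with one, it raises instead.
resp = requests.get(url, timeout=10)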
While trying to update a DNS record in R53 using boto I get the following error:
Traceback (most recent call last):
File "testing.py", line 106, in <module>
updateDns(load_balancer_dns)
File "testing.py", line 102, in updateDns
change.commit()
File "/usr/lib/python2.6/site-packages/boto/route53/record.py", line 149, in commit
return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())
File "/usr/lib/python2.6/site-packages/boto/route53/connection.py", line 320, in change_rrsets
body)
boto.route53.exception.DNSServerError: DNSServerError: 505 HTTP Version Not Supported
The following is the function I use to update the DNS entry:
def updateDns(load_balancer_dns):
r53 = boto.route53.connection.Route53Connection(aws_access_key_id=<access_key>,aws_secret_access_key=<secret_key>)
zone_id = r53.get_hosted_zone_by_name(<domain_name>)
print zone_id
change = boto.route53.record.ResourceRecordSets(connection=r53,hosted_zone_id=zone_id)
change.add_change_record("UPSERT", boto.route53.record.Record(name=<name>, type="CNAME", resource_records=load_balancer_dns, ttl=300))
change.commit()
print "record changed"
return None
updateDns(load_balancer_dns)
Has anyone else run into this issue before?
Since this doesn't truly have an authoritative answer, here's a working script I have just cobbled together:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
from time import sleep
import boto
def main():
"""Entrypoint."""
r53 = boto.connect_route53()
zones = r53.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones']
private_zone = find_private_zone('sub.mydomain.com.', zones)
reverse_zone = find_private_zone('100.10.in-addr.arpa.', zones)
upsert_record(r53, private_zone, 'dangus.sub.mydomain.com.', '127.0.0.1', 'A', wait=True)
delete_record(r53, private_zone, 'dangus.sub.mydomain.com.', '127.0.0.1', 'A', wait=True)
def find_private_zone(name, zones):
for zone in zones:
if zone.get('Name') == name and zone.get('Config', {}).get('PrivateZone') in [True, 'true']:
return zone
return None
def find_record(r53, zone_id, name, record_type):
records = r53.get_all_rrsets(zone_id)
for record in records:
if record.name == name and record.type == record_type:
return record
return None
def upsert_record(r53, zone, name, record, record_type, ttl=60, wait=False):
print("Inserting record {}[{}] -> {}; TTL={}".format(name, record_type, record, ttl))
recordset = boto.route53.record.ResourceRecordSets(connection=r53, hosted_zone_id=zone.get('Id').split('/')[-1])
recordset.add_change_record('UPSERT', boto.route53.record.Record(
name=name,
type=record_type,
resource_records=[record],
ttl=ttl
))
changeset = recordset.commit()
change_id = changeset['ChangeResourceRecordSetsResponse']['ChangeInfo']['Id'].split('/')[-1]
while wait:
status = r53.get_change(change_id)['GetChangeResponse']['ChangeInfo']['Status']
if status == 'INSYNC':
break
sleep(10)
def delete_record(r53, zone, name, record, record_type, wait=False):
print("Deleting record {}[{}] -> {}".format(name, record_type, record))
zone_id = zone.get('Id').split('/')[-1]
record = find_record(r53, zone_id, name, record_type)
if not record:
print("No record exists.")
return
recordset = boto.route53.record.ResourceRecordSets(connection=r53, hosted_zone_id=zone.get('Id').split('/')[-1])
recordset.add_change_record('DELETE', record)
changeset = recordset.commit()
change_id = changeset['ChangeResourceRecordSetsResponse']['ChangeInfo']['Id'].split('/')[-1]
while wait:
status = r53.get_change(change_id)['GetChangeResponse']['ChangeInfo']['Status']
if status == 'INSYNC':
break
sleep(10)
if __name__ == "__main__":
main()
Unfortunately, none of the APIs that I have worked with have a good Route 53 implementation and you eventually have to use these dictionary lookups to look into the XML actually returned by the service.
Some gotchas:
Always use the FQDN, which means every record name should end with a dot.
You can't just fetch the hosted zone you're looking for; you need to get all of them and search for the one you want (i.e. a private zone matching the given name).
You have to parse out the ID from your hosted zone.
You have to parse out the ID from your change sets.
When deleting entries, you must first capture the current state of the record if it exists at all, and then send that value to Route 53 as part of your deletion change.
This makes the API really aggravating to use, but at least it solves RFC-1925 rule number 1: it works.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# src: https://stackoverflow.com/a/47166985/65706
# courtesy of: Naftuli Kay
#
from __future__ import absolute_import, print_function
from time import sleep
import argparse
import boto
import pprint
import sys
def set_vars():
usage_examples = '''
clear ; poetry run python process_dns.py --action upsert --dns-zone cgfinics.com --dns-name dev.cgfinics.com --dns-value 168.10.172.10 --record-type A
clear ; poetry run python process_dns.py --action upsert --dns-zone cgfinics.com --dns-name www.dev.cgfinics.com --dns-value www.dev.cgfinics.com --record-type CNAME
clear ; poetry run python process_dns.py --action delete --dns-zone cgfinics.com --dns-name dev.cgfinics.com
'''
    parser = argparse.ArgumentParser(description='A quick and dirty DNS upsert to aws with boto', epilog=usage_examples)
parser.add_argument('--action', required=True, nargs='?',
help="The action to perform - upsert or delete ")
parser.add_argument('--dns-zone', required=True, nargs='?',
help="The DNS zone to process ")
parser.add_argument('--dns-name', required=True, nargs='?',
help="The DNS name to process ")
parser.add_argument('--dns-value', required=False, nargs='?',
help="The DNS value to process ")
parser.add_argument('--record-type', required=True, nargs='?',
help="The DNS record type - could be A, CNAME ")
args = parser.parse_args()
return args
def main():
"""Entrypoint."""
args = set_vars()
action = args.action
r53 = boto.connect_route53()
zones = r53.get_all_hosted_zones()['ListHostedZonesResponse']['HostedZones']
dns_zone = args.dns_zone + '.' if not args.dns_zone.endswith('.') else args.dns_zone
    public_zone = find_public_zone(dns_zone, zones)
dns_name = args.dns_name + '.' if not args.dns_name.endswith('.') else args.dns_name
dns_value = args.dns_value
record_type = args.record_type
if action == "upsert":
upsert_record(r53, public_zone, dns_name, dns_value, record_type, wait=True)
sys.exit(0)
if action == "delete":
delete_record(r53, public_zone, dns_name, dns_value, record_type, wait=True)
sys.exit(0)
print("only the upser and delete actions are supported !!!")
sys.exit(1)
def find_public_zone(name, zones):
for zone in zones:
        if zone.get('Name') == name and zone.get('Config', {}).get('PrivateZone') in [False, 'false']:
return zone
return None
def find_record(r53, zone_id, name, record_type):
records = r53.get_all_rrsets(zone_id)
for record in records:
if record.name == name and record.type == record_type:
return record
return None
def upsert_record(r53, zone, name, record, record_type, ttl=60, wait=False):
print("Inserting record {}[{}] -> {}; TTL={}".format(name, record_type, record, ttl))
recordset = boto.route53.record.ResourceRecordSets(connection=r53, hosted_zone_id=zone.get('Id').split('/')[-1])
recordset.add_change_record('UPSERT', boto.route53.record.Record(
name=name,
type=record_type,
resource_records=[record],
ttl=ttl
))
changeset = recordset.commit()
change_id = changeset['ChangeResourceRecordSetsResponse']['ChangeInfo']['Id'].split('/')[-1]
while wait:
status = r53.get_change(change_id)['GetChangeResponse']['ChangeInfo']['Status']
if status == 'INSYNC':
break
sleep(6)
def delete_record(r53, zone, name, record, record_type, wait=False):
print("Deleting record {}[{}] -> {}".format(name, record_type, record))
zone_id = zone.get('Id').split('/')[-1]
record = find_record(r53, zone_id, name, record_type)
if not record:
print("No record exists.")
return
recordset = boto.route53.record.ResourceRecordSets(connection=r53, hosted_zone_id=zone.get('Id').split('/')[-1])
recordset.add_change_record('DELETE', record)
changeset = recordset.commit()
change_id = changeset['ChangeResourceRecordSetsResponse']['ChangeInfo']['Id'].split('/')[-1]
while wait:
status = r53.get_change(change_id)['GetChangeResponse']['ChangeInfo']['Status']
if status == 'INSYNC':
break
sleep(10)
if __name__ == "__main__":
main()
I am having a similar issue to this, so as a "debug" exercise, I did a
print zone_id
I noticed that the object was a dict / JSON response, so I changed my code to
zone_id = self.r53.get_hosted_zone_by_name(self.domain).get("GetHostedZoneResponse").get("HostedZone").get("Id")
And this seemed to work for me - I am now getting a 403, but that at least should be easier to fix.
Disclaimer - new to Python, so not sure if this is actually the right way!
just for reference:
get the change_id, and then check the status with the boto connection (conn) until it says "INSYNC"
e.g.
from time import sleep

def updateDns(load_balancer_dns):
r53 = boto.route53.connection.Route53Connection(aws_access_key_id=<access_key>,aws_secret_access_key=<secret_key>)
zone_id = r53.get_hosted_zone_by_name(<domain_name>)
print zone_id
change = boto.route53.record.ResourceRecordSets(connection=r53,hosted_zone_id=zone_id)
change.add_change_record("UPSERT", boto.route53.record.Record(name=<name>, type="CNAME", resource_records=load_balancer_dns, ttl=300))
_changes = change.commit()
change_id = _changes["ChangeResourceRecordSetsResponse"]["ChangeInfo"]["Id"].split("/")[-1]
while True:
status = r53.get_change(change_id)["GetChangeResponse"]["ChangeInfo"]["Status"]
if status == "INSYNC": break
sleep(10)
from pox.core import core
import pox.openflow.libopenflow_01 as of
import re
import datetime
from sqlalchemy import create_engine, ForeignKey
from sqlalchemy import Column, Date, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql.expression import exists
log = core.getLogger()
engine = create_engine('sqlite:///nwtopology.db', echo=False)
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()
########################################################################
class SourcetoPort(Base):
""""""
__tablename__ = 'source_to_port'
id = Column(Integer, primary_key=True)
port_no = Column(Integer)
src_address = Column(String,index=True)
#----------------------------------------------------------------------
def __init__(self, src_address,port_no):
""""""
self.src_address = src_address
self.port_no = port_no
########################################################################
#create tables
Base.metadata.create_all(engine)
class Tutorial (object):
def __init__ (self, connection):
self.connection = connection
connection.addListeners(self)
# Use this table to keep track of which ethernet address is on
# which switch port (keys are MACs, values are ports).
self.mac_to_port = {}
self.matrix={}
#This will keep track of the traffic matrix.
#matrix[i][j]=number of times a packet from i went to j
def send_packet (self, buffer_id, raw_data, out_port, in_port):
#print "calling send_packet"
#Sends a packet out of the specified switch port.
msg = of.ofp_packet_out()
msg.in_port = in_port
msg.data = raw_data
# Add an action to send to the specified port
action = of.ofp_action_output(port = out_port)
msg.actions.append(action)
# Send message to switch
self.connection.send(msg)
def act_like_hub (self, packet, packet_in):
#flood packet on all ports
self.send_packet(packet_in.buffer_id, packet_in.data,
of.OFPP_FLOOD, packet_in.in_port)
def act_like_switch (self, packet, packet_in):
"""
Implement switch-like behavior.
"""
# Learn the port for the source MAC
#print "RECIEVED FROM PORT ",packet_in.in_port , "SOURCE ",packet.src
# create a Session
#Session = sessionmaker(bind=engine)
#session = Session()
self.mac_to_port[packet.src]=packet_in.in_port
#if self.mac_to_port.get(packet.dst)!=None:
#print "count for dst",session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).count(),str(packet.dst)
#if session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).count():
if session.query(exists().where(SourcetoPort.src_address == str(packet.dst))).scalar() is not None:
#send this packet
print "got info from the database"
q_res = session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).one()
self.send_packet(packet_in.buffer_id, packet_in.data,q_res.port_no, packet_in.in_port)
#create a flow modification message
msg = of.ofp_flow_mod()
#set the fields to match from the incoming packet
msg.match = of.ofp_match.from_packet(packet)
#send the rule to the switch so that it does not query the controller again.
msg.actions.append(of.ofp_action_output(port=q_res.port_no))
#push the rule
self.connection.send(msg)
else:
#flood this packet out as we don't know about this node.
print "flooding the first packet"
self.send_packet(packet_in.buffer_id, packet_in.data,
of.OFPP_FLOOD, packet_in.in_port)
#self.matrix[(packet.src,packet.dst)]+=1
entry = SourcetoPort(src_address=str(packet.src) , port_no=packet_in.in_port)
#add the record to the session object
session.add(entry)
#add the record to the session object
session.commit()
def _handle_PacketIn (self, event):
"""
Handles packet in messages from the switch.
"""
packet = event.parsed # This is the parsed packet data.
if not packet.parsed:
log.warning("Ignoring incomplete packet")
return
packet_in = event.ofp # The actual ofp_packet_in message.
#self.act_like_hub(packet, packet_in)
self.act_like_switch(packet, packet_in)
def launch ():
"""
Starts the component
"""
def start_switch (event):
log.debug("Controlling %s" % (event.connection,))
Tutorial(event.connection)
core.openflow.addListenerByName("ConnectionUp", start_switch)
When I run the above code I get the following error. The problem that I am facing is that when I use
if session.query(exists().where(SourcetoPort.src_address == str(packet.dst))).scalar() is not None:
in place of the count query
#if session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).count():
the database query
q_res = session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).first()
self.send_packet(packet_in.buffer_id, packet_in.data, q_res.port_no, packet_in.in_port)
gives the following error:
DEBUG:core:POX 0.1.0 (betta) going up...
DEBUG:core:Running on CPython (2.7.3/Aug 1 2012 05:14:39)
DEBUG:core:Platform is Linux-3.5.0-23-generic-x86_64-with-Ubuntu-12.04-precise
INFO:core:POX 0.1.0 (betta) is up.
DEBUG:openflow.of_01:Listening on 0.0.0.0:6633
INFO:openflow.of_01:[00-00-00-00-00-02 1] connected
DEBUG:tutorial:Controlling [00-00-00-00-00-02 1]
got info from the database
ERROR:core:Exception while handling Connection!PacketIn...
Traceback (most recent call last):
File "/home/karthik/pox/pox/lib/revent/revent.py", line 234, in raiseEventNoErrors
return self.raiseEvent(event, *args, **kw)
File "/home/karthik/pox/pox/lib/revent/revent.py", line 281, in raiseEvent
rv = event._invoke(handler, *args, **kw)
File "/home/karthik/pox/pox/lib/revent/revent.py", line 159, in _invoke
return handler(self, *args, **kw)
File "/home/karthik/pox/tutorial.py", line 118, in _handle_PacketIn
self.act_like_switch(packet, packet_in)
File "/home/karthik/pox/tutorial.py", line 86, in act_like_switch
self.send_packet(packet_in.buffer_id, packet_in.data,q_res.port_no, packet_in.in_port)
AttributeError: 'NoneType' object has no attribute 'port_no'
got info from the database
ERROR:core:Exception while handling Connection!PacketIn...
This line:
if session.query(exists().where(SourcetoPort.src_address == str(packet.dst))).scalar() is not None:
is always true. The reason is that scalar() returns None only if there are no rows. However, your query looks like SELECT EXISTS (SELECT * FROM source_to_port WHERE source_to_port.src_address=?). This will always return exactly one row with one column. The result will thus be True or False, never None.
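So if you want to keep the EXISTS query, drop the comparison and branch on the boolean itself:
# scalar() yields True or False here, so test it directly
if session.query(exists().where(SourcetoPort.src_address == str(packet.dst))).scalar():
    ...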
Moving on to the line before the line that throws your exception: first() returns None if there are no matches, so q_res is None. Since q_res is None, q_res.port_no on the next line raises an exception.
(Note you can use one() if you want an exception to be thrown if there is no match.)
If you are expecting a match, double-check your data and your filter_by() condition to make sure they are doing what you think they should.
However, I recommend that you use one query instead of two, using first() or one(). With first(), you branch based on whether q_res is None:
q_res = session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).first()
if q_res is not None:
print "got info from the database"
self.send_packet(....)
...
else:
print "flooding the first packet"
...
Or with one(), you put your "flooding" branch in an exception handler:
from sqlalchemy.orm.exc import (NoResultFound, MultipleResultsFound)
try:
q_res = session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).one()
except NoResultFound:
print "flooding the first packet"
...
# except MultipleResultsFound:
# print "More than one result found! WUT?!"
else:
print "got info from the database"
...
A difference between these two approaches is that one() will ensure there is one and only one result, whereas first() doesn't care if there are multiple results.
I'm seeking a way to let the Python logging module log to a database, falling back to the file system when the DB is down.
So basically two things: how to make the logger log to the database, and how to make it fall back to file logging when the DB is down.
I recently managed to write my own database logger in Python. Since I couldn't find any example, I thought I'd post mine here. It works with MS SQL.
Database table could look like this:
CREATE TABLE [db_name].[log](
[id] [bigint] IDENTITY(1,1) NOT NULL,
[log_level] [int] NULL,
[log_levelname] [char](32) NULL,
[log] [char](2048) NOT NULL,
[created_at] [datetime2](7) NOT NULL,
[created_by] [char](32) NOT NULL,
) ON [PRIMARY]
The class itself:
class LogDBHandler(logging.Handler):
'''
Customized logging handler that puts logs to the database.
pymssql required
'''
def __init__(self, sql_conn, sql_cursor, db_tbl_log):
logging.Handler.__init__(self)
self.sql_cursor = sql_cursor
self.sql_conn = sql_conn
self.db_tbl_log = db_tbl_log
def emit(self, record):
# Set current time
tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
# Clear the log message so it can be put to db via sql (escape quotes)
self.log_msg = record.msg
self.log_msg = self.log_msg.strip()
self.log_msg = self.log_msg.replace('\'', '\'\'')
# Make the SQL insert
sql = 'INSERT INTO ' + self.db_tbl_log + ' (log_level, ' + \
'log_levelname, log, created_at, created_by) ' + \
'VALUES (' + \
'' + str(record.levelno) + ', ' + \
'\'' + str(record.levelname) + '\', ' + \
'\'' + str(self.log_msg) + '\', ' + \
'(convert(datetime2(7), \'' + tm + '\')), ' + \
'\'' + str(record.name) + '\')'
try:
self.sql_cursor.execute(sql)
self.sql_conn.commit()
# If error - print it out on screen. Since DB is not working - there's
# no point making a log about it to the database :)
except pymssql.Error as e:
print sql
print 'CRITICAL DB ERROR! Logging to database not possible!'
And usage example:
import pymssql
import time
import logging
db_server = 'servername'
db_user = 'db_user'
db_password = 'db_pass'
db_dbname = 'db_name'
db_tbl_log = 'log'
log_file_path = 'C:\\Users\\Yourname\\Desktop\\test_log.txt'
log_error_level = 'DEBUG' # LOG error level (file)
log_to_db = True # LOG to database?
class LogDBHandler(logging.Handler):
[...]
# Main settings for the database logging use
if (log_to_db):
# Make the connection to database for the logger
log_conn = pymssql.connect(db_server, db_user, db_password, db_dbname, 30)
log_cursor = log_conn.cursor()
logdb = LogDBHandler(log_conn, log_cursor, db_tbl_log)
# Set logger
logging.basicConfig(filename=log_file_path)
# Set db handler for root logger
if (log_to_db):
logging.getLogger('').addHandler(logdb)
# Register MY_LOGGER
log = logging.getLogger('MY_LOGGER')
log.setLevel(log_error_level)
# Example variable
test_var = 'This is test message'
# Log the variable contents as an error
log.error('This error occurred: %s' % test_var)
The above will log both to the database and to the file. If the file is not needed, skip the logging.basicConfig(filename=log_file_path) line. Everything logged using 'log' will be logged as MY_LOGGER. If some external error appears (e.g. in an imported module), the error will appear as 'root', since the 'root' logger is also active and is using the database handler.
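As an aside, building the INSERT by string concatenation is fragile (the quote escaping) and open to injection. A sketch of the same emit() using pymssql's %s/%d placeholders instead, assuming the same table layout:
    def emit(self, record):
        # Set current time
        tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
        sql = ('INSERT INTO ' + self.db_tbl_log +
               ' (log_level, log_levelname, log, created_at, created_by) '
               'VALUES (%d, %s, %s, convert(datetime2(7), %s), %s)')
        try:
            # The driver quotes and escapes the parameters itself
            self.sql_cursor.execute(sql, (record.levelno, record.levelname,
                                          record.msg.strip(), tm, record.name))
            self.sql_conn.commit()
        except pymssql.Error:
            print 'CRITICAL DB ERROR! Logging to database not possible!'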
Write yourself a handler that directs the logs to the database in question. When it fails, you can remove it from the handler list of the logger. There are many ways to deal with the failure modes.
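A minimal sketch of that idea; DatabaseHandler and write_to_db are illustrative names, not a library API:
import logging

class DatabaseHandler(logging.Handler):
    """Sends records to a database; detaches itself when the DB fails."""
    def __init__(self, write_to_db, fallback_handler):
        super(DatabaseHandler, self).__init__()
        self.write_to_db = write_to_db          # callable that performs the INSERT
        self.fallback_handler = fallback_handler

    def emit(self, record):
        try:
            self.write_to_db(self.format(record))
        except Exception:
            # DB is down: swap this handler for the file fallback
            root = logging.getLogger()
            root.removeHandler(self)
            root.addHandler(self.fallback_handler)
            self.fallback_handler.handle(record)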
Python logging to a database with a backup logger
Problem
I had the same problem when I ran a Django project on a server, since sometimes you need to check the logs remotely.
Solution
First, there is a need for a handler that lets the logger insert logs into the database. Since my SQL is not good, an ORM is needed; I chose SQLAlchemy.
model:
# models.py
from sqlalchemy import Column, Integer, String, DateTime, Text
from sqlalchemy.ext.declarative import declarative_base
import datetime
base = declarative_base()
class Log(base):
__tablename__ = "log"
id = Column(Integer, primary_key=True, autoincrement=True)
time = Column(DateTime, nullable=False, default=datetime.datetime.now)
level_name = Column(String(10), nullable=True)
module = Column(String(200), nullable=True)
thread_name = Column(String(200), nullable=True)
file_name = Column(String(200), nullable=True)
func_name = Column(String(200), nullable=True)
line_no = Column(Integer, nullable=True)
process_name = Column(String(200), nullable=True)
message = Column(Text)
last_line = Column(Text)
This is the crud for insertion into the database:
#crud.py
import sqlalchemy
from .models import base
from traceback import print_exc
class Crud:
    def __init__(self, connection_string='sqlite:///log_db.sqlite3',
encoding='utf-8',
pool_size=10,
max_overflow=20,
pool_recycle=3600):
self.connection_string = connection_string
self.encoding = encoding
self.pool_size = pool_size
self.max_overflow = max_overflow
self.pool_recycle = pool_recycle
self.engine = None
self.session = None
def initiate(self):
self.create_engine()
self.create_session()
self.create_tables()
def create_engine(self):
self.engine = sqlalchemy.create_engine(self.connection_string)
def create_session(self):
self.session = sqlalchemy.orm.Session(bind=self.engine)
def create_tables(self):
base.metadata.create_all(self.engine)
def insert(self, instances):
try:
self.session.add(instances)
self.session.commit()
self.session.flush()
except:
self.session.rollback()
raise
def __del__(self):
self.close_session()
self.close_all_connections()
def close_session(self):
try:
self.session.close()
except:
print_exc()
else:
self.session = None
def close_all_connections(self):
try:
self.engine.dispose()
except:
print_exc()
else:
self.engine = None
The handler:
# handler.py
from logging import Handler, getLogger
from traceback import print_exc
from .crud import Crud
from .models import Log
my_crud = Crud(
connection_string=<connection string to reach your db>,
encoding='utf-8',
pool_size=10,
max_overflow=20,
pool_recycle=3600)
my_crud.initiate()
class DBHandler(Handler):
backup_logger = None
def __init__(self, level=0, backup_logger_name=None):
super().__init__(level)
if backup_logger_name:
self.backup_logger = getLogger(backup_logger_name)
def emit(self, record):
try:
message = self.format(record)
try:
last_line = message.rsplit('\n', 1)[-1]
except:
last_line = None
try:
new_log = Log(module=record.module,
thread_name=record.threadName,
file_name=record.filename,
func_name=record.funcName,
level_name=record.levelname,
line_no=record.lineno,
process_name=record.processName,
message=message,
last_line=last_line)
# raise
my_crud.insert(instances=new_log)
except:
if self.backup_logger:
try:
getattr(self.backup_logger, record.levelname.lower())(record.message)
except:
print_exc()
else:
print_exc()
except:
print_exc()
Test to check the logger:
# test.py
from logging import basicConfig, getLogger, DEBUG, FileHandler, Formatter
from .handlers import DBHandler
basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S',
level=DEBUG)
format = Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
backup_logger = getLogger('backup_logger')
file_handler = FileHandler('file.log')
file_handler.setLevel(DEBUG)
file_handler.setFormatter(format)
backup_logger.addHandler(file_handler)
db_logger = getLogger('logger')
db_handler = DBHandler(backup_logger_name='backup_logger')
db_handler.setLevel(DEBUG)
db_handler.setFormatter(format)
db_logger.addHandler(db_handler)
if __name__ == "__main__":
db_logger.debug('debug: hello world!')
db_logger.info('info: hello world!')
db_logger.warning('warning: hello world!')
db_logger.error('error: hello world!')
db_logger.critical('critical: hello world!!!!')
You can see the handler accepts a backup logger that it can use when the database insertion fails.
A good improvement would be to move the database logging onto its own thread.
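For instance, a sketch with the stdlib queue machinery (Python 3.2+), assuming db_handler is the DBHandler instance from the test above: QueueHandler turns emit() into a cheap queue put, and QueueListener drains the queue into the database handler on a worker thread.
import queue
from logging.handlers import QueueHandler, QueueListener

log_queue = queue.Queue(-1)                      # unbounded queue
queue_handler = QueueHandler(log_queue)          # non-blocking emit
listener = QueueListener(log_queue, db_handler)  # DB writes happen off-thread

db_logger.addHandler(queue_handler)              # instead of adding db_handler directly
listener.start()
# ... and at shutdown:
# listener.stop()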
I am digging this out again.
There is a solution with SqlAlchemy (Pyramid is NOT required for this recipe):
https://docs.pylonsproject.org/projects/pyramid-cookbook/en/latest/logging/sqlalchemy_logger.html
And you could improve logging by adding extra fields, here is a guide: https://stackoverflow.com/a/17558764/1115187
Fallback to FS
Not sure that this is 100% correct, but you could have 2 handlers:
database handler (write to DB)
file handler (write to file or stream)
Just wrap the DB commit with a try-except, as sketched below. But be aware: the file will contain ALL log entries, not only the ones for which saving to the DB failed.
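A rough sketch of that two-handler setup; insert_into_db is a placeholder for whatever DB write and commit you use:
import logging

class SwallowingDBHandler(logging.Handler):
    """Writes to the DB; silently gives up when the DB is down."""
    def emit(self, record):
        try:
            insert_into_db(self.format(record))  # hypothetical DB write + commit
        except Exception:
            pass  # DB down: the file handler below still has the record

logger = logging.getLogger("app")
logger.setLevel(logging.DEBUG)
logger.addHandler(SwallowingDBHandler())           # database handler
logger.addHandler(logging.FileHandler("app.log"))  # file handler, gets everything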
Old question, but dropping this for others. If you want to use Python logging, you can add two handlers. One for writing to file: a rotating file handler. This is robust, and works regardless of whether the DB is up or not.
The other one can write to another service/module, like a pymongo integration.
Look up logging.config for how to set up your handlers from code or JSON.
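For reference, a minimal logging.config.dictConfig sketch with a rotating file handler; the commented-out Mongo handler class path is a hypothetical stand-in for a pymongo integration:
import logging
import logging.config

LOGGING = {
    "version": 1,
    "handlers": {
        "file": {
            "class": "logging.handlers.RotatingFileHandler",
            "filename": "app.log",
            "maxBytes": 10 * 1024 * 1024,  # rotate at ~10 MB
            "backupCount": 5,
        },
        # "mongo": {"class": "myapp.logging.MongoHandler"},  # hypothetical
    },
    "root": {"level": "INFO", "handlers": ["file"]},
}

logging.config.dictConfig(LOGGING)
logging.info("configured via dictConfig")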