raise NotImplementedError NotImplementedError - python

I'm using PyCharm to write a Python 3 web app project with the Tornado web framework.
The listing service has already been built. I need to build the remaining two components: the user service and the public API layer. The implementation of the listing service can serve as a good starting point for learning how to structure a web application with Tornado.
I am required to use Tornado's built-in framework for HTTP requests.
The error occurs at the listen call (app.listen(options.port)) when I try to run the program:
Traceback (most recent call last):
  File "D:/Bill/python/Tornado/99-python-exercise-master/listing_service.py", line 203, in <module>
    app.listen(options.port)
  File "C:\Program Files\Python38\lib\site-packages\tornado\web.py", line 2116, in listen
    server.listen(port, address)
  File "C:\Program Files\Python38\lib\site-packages\tornado\tcpserver.py", line 152, in listen
    self.add_sockets(sockets)
  File "C:\Program Files\Python38\lib\site-packages\tornado\tcpserver.py", line 165, in add_sockets
    self._handlers[sock.fileno()] = add_accept_handler(
  File "C:\Program Files\Python38\lib\site-packages\tornado\netutil.py", line 279, in add_accept_handler
    io_loop.add_handler(sock, accept_handler, IOLoop.READ)
  File "C:\Program Files\Python38\lib\site-packages\tornado\platform\asyncio.py", line 100, in add_handler
    self.asyncio_loop.add_reader(fd, self._handle_events, fd, IOLoop.READ)
  File "C:\Program Files\Python38\lib\asyncio\events.py", line 501, in add_reader
    raise NotImplementedError
NotImplementedError
Code:
import tornado.web
import tornado.ioloop
import tornado.gen
import tornado.log
import tornado.options
import sqlite3
import logging
import json
import time

class App(tornado.web.Application):
    def __init__(self, handlers, **kwargs):
        super().__init__(handlers, **kwargs)
        # Initialising db connection
        self.db = sqlite3.connect("listings.db")
        self.db.row_factory = sqlite3.Row
        self.init_db()

    def init_db(self):
        cursor = self.db.cursor()
        # Create table
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS 'listings' ("
            + "id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,"
            + "user_id INTEGER NOT NULL,"
            + "listing_type TEXT NOT NULL,"
            + "price INTEGER NOT NULL,"
            + "created_at INTEGER NOT NULL,"
            + "updated_at INTEGER NOT NULL"
            + ");"
        )
        self.db.commit()
class BaseHandler(tornado.web.RequestHandler):
    def write_json(self, obj, status_code=200):
        self.set_header("Content-Type", "application/json")
        self.set_status(status_code)
        self.write(json.dumps(obj))

# /listings
class ListingsHandler(BaseHandler):
    @tornado.gen.coroutine
    def get(self):
        # Parsing pagination params
        page_num = self.get_argument("page_num", 1)
        page_size = self.get_argument("page_size", 10)
        try:
            page_num = int(page_num)
        except:
            logging.exception("Error while parsing page_num: {}".format(page_num))
            self.write_json({"result": False, "errors": "invalid page_num"}, status_code=400)
            return

        try:
            page_size = int(page_size)
        except:
            logging.exception("Error while parsing page_size: {}".format(page_size))
            self.write_json({"result": False, "errors": "invalid page_size"}, status_code=400)
            return

        # Parsing user_id param
        user_id = self.get_argument("user_id", None)
        if user_id is not None:
            try:
                user_id = int(user_id)
            except:
                self.write_json({"result": False, "errors": "invalid user_id"}, status_code=400)
                return

        # Building select statement
        select_stmt = "SELECT * FROM listings"
        # Adding user_id filter clause if param is specified
        if user_id is not None:
            select_stmt += " WHERE user_id=?"

        # Order by and pagination
        limit = page_size
        offset = (page_num - 1) * page_size
        select_stmt += " ORDER BY created_at DESC LIMIT ? OFFSET ?"

        # Fetching listings from db
        if user_id is not None:
            args = (user_id, limit, offset)
        else:
            args = (limit, offset)
        cursor = self.application.db.cursor()
        results = cursor.execute(select_stmt, args)

        listings = []
        for row in results:
            fields = ["id", "user_id", "listing_type", "price", "created_at", "updated_at"]
            listing = {
                field: row[field] for field in fields
            }
            listings.append(listing)

        self.write_json({"result": True, "listings": listings})
    @tornado.gen.coroutine
    def post(self):
        # Collecting required params
        user_id = self.get_argument("user_id")
        listing_type = self.get_argument("listing_type")
        price = self.get_argument("price")

        # Validating inputs
        errors = []
        user_id_val = self._validate_user_id(user_id, errors)
        listing_type_val = self._validate_listing_type(listing_type, errors)
        price_val = self._validate_price(price, errors)
        time_now = int(time.time() * 1e6)  # Converting current time to microseconds

        # End if we have any validation errors
        if len(errors) > 0:
            self.write_json({"result": False, "errors": errors}, status_code=400)
            return

        # Proceed to store the listing in our db
        cursor = self.application.db.cursor()
        cursor.execute(
            "INSERT INTO 'listings' "
            + "('user_id', 'listing_type', 'price', 'created_at', 'updated_at') "
            + "VALUES (?, ?, ?, ?, ?)",
            (user_id_val, listing_type_val, price_val, time_now, time_now)
        )
        self.application.db.commit()

        # Error out if we fail to retrieve the newly created listing
        if cursor.lastrowid is None:
            self.write_json({"result": False, "errors": ["Error while adding listing to db"]}, status_code=500)
            return

        listing = dict(
            id=cursor.lastrowid,
            user_id=user_id_val,
            listing_type=listing_type_val,
            price=price_val,
            created_at=time_now,
            updated_at=time_now
        )
        self.write_json({"result": True, "listing": listing})
    def _validate_user_id(self, user_id, errors):
        try:
            user_id = int(user_id)
            return user_id
        except Exception as e:
            logging.exception("Error while converting user_id to int: {}".format(user_id))
            errors.append("invalid user_id")
            return None

    def _validate_listing_type(self, listing_type, errors):
        if listing_type not in {"rent", "sale"}:
            errors.append("invalid listing_type. Supported values: 'rent', 'sale'")
            return None
        else:
            return listing_type

    def _validate_price(self, price, errors):
        # Convert string to int
        try:
            price = int(price)
        except Exception as e:
            logging.exception("Error while converting price to int: {}".format(price))
            errors.append("invalid price. Must be an integer")
            return None

        if price < 1:
            errors.append("price must be greater than 0")
            return None
        else:
            return price
# /listings/ping
class PingHandler(tornado.web.RequestHandler):
    @tornado.gen.coroutine
    def get(self):
        self.write("pong!")

def make_app(options):
    return App([
        (r"/listings/ping", PingHandler),
        (r"/listings", ListingsHandler),
    ], debug=options.debug)

if __name__ == "__main__":
    # Define settings/options for the web app
    # Specify the port number to start the web app on (default value is port 6000)
    tornado.options.define("port", default=6000)
    # Specify whether the app should run in debug mode
    # Debug mode restarts the app automatically on file changes
    tornado.options.define("debug", default=True)

    # Read settings/options from command line
    tornado.options.parse_command_line()

    # Access the settings defined
    options = tornado.options.options

    # Create web app
    app = make_app(options)
    app.listen(options.port)
    logging.info("Starting listing service. PORT: {}, DEBUG: {}".format(options.port, options.debug))

    # Start event loop
    tornado.ioloop.IOLoop.instance().start()
How to fix this problem?

Python 3.8 made a backwards-incompatible change to the asyncio package used by Tornado. Applications that use Tornado on Windows with Python 3.8 must call asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) at the beginning of their main file/function. (as documented on the home page of tornadoweb.org)
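A minimal, runnable sketch of where that call goes, condensed from the listing service above (the ping handler and the port default mirror the question's code; the sys.platform guard is an extra assumption so the same file still runs unchanged on non-Windows machines):

import asyncio
import sys

import tornado.ioloop
import tornado.options
import tornado.web


class PingHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("pong!")


if __name__ == "__main__":
    # Python 3.8 on Windows defaults to the proactor event loop, which does not
    # implement add_reader(); Tornado needs the selector-based loop instead.
    if sys.platform.startswith("win"):
        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

    tornado.options.define("port", default=6000)
    tornado.options.parse_command_line()

    app = tornado.web.Application([(r"/listings/ping", PingHandler)])
    app.listen(tornado.options.options.port)
    tornado.ioloop.IOLoop.current().start()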

Related

Python: _pickle.PicklingError: Can't pickle <class 'wtforms.form.Meta'>: attribute lookup Meta on wtforms.form failed

Background:
I built an app in which, as soon as a user browses for a file and clicks the "upload" button, a behind-the-scenes calculation takes place, which can take long minutes, and at the end the output is uploaded to GCP. I want the calculation (after clicking) not to block the app, so that in the meantime the user is moved to another page. At the end of the process they will receive a success/failure email.
To achieve this goal I am trying to use multiprocessing.
I'm having a lot of trouble trying to turn it into a multi-process app; I keep getting the following error message when I try to start a process:
_pickle.PicklingError: Can't pickle <class 'wtforms.form.Meta'>: attribute lookup Meta on wtforms.form failed
app.py -
The main function is `upload_file`:
@app.route('/')
def home():
    return redirect(url_for('upload_file', page_num=1))

@app.route('/<int:page_num>', methods=['GET', 'POST'])
def upload_file(page_num=1):
    form = CreateXslxForm()
    traffic_data = get_page_of_traffic_data(page_num)
    queue = Queue()
    p = Process(target=main_func, args=(queue, form))
    p.start()
    proc_ret = queue.get()
    if proc_ret:
        upload_success(*proc_ret)
    else:
        return render_template(constants.GeneralConstants.LANDING_PAGE, title='URL Comparison', form=form,
                               traffic_data=traffic_data)

def get_page_of_traffic_data(page_num):
    traffic_data = urls_comparison_users_traffic.query.paginate(per_page=5, page=int(page_num), error_out=True)
    return traffic_data

def upload_success(link, traffic_row, blob_name):
    add_data_to_db(traffic_row)
    return render_template('upload_success.html', title='Upload File', file_path=link, filename=blob_name)

def add_data_to_db(traffic_row):
    # MANUAL PRE PING
    try:
        db.session.execute("SELECT 1;")
        db.session.commit()
    except:
        db.session.rollback()
    finally:
        db.session.close()
    # SESSION COMMIT, ROLLBACK, CLOSE
    try:
        db.session.add(traffic_row)
        db.session.commit()
        send_response_mail_to_user(traffic_row)
    except Exception as e:
        db.session.rollback()
        raise e
    finally:
        db.session.close()

def send_response_mail_to_user(traffic_data):
    sendgrid_obj = sendgrid_handler.SendgridHandler(sendgrid_key=SENDGRID_KEY)
    sendgrid_obj.mails_to_send = URLComparisonEmailBuilder.build_mails(traffic_data)
    sendgrid_obj.send()

if __name__ == "__main__":
    app.run(port=5000, debug=True, threaded=True)
main.py
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = r'gcp-cre.json'
storage_client = storage.Client()

def get_file_from_form(form):
    # form = forms.CreateXslxForm()
    timestr = time.strftime(constants.DatesFormats.DATETIME)
    custom_name = f"{constants.GeneralConstants.DEST_DEFAULT_NAME}-{timestr}"
    # traffic_data = get_page_of_traffic_data(page_num)
    if form.validate_on_submit():
        load_filename = secure_filename(form.input_file.data.filename)
        valid_file_name = get_valid_filename(request.form["filename"])
        if valid_file_name:
            custom_name = f"{valid_file_name}"
        url_comp_obj = URLCompare()
        result = url_comp_obj.compare_all_urls(form.input_file.data)
        row_data = {
            "upload_file": form.input_file.data.filename,
            "output_file": custom_name,
            "gcp_link": None,
            "user": 'nki-tov'
        }
        return custom_name, result, constants.GeneralConstants.BUCKET_NAME, row_data

def get_valid_filename(s):
    s = s.strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w]', '', s)

def upload_to_bucket(blob_name, file, bucket_name, row_data):
    '''
    Upload file to a bucket
    : blob_name (str) - object name
    : file_path (str)
    : bucket_name (str)
    '''
    bucket = storage_client.get_bucket(bucket_name)
    blob = bucket.blob(blob_name)
    blob.upload_from_file(file)
    link = f'https://console.cloud.google.com/storage/browser/_details/{bucket_name}/{blob_name};tab=live_object?authuser=0'
    row_data["gcp_link"] = link
    traffic_row = urls_comparison_users_traffic(**row_data)
    return link, traffic_row, blob_name

def main_func(queue: Queue, form):
    res = get_file_from_form(form)
    if res:
        queue.put(upload_to_bucket(*res))
    else:
        queue.put(res)  # None
What am I doing wrong?
Is it something to do with the architecture?
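No answer is reproduced here, but the traceback itself points at the cause: multiprocessing pickles every argument handed to Process, and a bound WTForms form (its Meta class, file stream and request context) is not picklable. A common workaround, sketched below under that assumption, is to pull plain, picklable values out of the form while still inside the request and pass only those to the worker; the names worker and start_background_upload are hypothetical, not part of the question's code:

from multiprocessing import Process, Queue


def worker(queue, file_bytes, filename):
    # Hypothetical worker: operate on the plain, picklable arguments instead of
    # touching Flask/WTForms objects, then report back through the queue.
    queue.put((filename, len(file_bytes)))


def start_background_upload(file_bytes, filename):
    # In the Flask view this would be called with form.input_file.data.read()
    # and form.input_file.data.filename, extracted before the process starts.
    queue = Queue()
    p = Process(target=worker, args=(queue, file_bytes, filename))  # only plain data crosses the boundary
    p.start()
    return p, queue


if __name__ == "__main__":
    proc, q = start_background_upload(b"example bytes", "report.xlsx")
    print(q.get())
    proc.join()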

Unable to import module - Lambda handler Error

When I try to run my Python code in Lambda, passing the handler to the function module, I get the error below. Any suggestions on how I could resolve this?
The file test_client_visitor below is triggered to call client_visitor and send an email to the clients accordingly. When I run the Python file test_client_visitor locally, the email is triggered successfully, but in Lambda I am facing this issue.
file_name: test_client_visitor
import unittest

import jsonpickle

function = __import__('client_visitor')
handler = function.scan_clients


class TestFunction(unittest.TestCase):
    def test_function(self):
        file = open('event.json', 'rb')
        try:
            ba = bytearray(file.read())
            event = jsonpickle.decode(ba)
            print('## EVENT')
            print(jsonpickle.encode(event))
            context = {'requestid': '1234'}
            result = handler(event, context)
            print(result)
            self.assertTrue(result, 'Emails could not be sent!')
        finally:
            file.close()
        file.close()


if __name__ == '__main__':
    unittest.main()
file_name: client_visitor.py
import datetime

import boto3

from aws_ses import send_bulk_templated_email
# boto3.set_stream_logger('botocore', level='DEBUG')
from mongodb import get_mongo_db


def process_clients(clients, developers, clients_to_be_notified, days):
    if not clients:
        pass
    check_date = datetime.datetime.now() + datetime.timedelta(days)
    for client in clients:
        client_id_ = client['client_id']
        if 'developer_id' in client:
            developers[client_id_] = client['developer_id']
        else:
            if 'secrets' in client:
                secrets = client['secrets']
                for secret in secrets:
                    if 'not_on_or_after' in secret and secret['not_on_or_after'] < check_date.timestamp():
                        clients_to_be_notified.append({'client_id': client_id_,
                                                       'expiration_date': datetime.datetime.fromtimestamp(
                                                           secret['not_on_or_after']).strftime('%m/%d/%Y')})
                        print("adding client to notify List", client_id_, ":", client['sort'])


def notify_clients(clients_to_be_notified, developers):
    developer_id_list = []
    for client_secret in clients_to_be_notified:
        developer_id_list.append(developers[client_secret['client_id']])
    if developer_id_list:
        db = get_mongo_db()
        if db:
            users = list(db.users.find({'guid': {'$in': developer_id_list}}, {'email', 'guid'}))
            need_to_send_email = False
            for user in users:
                for client_secret in clients_to_be_notified:
                    if developers[client_secret['client_id']] == user['guid']:
                        client_secret['email'] = user['email']
                        need_to_send_email = True
                        break
            if need_to_send_email:
                return send_bulk_templated_email(clients_to_be_notified)
        else:
            return False
    return True


def scan_clients(event, context):
    local = False
    if 'local' in event:
        local = event['local'] == 'True'
    if local:
        dynamodb = boto3.resource('dynamodb', endpoint_url="http://localhost:8000")
    else:
        dynamodb = boto3.resource('dynamodb')
    days = 30
    if 'days' in event:
        days = int(event['days'])
    print(f"Scanning Clients with {days} or less to secret expiration")
    table = dynamodb.Table('****')
    scan_kwargs = {
        'ProjectionExpression': 'client_id, sort, developer_id, secrets, approved'
    }
    test = False
    if 'test' in event:
        test = event['test'] == 'True'
    done = False
    start_key = None
    developers = {}
    clients_to_be_notified = []
    if test:
        developers['idm-portal1'] = '***'
        clients_to_be_notified = [{'client_id': 'idm-portal1', 'expiration_date': '04/17/2021'}]
    while not done:
        if start_key:
            scan_kwargs['ExclusiveStartKey'] = start_key
        response = table.scan(**scan_kwargs)
        process_clients(response.get('Items', []), developers, clients_to_be_notified, days)
        start_key = response.get('LastEvaluatedKey', None)
        done = start_key is None
    print("total developers ", len(developers), " total clients_to_be_notified ", len(clients_to_be_notified))
    return notify_clients(clients_to_be_notified, developers)


if __name__ == '__main__':
    scan_clients(event={'days': 30, 'local': False, 'test': True}, context=None)
Response
{
"errorMessage": "Unable to import module 'test_client_visitor': No module named 'test_client_visitor'",
"errorType": "Runtime.ImportModuleError",
"stackTrace": []
}
Your file must be named test_client_visitor.py. The way Lambda runs the code is by importing the main file and calling the handler function. See the AWS docs on setting up a handler for Python.
The reason you didn't run into this issue locally is, I assume, because you were running the file directly on the command line (python test_client_visitor.py) rather than having it imported. When Lambda imports the module, a file named test_client_visitor.py has to exist at the root of the deployment package.
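As a rough illustration (not Lambda's actual source), the runtime resolves a handler string such as test_client_visitor.handler roughly the way this sketch does, which is why the module file has to sit at the top of the zip with a matching name:

import importlib


def resolve_handler(handler_string):
    # "test_client_visitor.handler" -> module "test_client_visitor", attribute "handler".
    # import_module() is the step that fails with an ImportModuleError-style message
    # when the module file is not at the root of the deployment package.
    module_name, _, function_name = handler_string.rpartition(".")
    module = importlib.import_module(module_name)
    return getattr(module, function_name)


if __name__ == "__main__":
    # Works only if test_client_visitor.py sits next to this file, mirroring
    # what the Lambda runtime expects inside the zip.
    handler = resolve_handler("test_client_visitor.handler")
    print(handler({"days": "30", "test": "True"}, {"requestid": "1234"}))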
I was able to fix this issue by packaging the contents into the zip correctly, avoiding the creation of an extra top-level folder, with the command below (run from inside the folder so the files land at the root of the archive).
Command:
cd folder; zip -r ../filename.zip *
Thank you everyone for your inputs.

Error: SQLite objects can only be used in the same thread

I'm working on a project very similar to this one: GitHub
I have a class:
import sqlite3


class DBfunctions:
    def __init__(self, dbname='../example.db'):
        self.dbname = dbname
        self.conn = sqlite3.connect(dbname)

    def search_db(self, telegram_id):
        telegram_id = (telegram_id,)
        sql = 'SELECT * FROM user WHERE id = ?;'
        row = self.conn.execute(sql, telegram_id)
        return row

    def newuser_db(self, tele_id, name, nick):
        par = (tele_id, name, nick, 0)
        sql = 'INSERT INTO user VALUES(?,?,?,?);'
        self.conn.execute(sql, par)
        self.conn.commit()
Then I have the main project:
from file import DBfunctions

db = DBfunctions()


def start(update: Update, context: CallbackContext):  # before edit: somethingtodo
    flag = db.search_db(update.effective_user.id)  # here problems start
    if flag == None:
        db.newuser_db(update.effective_user.id, update.effective_user.first_name, update.effective_user.username)
        update.message.reply_text(
            'Hi!',
            reply_markup=markup,
        )
    else:
        update.message.reply_text(
            'Hey! Welcome back!',
            reply_markup=markup,
        )


def main():
    db.setup()  # this function is to create tables if they don't exist yet
    dispatcher.add_handler(CommandHandler('start', start))
    # other functions, but nothing correlated


if __name__ == '__main__':
    main()
And then the error appears:
File "filefolder/file.py", line 29, in search_db
row = self.conn.execute(sql,telegram_id)
sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread. The object was created in thread id 15004 and this is thread id 11036.
I can't figure out what I can do to fix it, and I don't understand what is different from the project I found on GitHub (linked).
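No answer is reproduced here, but the error message itself states the constraint: the sqlite3 connection is created in the main thread, while python-telegram-bot runs handlers in worker threads. One common workaround, sketched below under that assumption and keeping the question's class name, is to open a short-lived connection inside each method so every SQLite object stays in the thread that uses it; another is to pass check_same_thread=False to sqlite3.connect and serialize access yourself.

import sqlite3
from contextlib import closing


class DBfunctions:
    def __init__(self, dbname='../example.db'):
        self.dbname = dbname

    def search_db(self, telegram_id):
        # A fresh connection per call keeps every SQLite object inside the
        # thread that uses it, which is what the error message demands.
        with closing(sqlite3.connect(self.dbname)) as conn:
            return conn.execute('SELECT * FROM user WHERE id = ?;', (telegram_id,)).fetchone()

    def newuser_db(self, tele_id, name, nick):
        with closing(sqlite3.connect(self.dbname)) as conn:
            conn.execute('INSERT INTO user VALUES(?,?,?,?);', (tele_id, name, nick, 0))
            conn.commit()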

Flask-SQLAlchemy TimeoutError

Flask
Python 2.7
Postgres 9
Ubuntu 14.04
I'm using Flask and SQLAlchemy; after 15 consecutive HTTP requests I get:
QueuePool limit of size 5 overflow 10 reached, connection timed out, timeout 30
Then the server stops responding.
This is very similar to:
Flask-SQLAlchemy TimeoutError
I don't have a session engine; to my understanding, Flask-SQLAlchemy should take care of it.
What do I need to configure to support more sessions and to clean up existing ones periodically?
app.py
import models

api_app = Flask(__name__)
api_app.config.from_pyfile(settings.celery_config)
db = SQLAlchemy(api_app)

@api_app.teardown_appcontext
def shutdown_session(exception=None):
    try:
        db.session.close()
    except AttributeError, e:
        print str(e)
    except Exception, e:
        print api_app.name
        print str(e)

@api_app.route('/api/1.0/job/<string:ref>/')
def get_job_by_id(ref):
    """
    :param id:
    :return:
    """
    try:
        if request.method == 'GET':
            job = models.Job.query.filter(models.Job.reference == ref)
            if job:
                job_ = job.all()
                if len(job_) == 1:
                    return jsonify(job_[0].serialize())
                resp = Response(status=404, mimetype='application/json')
                return resp
            else:
                resp = Response(status=405, mimetype='application/json')
                return resp
    except Exception, e:
        print str(e)
        resp = Response(status=500, mimetype='application/json')
        return resp
models.py
from api_app import db

class Job(db.Model, AutoSerialize, Serializer):
    __tablename__ = 'job'
    __public__ = ('status', 'description', 'reference')

    id = Column(Integer, primary_key=True, server_default=text("nextval('job_id_seq'::regclass)"))
    status = Column(String(40), nullable=False)
    description = Column(String(200))
    reference = Column(String(50))

    def serialize(self):
        d = Serializer.serialize(self)
        del d['id']
        return d
Based on @spicyramen's comment: increase SQLALCHEMY_POOL_SIZE, the size of the database connection pool.
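A minimal sketch of that change, assuming the classic Flask-SQLAlchemy config keys (SQLALCHEMY_POOL_SIZE, SQLALCHEMY_MAX_OVERFLOW, SQLALCHEMY_POOL_RECYCLE); the values and the database URI below are illustrative placeholders, not recommendations:

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

api_app = Flask(__name__)
api_app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/example'  # placeholder

# Illustrative values: enlarge the pool beyond the default size 5 / overflow 10,
# and recycle idle connections so they are returned to the pool periodically.
api_app.config['SQLALCHEMY_POOL_SIZE'] = 20
api_app.config['SQLALCHEMY_MAX_OVERFLOW'] = 20
api_app.config['SQLALCHEMY_POOL_RECYCLE'] = 280

db = SQLAlchemy(api_app)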

query from sqlalchemy returns AttributeError: 'NoneType' object

from pox.core import core
import pox.openflow.libopenflow_01 as of
import re
import datetime
from sqlalchemy import create_engine, ForeignKey
from sqlalchemy import Column, Date, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql.expression import exists

log = core.getLogger()

engine = create_engine('sqlite:///nwtopology.db', echo=False)
Base = declarative_base()
Session = sessionmaker(bind=engine)
session = Session()

########################################################################
class SourcetoPort(Base):
    """"""
    __tablename__ = 'source_to_port'
    id = Column(Integer, primary_key=True)
    port_no = Column(Integer)
    src_address = Column(String, index=True)

    #----------------------------------------------------------------------
    def __init__(self, src_address, port_no):
        """"""
        self.src_address = src_address
        self.port_no = port_no

########################################################################
# create tables
Base.metadata.create_all(engine)

class Tutorial (object):
    def __init__ (self, connection):
        self.connection = connection
        connection.addListeners(self)
        # Use this table to keep track of which ethernet address is on
        # which switch port (keys are MACs, values are ports).
        self.mac_to_port = {}
        self.matrix = {}
        # This will keep track of the traffic matrix.
        # matrix[i][j] = number of times a packet from i went to j

    def send_packet (self, buffer_id, raw_data, out_port, in_port):
        #print "calling send_packet"
        # Sends a packet out of the specified switch port.
        msg = of.ofp_packet_out()
        msg.in_port = in_port
        msg.data = raw_data
        # Add an action to send to the specified port
        action = of.ofp_action_output(port = out_port)
        msg.actions.append(action)
        # Send message to switch
        self.connection.send(msg)

    def act_like_hub (self, packet, packet_in):
        # flood packet on all ports
        self.send_packet(packet_in.buffer_id, packet_in.data,
                         of.OFPP_FLOOD, packet_in.in_port)

    def act_like_switch (self, packet, packet_in):
        """
        Implement switch-like behavior.
        """
        # Learn the port for the source MAC
        #print "RECIEVED FROM PORT ", packet_in.in_port, "SOURCE ", packet.src

        # create a Session
        #Session = sessionmaker(bind=engine)
        #session = Session()
        self.mac_to_port[packet.src] = packet_in.in_port

        #if self.mac_to_port.get(packet.dst) != None:
        #print "count for dst", session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).count(), str(packet.dst)
        #if session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).count():
        if session.query(exists().where(SourcetoPort.src_address == str(packet.dst))).scalar() is not None:
            # send this packet
            print "got info from the database"
            q_res = session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).one()
            self.send_packet(packet_in.buffer_id, packet_in.data, q_res.port_no, packet_in.in_port)

            # create a flow modification message
            msg = of.ofp_flow_mod()
            # set the fields to match from the incoming packet
            msg.match = of.ofp_match.from_packet(packet)
            # send the rule to the switch so that it does not query the controller again.
            msg.actions.append(of.ofp_action_output(port=q_res.port_no))
            # push the rule
            self.connection.send(msg)
        else:
            # flood this packet out as we don't know about this node.
            print "flooding the first packet"
            self.send_packet(packet_in.buffer_id, packet_in.data,
                             of.OFPP_FLOOD, packet_in.in_port)
            #self.matrix[(packet.src, packet.dst)] += 1
            entry = SourcetoPort(src_address=str(packet.src), port_no=packet_in.in_port)
            # add the record to the session object
            session.add(entry)
            # commit the record to the database
            session.commit()

    def _handle_PacketIn (self, event):
        """
        Handles packet in messages from the switch.
        """
        packet = event.parsed  # This is the parsed packet data.
        if not packet.parsed:
            log.warning("Ignoring incomplete packet")
            return

        packet_in = event.ofp  # The actual ofp_packet_in message.
        #self.act_like_hub(packet, packet_in)
        self.act_like_switch(packet, packet_in)

def launch ():
    """
    Starts the component
    """
    def start_switch (event):
        log.debug("Controlling %s" % (event.connection,))
        Tutorial(event.connection)
    core.openflow.addListenerByName("ConnectionUp", start_switch)
When I run the above code I get the error shown below. The problem I am facing is that it happens when I use
if session.query(exists().where(SourcetoPort.src_address == str(packet.dst))).scalar() is not None:
in place of the count query
#if session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).count():
With that check in place, querying the database with
q_res = session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).first()
self.send_packet(packet_in.buffer_id, packet_in.data, q_res.port_no, packet_in.in_port)
gives the following error:
DEBUG:core:POX 0.1.0 (betta) going up...
DEBUG:core:Running on CPython (2.7.3/Aug 1 2012 05:14:39)
DEBUG:core:Platform is Linux-3.5.0-23-generic-x86_64-with-Ubuntu-12.04-precise
INFO:core:POX 0.1.0 (betta) is up.
DEBUG:openflow.of_01:Listening on 0.0.0.0:6633
INFO:openflow.of_01:[00-00-00-00-00-02 1] connected
DEBUG:tutorial:Controlling [00-00-00-00-00-02 1]
got info from the database
ERROR:core:Exception while handling Connection!PacketIn...
Traceback (most recent call last):
  File "/home/karthik/pox/pox/lib/revent/revent.py", line 234, in raiseEventNoErrors
    return self.raiseEvent(event, *args, **kw)
  File "/home/karthik/pox/pox/lib/revent/revent.py", line 281, in raiseEvent
    rv = event._invoke(handler, *args, **kw)
  File "/home/karthik/pox/pox/lib/revent/revent.py", line 159, in _invoke
    return handler(self, *args, **kw)
  File "/home/karthik/pox/tutorial.py", line 118, in _handle_PacketIn
    self.act_like_switch(packet, packet_in)
  File "/home/karthik/pox/tutorial.py", line 86, in act_like_switch
    self.send_packet(packet_in.buffer_id, packet_in.data,q_res.port_no, packet_in.in_port)
AttributeError: 'NoneType' object has no attribute 'port_no'
got info from the database
ERROR:core:Exception while handling Connection!PacketIn...
This line:
if session.query(exists().where(SourcetoPort.src_address == str(packet.dst))).scalar() is not None:
is always true. The reason is that scalar() returns None only if there are no rows. However, your query looks like SELECT EXISTS (SELECT * FROM source_to_port WHERE source_to_port.src_address=?), which always returns exactly one row with one column. The result will thus be True or False, never None.
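If the existence check were kept, the fix would be to drop the is not None and test the boolean the query already returns, for example:

# scalar() yields True or False here, so test it directly instead of comparing with None
if session.query(exists().where(SourcetoPort.src_address == str(packet.dst))).scalar():
    print "got info from the database"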
Moving on to the line before the line that throws your exception: first() returns None if there are no matches, so q_res is None. Since q_res is None, q_res.port_no on the next line raises an exception.
(Note you can use one() if you want an exception to be thrown if there is no match.)
If you are expecting a match, double-check your data and your filter_by() condition to make sure they are doing what you think they should.
However, I recommend that you make just one query instead of two, using first() or one(). With first(), you branch based on q_res being None or not:
q_res = session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).first()
if q_res is not None:
    print "got info from the database"
    self.send_packet(....)
    ...
else:
    print "flooding the first packet"
    ...
Or with one(), you put your "flooding" branch in an exception handler:
from sqlalchemy.orm.exc import (NoResultFound, MultipleResultsFound)

try:
    q_res = session.query(SourcetoPort).filter_by(src_address=str(packet.dst)).one()
except NoResultFound:
    print "flooding the first packet"
    ...
# except MultipleResultsFound:
#     print "More than one result found! WUT?!"
else:
    print "got info from the database"
    ...
A difference between these two approaches is that one() will ensure there is one and only one result, whereas first() doesn't care if there are multiple results.
