I have two implementations of a database queue (they use different tables) and I want both of them to use objects of the same class, so they look really similar:
class AbstractDBQueue(object):
    def __init__(self, tablename):
        self.tablename = tablename
        self.metadata = MetaData()
        self.engine = create_engine('mysql+mysqldb://%s:%s@%s:%d/%s' % (
            settings.DATABASE.get('USER'),
            settings.DATABASE.get('PASSWORD'),
            settings.DATABASE.get('HOST') or '127.0.0.1',
            settings.DATABASE.get('PORT') or 3306,
            settings.DATABASE.get('NAME')
        ), encoding='cp1251', echo=True, pool_recycle=7200)
        self.metadata.bind = self.engine
        self.session = sessionmaker(bind=self.engine)()

    def setup_table(self, table, entity_name):
        self.table = table
        newcls = type(entity_name, (SMSMessage, ), {})
        mapper(newcls, table)
        return newcls

    def put(self, message=None, many_messages=[]):
        if message:
            self.session.add(message)
        else:
            for m in many_messages:
                self.session.add(m)
        self.session.commit()

    def get(self, limit=None):
        if limit:
            q = self.session.query(self.SMSClass).limit(limit)
        else:
            q = self.session.query(self.SMSClass)
        smslist = []
        for sms in q:
            smslist.append(sms)
        self.session.expunge_all()
        return smslist
class DBQueue(AbstractDBQueue):
    """
    MySQL database driver with queue interface
    """
    def __init__(self):
        self.tablename = settings.DATABASE.get('QUEUE_TABLE')
        super(DBQueue, self).__init__(self.tablename)
        self.logger = logging.getLogger('DBQueue')
        self.SMSClass = self.setup_table(Table(self.tablename, self.metadata, autoload=True), "SMSQueue")

class DBWorkerQueue(AbstractDBQueue):
    """
    MySQL database driver with queue interface for separate workers queue
    """
    def __init__(self):
        self.tablename = settings.DATABASE.get('WORKER_TABLE')
        super(DBWorkerQueue, self).__init__(self.tablename)
        self.logger = logging.getLogger('DBQueue')
        self.SMSClass = self.setup_table(Table(self.tablename, self.metadata, autoload=True), "SMSWorkerQueue")

    def _install(self):
        self.metadata.create_all(self.engine)
SMSMessage is the name of the class I want to use. The map_class_to_table() function is a hack I found in the SQLAlchemy documentation: http://www.sqlalchemy.org/trac/wiki/UsageRecipes/EntityName
But it doesn't seem to help: when the first queue instance maps SMSMessage to its table, all objects I pass to the second queue's put() are implicitly cast to the first queue's mapped class, and the second queue's table is still empty after session.commit().
I need to use both queues at the same time, maybe even with threads (I think a connection pool will be useful), but I just can't make this work. Could you help, please?
I think your problem relates to the tablename variable. A class variable gets defined once, when the class is created, and then does not change, so it would be the same for both of your instances when they access it with self.tablename. To fix this, define it inside __init__ as self.tablename; it will then be initialized each time you create a new object.
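For illustration, a minimal sketch of the difference (the class names here are hypothetical):

    class SharedName(object):
        tablename = 'queue_a'  # class variable: one value shared by every instance

    class PerInstanceName(object):
        def __init__(self, tablename):
            self.tablename = tablename  # instance variable: one value per object

    a = PerInstanceName('queue_a')
    b = PerInstanceName('queue_b')
    assert a.tablename != b.tablename  # each instance keeps its own table name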
I have a main database and a replica (they are the same in the test environment):
core_db = PooledPostgresqlExtDatabase(**DB_CONFIG)
replica_db = PooledPostgresqlExtDatabase(**DB_REPLICA_CONFIG)
A controller executes the query against different databases depending on the model:
class BaseController:
    def _get_logs(self):
        query = self.model.select()
        if issubclass(self.model, ModelToReplica):
            query = query.bind(replica_db)
        return list(query)

class ReplicaExampleController(BaseController):
    model = ModelToReplica

    def process(self):
        return self._get_logs()

class BaseExampleController(BaseController):
    model = BaseModel

    def process(self):
        return self._get_logs()
Controllers are linked to two urls:
/get_core_result/ # Returns the result from BaseExampleController (core_db)
/get_replica_result/ # Returns the result from ReplicaExampleController (replica_db)
I want to check that each of the controllers accesses the right database. I know that the reference to the database object is stored in the request object. How do I get at it from the test? I'm using pytest. I understand that I probably need to use mock, but I don't understand how.
Unfortunately, this is all I have so far:
class TestSwitchDB:
    def test_switch_db_to_replica(self):
        url_core = url_for('core_db_controller')
        core_result = self.client.get(url_core)
        url_replica = url_for('replica_db_controller')
        replica_result = self.client.get(url_replica)
In test_switch_db_to_replica you can mock the db query and patch it, something like:

    with patch.object(BaseController, '_get_logs') as mock_get_logs:
        mock_get_logs.return_value = "your expected return"
As a reference: https://docs.python.org/3/library/unittest.mock-examples.html
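A fuller sketch of the test could look like this (the client fixture and the route name are assumptions taken from your snippet):

    from unittest.mock import patch

    class TestSwitchDB:
        def test_switch_db_to_replica(self, client):
            # Patch the query method so no real database is touched; the mock
            # records that the request went through _get_logs.
            with patch.object(BaseController, '_get_logs') as mock_get_logs:
                mock_get_logs.return_value = ['expected row']
                replica_result = client.get(url_for('replica_db_controller'))
            mock_get_logs.assert_called_once()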
In Google App Engine, say I have a Parent and a Child entity:
class Parent(ndb.Model):
    pass

class Child(ndb.Model):
    parent_key = ndb.KeyProperty(indexed=True)
    # ... other properties I don't need to fetch ...
I have a list of parents' keys, say parents_list, and I'm trying to answer efficiently: which parents in parents_list have a child?
Ideally, I would run this query:
children_list = Child.query().filter(Child.parent_key.IN(parents_list)).fetch(projection=[Child.parent_key])
It does not work because the projected property (parent_key) is also used in the equality filter, which projections do not allow. So I would have to retrieve all properties, which seems inefficient.
Is there a way to efficiently solve this?
Your child model should actually be
class Child(ndb.Model):
    parent_key = ndb.KeyProperty(kind="Parent", indexed=True)
If you were doing this in Python 2, you could use an ndb tasklet (see the code below; note that I haven't executed this code myself, so it's not guaranteed to work; it's just here to serve as a guide, but I have used tasklets in the past). If Python 3, try to create async queries.
class Parent(ndb.Model):
    @classmethod
    def parentsWithChildren(cls, parents_list):
        @ndb.tasklet
        def child_callback(parent_key):
            q = Child.query(Child.parent_key == parent_key)
            output = yield q.fetch_async(projection=['parent_key'])
            raise ndb.Return((parent_key, output))

        # For each key in parents_list, invoke child_callback, which returns
        # a result only if the parent_key matches.
        # ndb tasklets are asynchronous, so the queries run in parallel.
        final_output = [child_callback(a) for a in parents_list]
        return final_output
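In Python 3 (the google-cloud-ndb client), a sketch of the async-queries idea; a keys-only fetch per parent also sidesteps the projection restriction. Same caveat: untested, just a guide:

    from google.cloud import ndb

    def parents_with_children(parents_list):
        # One keys-only query per parent, issued in parallel; keys-only
        # results avoid fetching the properties you don't need.
        futures = [
            Child.query(Child.parent_key == p).fetch_async(limit=1, keys_only=True)
            for p in parents_list
        ]
        # A parent qualifies if its query returned at least one child key.
        return [p for p, f in zip(parents_list, futures) if f.get_result()]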
Looking for examples of class patterns people use to wrap PyODBC, I found this example here at SO: single database connection throughout the python application.
I don't understand how the DBConnection class works in the original example. How is DBConnector being initialized if

    cls.connection = DBConnector().create_connection()

doesn't pass the required __init__ values to DBConnector()? When I try to add them I get TypeError: DBConnection() takes no arguments.
import pyodbc

class DBConnector(object):
    def __new__(cls, *args, **kwargs):
        if not hasattr(cls, 'instance'):
            cls.instance = super(DBConnector, cls).__new__(cls)
        return cls.instance

    def __init__(self, ipaddress, port, dsn, remotedsn):
        self.ipaddress = ipaddress
        self.port = port
        self.dsn = dsn
        self.remotedsn = remotedsn
        self.dsnstr_default = 'OpenMode=F;OLE DB Services=-2;'
        self.dbconn = None

    # creates a new connection
    def create_connection(self):
        return pyodbc.connect(self.dsnstr_default,
                              IPAddress=self.ipaddress,
                              Port=self.port,
                              DSN=self.dsn,
                              RemoteDSN=self.remotedsn,
                              autocommit=True)

    # for explicitly opening the database connection
    def __enter__(self):
        self.dbconn = self.create_connection()
        return self.dbconn

    # the with statement passes exception info here; the defaults keep the
    # manual a.__exit__() call below working
    def __exit__(self, exc_type=None, exc_value=None, traceback=None):
        self.dbconn.close()
class DBConnection(object):
    connection = None

    @classmethod
    def get_connection(cls, new=False, *args, **kwargs):  # << ADDED THIS
        if new or not cls.connection:
            cls.connection = DBConnector(*args, **kwargs).create_connection()
            # ^^^ SEE NOTE below
        return cls.connection

    @classmethod
    def GetCompanyInfo(cls):
        """execute query on singleton db connection"""
        connection = cls.get_connection()
        try:
            connection.setencoding('utf-8')
            cursor = connection.cursor()
        except pyodbc.ProgrammingError:
            connection = cls.get_connection(new=True)
            cursor = connection.cursor()
        # Start Query
        cursor.execute("SELECT CompanyName, EIN, SSN FROM Company")
        for cname, ein, ssn in cursor.fetchall():
            result = (cname, ein, ssn)
        # End Query
        cursor.close()
        return result
Did some homework...
I can find several examples explaining the Singleton class pattern, so I've got the Connector class to work:
a = DBConnector('127.0.0.1', '4500', 'pyauto_local', None)
a.create_connection()
# <pyodbc.Connection at 0x5772110>
a = DBConnector('127.0.0.1', '4500', 'pyauto_local', None)
a.__enter__()
# <pyodbc.Connection at 0x605f278>
a.__exit__()
I did some testing...
I had a successful test manually inserting the connection parameters into the get_connection method:
cls.connection = DBConnector('127.0.0.1', '4500', 'pyauto_local', None).create_connection()
# ^^^ NOTED ABOVE
# Testing
cx = DBConnection()
cx.GetCompanyInfo()
# ('Zep', '12-3456789', None)
Now I'm curious
I could be done if I put the connection and the queries all in one class, a monster class.
OR, since I understand the Car > Blue Car OOP pattern better: another post here at SO has an example of extending the Singleton class, and that makes more sense to me.
NOW I'm really curious how the original was supposed to work:
@classmethod
def get_connection(cls, new=False):
    """Create or return the singleton database connection"""
    if new or not cls.connection:
        cls.connection = DBConnector().create_connection()
    return cls.connection
How do the parameters get into the DBConnection class without an __init__? The post is also tagged Django, so maybe they skipped over an assumed Django context? Or does Django give it for free somehow?
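My best guess so far is that the parameters were meant to come from some ambient configuration, e.g. defaults pulled from a settings module, so that a bare DBConnector() still works (this is purely hypothetical on my part, values made up):

    # settings.py (hypothetical values)
    DB_IPADDRESS = '127.0.0.1'
    DB_PORT = '4500'
    DB_DSN = 'pyauto_local'
    DB_REMOTEDSN = None

    # connector module
    import settings

    class DBConnector(object):
        # Defaults come from settings, so DBConnector() needs no arguments,
        # while explicit values can still be passed in tests.
        def __init__(self, ipaddress=settings.DB_IPADDRESS, port=settings.DB_PORT,
                     dsn=settings.DB_DSN, remotedsn=settings.DB_REMOTEDSN):
            self.ipaddress = ipaddress
            self.port = port
            self.dsn = dsn
            self.remotedsn = remotedsn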
I have the following example code which uses either MongoEngine or Peewee as the DB backend.
import mongoengine, peewee
from mongomodels import *
from mysqlmodels import *

class Parser(object):
    def __init__(self, line, dbBackend):
        if dbBackend in ["MongoDB", "MySQL"]:
            self.line = line
            self.DB = dbBackend
            user = self.createUser()
            car = self.createCar(user)
            parking = self.createParking(car)
        else:
            raise Exception()

    def createUser(self):
        if self.DB == "MongoDB":
            newUserID = self._createMongoUser(self.line['firstname'], self.line['lastname'], '...')
        else:
            newUserID = self._createMySQLUser(self.line['firstname'], self.line['lastname'], '...')
        return newUserID

    def _createMongoUser(self, firstname, lastname, '...'):
        try:
            _user = MongoUserModel.objects.get(firstname=firstname, lastname=lastname)
        except mongoengine.errors.DoesNotExist as e:
            user = MongoUserModel(firstname=firstname, lastname=lastname)
            _user = user.save()
        finally:
            return _user

    def _createMySQLUser(self, firstname, lastname, '...'):
        try:
            _user = MySQLUserModel.get(MySQLUserModel.firstname == firstname, MySQLUserModel.lastname == lastname)
        except Exception as e:
            user = MySQLUserModel(firstname=firstname, lastname=lastname)
            _user = user.save()
        finally:
            return _user

    def createCar(self, user):
        pass

    def createParking(self, car):
        pass
Is there any good practice / trick / module to keep my code DRY and avoid defining two creation methods for each model?
Should I create a new abstraction class 'UserModel', as PDO does in PHP?
This is something I went through recently when I swapped from a Mongo backend to Postgres. When I set up the original project I had some models and a DataLayer. The data layer (dl) had quite a simple interface that I used throughout my app.
# note: this is half python / half pseudocode
class Model1(object):
    __collection__ = 'model1'
    __tablename__ = 'model1'
    # field definitions etc

class MongoDataLayer(object):
    def __init__(self, mongo_db_connection):
        self.conn = mongo_db_connection

    def load(self, model, conditions):
        raw = self.conn[model.__collection__].find(...)
        return model(**raw)

    def persist(self, obj):
        self.conn[obj.__collection__].save(obj.as_dict())

class SQLDataLayer(object):
    def __init__(self, sa_session_factory):
        self.Session = sa_session_factory
        self.session = self.Session()

    def load(self, model, conditions):
        return self.session.query(model).filter_by(**conditions).one()  # ...etc

    def persist(self, obj):
        self.session.add(obj)  # the SQL layer persists through the session,
        self.session.commit()  # not through a collection lookup

# connections - mongo and postgres (I use SQLAlchemy)
dl_mongo = MongoDataLayer(db...)
dl_sql = SQLDataLayer(Session...)

# using them - you don't care which one you have
m = dl_mongo.load(models.Model1)
dl_mongo.persist(m)

m = dl_sql.load(models.Model1)
dl_sql.persist(m)
In my app I create the dl at startup and then inject it wherever data access needs to happen. The app itself knows about models but not about the details of how to load or save them.
Maybe not the best way to do it, but it has worked well for me. I would be interested to hear how other people deal with it.
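For instance, the startup wiring can be as small as this (a sketch; the config keys are made up):

    def make_data_layer(config):
        # Decide once, at startup, which backend the app will use; the rest
        # of the app depends only on the load()/persist() interface.
        if config['backend'] == 'mongo':
            return MongoDataLayer(config['mongo_connection'])
        return SQLDataLayer(config['session_factory'])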
I am new to Python, and I'm starting to learn the basics of code structure. I've got a basic app that I'm working on, up on my GitHub.
For my simple app, I'm creating a basic "Evernote-like" service which allows the user to create and edit a list of notes. In the early design I have a Note object and a Notepad object, which is effectively a list of notes. Presently, I have the following file structure:
Notes.py
|
|------ Notepad (class)
|------ Note (class)
From my current understanding and implementation, this translates into the Notes module having a Notepad class and a Note class, so when I do an import I'm saying "from Notes import Notepad" / "from Notes import Note".
Is this the right approach? Out of Java habit, I feel that I should have a folder for Notes and the two classes as individual files.
My goal here is to understand what the best practice is.
As long as the classes are rather small, put them into one file.
You can still move them later, if necessary.
Actually, it is rather common for larger projects to have a rather deep internal hierarchy but expose a flat one to the user. So if you move things later but would still like to offer notes.Note even though the class Note has moved deeper, you can simply import notes.path.to.module.Note in notes, and the user can keep getting it from there. You don't have to do that, but you can. So even if you change your mind later and want to keep the API, no problem.
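For example, if Note later moves into a deeper module, a one-line re-export in the package keeps the public import path stable (the deep path here is made up):

    # notes/__init__.py
    # Users keep writing `from notes import Note` even after the move.
    from notes.storage.models import Note, Notepad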
I've been working on a similar application myself. I can't say this is the best possible approach, but it has served me well. The classes are meant to interact with the database (context) when the user makes a request (an HTTP request; this is a webapp).
# -*- coding: utf-8 -*-
import json
import datetime

class Note(object):
    """A note. This class is part of the data model and is instantiated every
    time there is access to the database"""

    def __init__(self, noteid=0, note="", date=None, context=None):
        self.id = noteid
        self.note = note
        # default computed per call; a default argument would be evaluated
        # only once, at function definition time
        self.date = date or datetime.datetime.now()
        self.ctx = context  # context holds the db connection and some globals

    def get(self):
        """Get the current object from the database. This function needs the
        instance to have an id"""
        if self.id == 0:
            raise self.ctx.ApplicationError(404, "No note with id 0 exists")
        cursor = self.ctx.db.conn.cursor()
        cursor.execute("select note, date from %s.notes where id=%s" %
                       (self.ctx.db.DB_NAME, str(self.id)))
        data = cursor.fetchone()
        if not data:
            raise self.ctx.ApplicationError(404, ("No note with id "
                                                  + str(self.id) + " was found"))
        self.note = data[0]
        self.date = data[1]
        return self

    def insert(self, user):
        """This function inserts the object into the database. It can be an
        empty note. The user must be authenticated to add notes
        (authentication handled elsewhere)"""
        cursor = self.ctx.db.conn.cursor()
        query = ("insert into %s.notes (note, owner) values ('%s', '%s')" %
                 (self.ctx.db.DB_NAME, str(self.note), str(user['id'])))
        cursor.execute(query)
        return self

    def put(self):
        """Modify the current note in the database"""
        cursor = self.ctx.db.conn.cursor()
        query = ("update %s.notes set note = '%s' where id = %s" %
                 (self.ctx.db.DB_NAME, str(self.note), str(self.id)))
        cursor.execute(query)
        return self

    def delete(self):
        """Delete the current note, by id"""
        if self.id == 0:
            raise self.ctx.ApplicationError(404, "No note with id 0 exists")
        cursor = self.ctx.db.conn.cursor()
        query = ("delete from %s.notes where id = %s" %
                 (self.ctx.db.DB_NAME, str(self.id)))
        cursor.execute(query)

    def toJson(self):
        """Return a json string of the note object's data attributes"""
        return json.dumps(self.toDict())

    def toDict(self):
        """Return a dict of the note object's data attributes"""
        return {
            "id": self.id,
            "note": self.note,
            "date": self.date.strftime("%Y-%m-%d %H:%M:%S")
        }

class NotesCollection(object):
    """This class handles the notes as a collection"""

    def __init__(self):
        # an instance attribute: a class-level list would be shared by
        # every instance of the collection
        self.collection = []

    def get(self, user, context):
        """Populate the collection object and return it"""
        cursor = context.db.conn.cursor()
        cursor.execute("select id, note, date from %s.notes where owner=%s" %
                       (context.db.DB_NAME, str(user["id"])))
        note = cursor.fetchone()
        while note:
            self.collection.append(Note(note[0], note[1], note[2]))
            note = cursor.fetchone()
        return self

    def toJson(self):
        """Return a json string of the current collection"""
        return json.dumps([note.toDict() for note in self.collection])
I personally use Python as a "get it done" language and don't bother myself with details, which shows in the code above. However, one piece of advice: there are no private variables or methods in Python, so don't bother trying to create them. Make your life easier, code fast, get it done.
Usage example:
class NotesCollection(BaseHandler):
    @tornado.web.authenticated
    def get(self):
        """Retrieve all notes from the current user and return a json object"""
        allNotes = Note.NotesCollection().get(self.get_current_user(), settings["context"])
        json = allNotes.toJson()
        self.write(json)

    @protected
    @tornado.web.authenticated
    def post(self):
        """Handles all post requests to /notes"""
        requestType = self.get_argument("type", "POST")
        ctx = settings["context"]
        if requestType == "POST":
            Note.Note(note=self.get_argument("note", ""),
                      context=ctx).insert(self.get_current_user())
        elif requestType == "DELETE":
            Note.Note(noteid=self.get_argument("id"), context=ctx).delete()
        elif requestType == "PUT":
            Note.Note(noteid=self.get_argument("id"),
                      note=self.get_argument("note"),
                      context=ctx).put()
        else:
            raise ApplicationError(405, "Method not allowed")
By using decorators I keep user authentication and error handling out of the main code. This makes it clearer and easier to maintain.
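For illustration, an error-handling decorator of that kind could look roughly like this (a sketch, not the actual code; it assumes ApplicationError carries the HTTP status as its first argument, as in the usage above):

    import functools

    def protected(method):
        """Turn an uncaught ApplicationError in a handler into an HTTP error response."""
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            try:
                return method(self, *args, **kwargs)
            except ApplicationError as e:
                # Assumption: the first argument is the HTTP status code.
                self.send_error(e.args[0])
        return wrapper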