Pytest with sqlalchemy db connection terminates in subsequent test case - python

I am using a sqlalchemy engine connection as a pytest fixture to test functions that require database interaction. When I run the test cases individually, with pytest tests/filename.py::test_get_token_data or pytest tests/filename.py::test_create, the tests pass; however, when running the entire file, pytest tests/filename.py, I get the following error:
E sqlalchemy.exc.OperationalError: (psycopg2.errors.AdminShutdown) terminating connection due to administrator command
E server closed the connection unexpectedly
E This probably means the server terminated abnormally
E before or while processing the request.
(Background on this error at: http://sqlalche.me/e/13/e3q8)
To my understanding, after each test the database is supposed to be cleared (which I've confirmed); however, I do not understand the error. My code is below.
conftest.py
@pytest.fixture
def db_connection(test_config, admin_db_connection):
    config = test_config()
    engine = sqlalchemy.create_engine(config.DB_URL)
    connection = engine.connect()
    yield connection
    connection.close()

    # clear database
    from psycopg2.extensions import AsIs  # handle SQL quoting

    with admin_db_connection.cursor() as curs:
        curs.execute("drop database %s with (force);", (AsIs(config.DB_NAME),))
        curs.execute(
            "create database %s template vtag_template;", (AsIs(config.DB_NAME),)
        )
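The admin_db_connection fixture isn't shown here; a minimal sketch of what it might look like, assuming psycopg2 and an ADMIN_DB_URL setting (both assumptions), with autocommit enabled because CREATE DATABASE and DROP DATABASE cannot run inside a transaction:

@pytest.fixture(scope="session")
def admin_db_connection(test_config):
    import psycopg2
    from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT

    config = test_config()
    conn = psycopg2.connect(config.ADMIN_DB_URL)  # hypothetical maintenance-db URL
    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)  # required for CREATE/DROP DATABASE
    yield conn
    conn.close()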
filename.py
import sqlalchemy as sa
@pytest.fixture
def db_injection(db_connection):
    with db_connection.begin():
        some_value = db_connection.execute(
            sa.sql.text(
                """insert into general.some_table as t (some_id, name, description, is_active)
                   values (:some_id, :name, :description, :is_active)
                   returning t.some_id;"""
            ),
            some_id='1234',
            description="TEST",
            is_active=True,
            name="TEST",
        )
        some_id_ = some_value.first()[0]
@pytest.fixture
def some_function(db_connection):
    with db_connection.begin():
        some_table = db_connection.execute(
            sa.sql.text(
                """
                SELECT e.name
                FROM general.some_table e
                WHERE e.id = 1234
                """
            )
        )
    return some_table.first()
def test_get_token_data(client, headers_with_token, db_injection, some_function):
    token = some_function.name
    # API is using the same db connection
    response = client.get(f"/api/v2/{token}", headers=headers_with_token)
    assert response.status_code == 200

def test_create(client, img, b_64, headers_with_token, db_injection):
    items = Items(
        user_id=1234,
        template_id=67,
        b_64=b_64,
    )
    data = {
        "json": json.dumps(asdict(items)),
        "file": ("some_name", img, "multipart/form-data"),
    }
    response = client.post(
        "/api/v2/create",
        data=data,
        follow_redirects=True,
        content_type="multipart/form-data",
        headers=headers_with_token
    )
    assert response.status_code == 200

The issue was due to the application using an unbound Session. Since most of our application uses SQLAlchemy's Engine API, we decided to refactor to raw SQL via with engine.begin(). An alternative solution would have been to bind the engine to the session.
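A minimal sketch of the two approaches mentioned, with the module-level names engine and Session being assumptions:

import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker

engine = sa.create_engine("postgresql://localhost/app_db")  # assumed URL

# Option 1: run raw SQL in a transaction straight from the engine
with engine.begin() as conn:
    conn.execute(sa.text("update general.some_table set is_active = false"))

# Option 2: bind the sessionmaker to the engine so sessions never float unbound
Session = sessionmaker(bind=engine)
session = Session()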

Related

FASTAPI testing database not creating database

I'm trying to test my FastAPI app. It seems to me all settings are correct.
test_users.py
engine = create_engine(
    f"postgresql"
    f"://{settings.database_username}"
    f":{settings.database_password}"
    f"@{settings.database_hostname}"
    f":{settings.database_port}"
    f"/test_{settings.database_name}"
)
TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base.metadata.create_all(bind=engine)
def override_get_db():
    try:
        db = TestingSessionLocal()
        yield db
    finally:
        db.close()
app.dependency_overrides[get_db] = override_get_db
client = TestClient(app)
def test_create_user():
    response = client.post(
        "/users/",
        json={"email": "nikita@gmail.com", "password": "password"}
    )
    new_user = schemas.UserOutput(**response.json())
    assert response.status_code == 201
    assert new_user.email == "nikita@gmail.com"
When I run pytest, I get this error:
sqlalchemy.exc.OperationalError: (psycopg2.OperationalError) connection to server at "localhost" (::1), port 5432 failed: FATAL: database "test_social_media_api" does not exist
Why is the code not creating the database?
With engine = create_engine("postgresql://...") you define a connection to an existing PostgreSQL database.
And with Base.metadata.create_all(bind=engine) you create the tables, according to your models, in that existing database.
So the code you have written does not create a database; it expects that you give it an already existing database.
And that has to do with PostgreSQL itself.
PostgreSQL runs as a server, and a PostgreSQL server can run multiple databases. And each database has to be created explicitly.
Just telling SQLAlchemy the connection string is not enough.
It's possible to create a new database from Python itself by connecting to the PostgreSQL server (see https://www.tutorialspoint.com/python_data_access/python_postgresql_create_database.htm), or alternatively you can create it manually before you run your script, e.g. by running CREATE DATABASE databasename; inside psql (or any other database tool).
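A minimal sketch of doing that from Python with psycopg2, where the credentials are assumptions:

import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT

# connect to the maintenance database, not the one you are about to create
conn = psycopg2.connect(host="localhost", user="postgres", password="secret", dbname="postgres")
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)  # CREATE DATABASE cannot run in a transaction
with conn.cursor() as cur:
    cur.execute("CREATE DATABASE test_social_media_api;")
conn.close()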
However, if you want to test against a running database, I would suggest using testcontainers. They will spawn a new PostgreSQL server with an empty database every time you run the tests.
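A minimal sketch with testcontainers, assuming the testcontainers[postgres] package and the image tag (both assumptions):

from sqlalchemy import create_engine
from testcontainers.postgres import PostgresContainer

with PostgresContainer("postgres:15") as postgres:
    # a throwaway PostgreSQL server with an empty database, torn down on exit
    engine = create_engine(postgres.get_connection_url())
    Base.metadata.create_all(bind=engine)
    # ... run tests against `engine` here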
Notice that the example from the FastAPI documentation works differently.
They just use
SQLALCHEMY_DATABASE_URL = "sqlite:///./test.db"

engine = create_engine(
    SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
Base.metadata.create_all(bind=engine)
which creates the database.
This works because SQLite doesn't run as a server. It's just one file that represents the full database, and if the file doesn't exist, the SQLite database adapter will assume the database is empty and create the file for you. PostgreSQL doesn't work like this, though.

Insert data to a stored procedure sqlalchemy + MSSQL server in python

I am building some REST APIs using Python FastAPI and getting some data from MSSQL Server using SQLAlchemy.
I am trying to insert data via an existing stored procedure in MSSQL Server.
The stored procedure AcctsCostCentersAddV001 takes @ArDescription, @EngDescription, and @ErrMsg as parameters, with @ErrMsg declared as an output parameter.
MSSQL Server Code:
CREATE Proc [dbo].[AcctsCostCentersAddV001] @ArDescription nvarchar(100), @EngDescription varchar(100), @ErrMsg nvarchar(100) Output
As
    Insert Into AcctsCostCenters (ArDescription, EngDescription) Values(@ArDescription, @EngDescription)
    Set @ErrMsg = 'Test'
GO
My Python code:
from fastapi import APIRouter, Request, Depends, HTTPException
from fastapi.responses import JSONResponse
# # # # # # # # # # SQL # # # # # # # # # #
from sqlalchemy import text
from sqlalchemy.exc import ProgrammingError
# # # # # # # # # # Files # # # # # # # # # #
from dependencies import get_db
from sqlalchemy.engine import create_engine
from internal.config import username, password, SQL_SERVER, database_name

@router.get("/create/CostCenter/")
async def forms(request: Request, db=Depends(get_db)):
    try:
        connection_string = f'mssql://{username}:{password}@{SQL_SERVER}/{database_name}?driver=ODBC+Driver+17+for+SQL+Server'
        engine = create_engine(connection_string, pool_pre_ping=True)
        connection = engine.raw_connection()
        try:
            cursor_obj = connection.cursor()
            query = "Declare @ErrMsg nvarchar(100) Exec AcctsCostCentersAddV001 'Test', 'Test', @ErrMsg Output Print @ErrMsg"
            cursor_obj.execute(text(query))
            results = list(cursor_obj.fetchall())
            cursor_obj.close()
            connection.commit()
            print(results)
        finally:
            connection.close()
    except IndexError:
        raise HTTPException(status_code=404, detail="Not found")
    except ProgrammingError as e:
        print(e)
        raise HTTPException(status_code=400, detail="Invalid Entry")
    except Exception as e:
        print(e)
        return UnknownError(
            "unknown error caused by CostCenter API request handler", error=e)
    return JSONResponse(results)
For some reason this code doesn't raise any exceptions, yet I keep getting
('The SQL contains 0 parameter markers, but 3 parameters were supplied', 'HY000')
I have even tried this
cursor_obj.execute("Declare #ErrMsg nvarchar(100) Exec AcctsCostCentersAddV001 'Test', 'Test', #ErrMsg")
but I get back
No results. Previous SQL was not a query.
I tried wrapping the query in text(), but I got The first argument to execute must be a string or unicode query.
but when I go into MSSQL Server and run
Declare @ErrMsg nvarchar(100)
Exec AcctsCostCentersAddV001 'Test', 'Test', @ErrMsg Output
Print @ErrMsg
It runs without any problem.
My Env:
Ubuntu 21.04 VPS by OVH
I hope I am providing everything you guys need; let me know if I missed anything. Thanks!
btw I know I am connecting to the db twice :3
(Edited):
I am actually connecting to the DB from a database.py file, and I am connecting again in the function just for testing.
My database.py:
import time
from colorama import Fore
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine import create_engine
from internal.config import username, password, SQL_SERVER, database_name
try:
connection_string = f'mssql://{username}:{password}#{SQL_SERVER}/{database_name}?driver=ODBC+Driver+17+for+SQL+Server'
engine = create_engine(connection_string, pool_pre_ping=True)
connection = engine.raw_connection()
print(f"{Fore.CYAN}DB{Fore.RESET}: Database Connected. ")
SessionLocal = sessionmaker(autocommit=True, autoflush=False, bind=engine)
Base = declarative_base()
except Exception as e:
print(f"{Fore.RED}DB{Fore.RESET}: Couldn't connect to Database.", e)

TypeError: issubclass() arg 2 must be a class or tuple of classes

I've been using the same class for months for connecting to a SQL Server database, running queries, inserting data into staging tables, etc. Just since yesterday, whenever my code tries to insert into a staging table, I get the error:
TypeError: issubclass() arg 2 must be a class or tuple of classes
While debugging, I learned that this is happening in the method _relationships_for_fks in the automap.py module (sqlalchemy library). Specifically, this block fails because referred_cls is None, which is not supported by issubclass:
if local_cls is not referred_cls and issubclass(
        local_cls, referred_cls):
I'm on Python 3.6 and sqlalchemy version 1.2.15 (and haven't upgraded or anything lately). I have changed no code, and this error has just started. Below is the class I'm using for all SQL operations in my code. Any ideas are MUCH appreciated as I cannot figure out why I keep getting this error (oh yeah, and it's not always consistent; every third time or so, the code runs just fine). The method that fails is get_table_class, called from the method save_dataframe_to_table, which is called in various other places throughout my code (whenever I have to save data to a table in SQL Server I use this). The specific line of code that errors in this class is Base.prepare(engine, reflect=True).
#!/usr/bin/python
""" Connect and interact with a SQL Server database

Contains a class used for connecting and interacting with a SQL Server database.
"""
from common.Util.Logging import Logging
from common.Util.OSHelpers import get_log_filepath
import pandas
import urllib
import pyodbc
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import sessionmaker

Base = automap_base()
class SqlDatabase:
    """Connect and interact with a SQL Server database"""

    def __init__(self, server, database, driver, port, username, password, logging_obj=None):
        """ Create a common.DataAccess.SqlDatabase.SqlDatabase object

        Args:
            server: (str) name of the SQL Server
            database: (str) name of the database on the SQL Server
            driver: (str) name of the driver for use in the connection string, e.g. '{ODBC Driver 13 for SQL Server}'
            port: (str) SQL Server port number (typically this is 1433)
            username: (str) SQL Server username (leave blank to use Windows Authentication)
            password: (str) SQL Server password (leave blank to use Windows Authentication)
            logging_obj: (common.Util.Logging.Logging) initialized logging object
        """
        # Set class variables
        if logging_obj is None:
            log_filename = get_log_filepath('Python App')
            logging_obj = Logging(name=__name__, log_filename=log_filename, log_level_str='INFO')
        self.logging_obj = logging_obj
        self.server = server
        self.database = database
        self.driver = driver
        self.port = port
        self.username = username
        self.password = password
        self.connection_string = 'Driver=' + self.driver \
                                 + ';SERVER=' + self.server \
                                 + ',' + self.port \
                                 + ';DATABASE=' + self.database \
                                 + ';UID=' + self.username \
                                 + ';PWD=' + self.password
        # Test connection
        self.logging_obj.log(self.logging_obj.DEBUG, "method='common.DataAccess.SqlDatabase.__init__' message='Testing connection'")
        conn = self.open_connection()
        conn.close()
        # Log initialization success
        log_msg = """
        method='common.DataAccess.SqlDatabase.__init__'
        message='Initialized a SqlDatabase object'
        server='{server}'
        database='{database}'
        driver='{driver}'
        port='{port}'
        username='{username}'
        password='{password}'
        connection_string='{connection_string}'
        """.format(server=self.server,
                   database=self.database,
                   driver=self.driver,
                   port=self.port,
                   username=self.username,
                   password='*'*len(self.password),
                   connection_string=self.connection_string)
        self.logging_obj.log(self.logging_obj.INFO, log_msg)
    def open_connection(self):
        """ Open connection

        Opens a connection to a SQL Server database.

        Returns:
            conn: (pyodbc.Connection) connection to a SQL Server database
        """
        self.logging_obj.log(self.logging_obj.DEBUG, "method='common.DataAccess.SqlDatabase.open_connection' message='Opening SQL Server connection'")
        try:
            conn = pyodbc.connect(self.connection_string)
        except Exception as ex:
            self.logging_obj.log(self.logging_obj.ERROR,
                                 """
                                 method='common.DataAccess.SqlDatabase.open_connection'
                                 message='Error trying to open SQL Server connection'
                                 exception_message='{ex_msg}'
                                 connection_string='{cxn_str}'
                                 server='{server}'
                                 port='{port}'
                                 username='{username}'
                                 password='{password}'
                                 database='{database}'""".format(ex_msg=str(ex),
                                                                 cxn_str=self.connection_string,
                                                                 server=self.server,
                                                                 port=self.port,
                                                                 username=self.username,
                                                                 password='*'*len(self.password),
                                                                 database=self.database))
            raise ex
        else:
            self.logging_obj.log(self.logging_obj.DEBUG,
                                 """
                                 method='common.DataAccess.SqlDatabase.open_connection'
                                 message='Successfully opened SQL Server connection'
                                 connection_string='{cxn_str}'
                                 server='{server}'
                                 username='{username}'
                                 password='{password}'
                                 database='{database}'""".format(cxn_str=self.connection_string,
                                                                 server=self.server,
                                                                 username=self.username,
                                                                 password='*' * len(self.password),
                                                                 database=self.database))
            return conn
    def get_engine(self):
        """ Create a Sqlalchemy engine

        Returns:
            engine: (sqlalchemy.engine.Engine) engine for the database
        """
        self.logging_obj.log(self.logging_obj.DEBUG, "message='Creating a sqlalchemy engine'")
        params = urllib.parse.quote_plus(self.connection_string)
        try:
            engine = sqlalchemy.create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
        except Exception as ex:
            self.logging_obj.log(self.logging_obj.ERROR,
                                 """
                                 method='common.DataAccess.SqlDatabase.get_engine'
                                 message='Error trying to create a sqlalchemy engine'
                                 exception_message='{ex_msg}'
                                 connection_string='{conn_str}'""".format(ex_msg=str(ex),
                                                                          conn_str=self.connection_string))
            raise ex
        else:
            self.logging_obj.log(self.logging_obj.DEBUG,
                                 """
                                 method='common.DataAccess.SqlDatabase.get_engine'
                                 message='Successfully created a sqlalchemy engine'
                                 connection_string='{conn_str}'
                                 """.format(conn_str=self.connection_string))
            return engine
    def get_result_set(self, query_str):
        """ Get a result set as a Pandas dataframe

        Gets a result set using the pandas.read_sql method.

        Args:
            query_str: (str) query string

        Returns:
            df: (pandas.DataFrame) result set
        """
        log_msg = """
        method='common.DataAccess.SqlDatabase.get_result_set'
        message='Getting a result set'
        query_str='{query_str}'
        """.format(query_str=query_str)
        self.logging_obj.log(self.logging_obj.INFO, log_msg)
        conn = self.open_connection()
        df = pandas.read_sql(query_str, conn)
        conn.close()
        log_msg = """
        method='common.DataAccess.SqlDatabase.get_result_set'
        message='Successfully got a result set'
        query_str='{query_str}'
        """.format(query_str=query_str)
        self.logging_obj.log(self.logging_obj.INFO, log_msg)
        return df
    def execute_nonquery(self, query_str):
        """ Execute a non-query

        Executes a non-query such as a CREATE TABLE or UPDATE statement.

        Args:
            query_str: (str) non-query statement

        Returns:
        """
        log_msg = """
        method='common.DataAccess.SqlDatabase.execute_nonquery'
        message='Executing a non-query'
        query_str='{query_str}'
        """.format(query_str=query_str)
        self.logging_obj.log(self.logging_obj.INFO, log_msg)
        conn = self.open_connection()
        curs = conn.execute(query_str)
        curs.commit()
        curs.close()
        conn.close()
        log_msg = """
        method='common.DataAccess.SqlDatabase.execute_nonquery'
        message='Successfully executed a non-query'
        query_str='{query_str}'
        """.format(query_str=query_str)
        self.logging_obj.log(self.logging_obj.INFO, log_msg)
        return None
    def to_staging_table(self,
                         dataframe,
                         staging_table_name,
                         insert_index=True,
                         index_label=None,
                         if_table_exists='replace',
                         bulkcopy_chunksize=1000):
        """ Puts a pandas.DataFrame into a staging table

        This uses a bulk copy method to put data from a pandas.DataFrame into a SQL staging table.

        Args:
            dataframe: (pandas.DataFrame) dataframe with data to copy into a SQL server staging table
            staging_table_name: (str) name of the staging table to copy data into
            insert_index: (logical) indicates whether or not to insert an index
            index_label: (str) indicates the column name of the index - if None, an auto-generated index will be used
            if_table_exists: (str) indicates what pandas.DataFrame.to_sql method to use if the table already exists
            bulkcopy_chunksize: (int) number of rows to bulk copy at once

        Returns:
        """
        log_msg = """
        method='common.DataAccess.SqlDatabase.to_staging_table'
        message='Copying data into a staging table'
        staging_table_name='{staging_table_name}'
        """.format(staging_table_name=staging_table_name)
        self.logging_obj.log(self.logging_obj.INFO, log_msg)
        engine = self.get_engine()
        try:
            pandas.DataFrame.to_sql(
                self=dataframe,
                name=staging_table_name,
                con=engine,
                if_exists=if_table_exists,
                index=insert_index,
                index_label=index_label,
                chunksize=bulkcopy_chunksize)
        except Exception as ex:
            self.logging_obj.log(self.logging_obj.ERROR,
                                 """
                                 method='common.DataAccess.SqlDatabase.to_staging_table'
                                 message='Error trying to copy data into a staging table'
                                 exception_message='{ex_msg}'
                                 connection_string='{staging_table_name}'""".format(ex_msg=str(ex),
                                                                                    staging_table_name=staging_table_name))
            raise ex
        else:
            self.logging_obj.log(self.logging_obj.DEBUG,
                                 """
                                 method='common.DataAccess.SqlDatabase.to_staging_table'
                                 message='Successfully copied data into a staging table'
                                 staging_table_name='{staging_table_name}'
                                 """.format(staging_table_name=staging_table_name))
        return None
    def truncate_table(self, table_name, schema_name='dbo'):
        """ Truncate a table in the SQL database

        Usually used to truncate staging tables prior to populating them.

        Args:
            table_name: (str) name of the table to truncate
            schema_name: (str) name of the schema of the table to truncate

        Returns:
        """
        query_str = "TRUNCATE TABLE {schema_name}.{table_name}".format(schema_name=schema_name, table_name=table_name)
        self.execute_nonquery(query_str)
    def get_table_class(self, table_name, engine=None):
        """ Get a table's class

        Args:
            engine:
            table_name:

        Returns:
            table_class:
        """
        if engine is None:
            engine = self.get_engine()
        Base.prepare(engine, reflect=True)
        base_classes = Base.classes
        for index, value in enumerate(base_classes):
            class_name = value.__name__
            if class_name == table_name:
                class_index = index
        table_class = list(base_classes)[class_index]
        return table_class
    def save_dataframe_to_table(self,
                                dataframe,
                                table_name,
                                remove_id_column_before_insert=True):
        """ Save a pandas DataFrame to a table in SQL Server

        Args:
            dataframe: (pandas.DataFrame)
            table_name: (str)

        Returns:
        """
        engine = self.get_engine()
        Session = sessionmaker(bind=engine)
        session = Session()
        table = self.get_table_class(table_name, engine)
        if remove_id_column_before_insert:
            delattr(table, table_name+"Id")  # Id columns should always be <table_name>Id (USANA standard)
            dataframe.columns = table.__table__.columns.keys()[1:]  # Id columns should always be the first column in table (for simplicity people!)
        else:
            dataframe.columns = table.__table__.columns.keys()
        dataframe = dataframe.where((pandas.notnull(dataframe)), None)  # replace NaN with None for the bulk insert
        try:
            session.bulk_insert_mappings(table, dataframe.to_dict(orient="records"), render_nulls=True)
        except IntegrityError as e:
            session.rollback()
            self.logging_obj.log(self.logging_obj.ERROR, """method='common.DataAccess.SqlDatabase.save_dataframe_to_table'
                                 exception_message='{ex}'""".format(ex=str(e)))
        finally:
            session.commit()
            session.close()
The only other hint/clue into this issue is that I also just started getting the following warnings (for a whole set of tables in our database). I hadn't seen these warnings until yesterday.
SAWarning: This declarative base already contains a class with the same class name and module name as sqlalchemy.ext.automap.WeeklySocialSellingProductMetricsReport, and will be replaced in the string-lookup table.
I had a similar problem with an Oracle database, and it turned out that the reason was a difference in the letter case of the schema name. Automap converts Oracle schema names and table names to lowercase, but in metadata.reflect(engine, schema='MYSCHEMA') I provided my schema name in uppercase.
As a result, some tables were discovered twice:
as MYSCHEMA.mytable, probably generated by plain table discovery
as myschema.mytable, probably generated by a relationship discovered from another table
and caused warnings:
sqlalchemy\ext\declarative\clsregistry.py:129: SAWarning: This declarative base already contains a class with the same class name and module name as sqlalchemy.ext.automap.my_table_name, and will be replaced in the string-lookup table.
followed by the TypeError.
The solution was as simple as changing the schema name to lowercase.
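Applied to the reflection above, that fix is one line (assuming the schema really is named MYSCHEMA on the server):

metadata.reflect(engine, schema='myschema')  # lowercase, matching what automap generates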
This script helped me to spot table duplicates:
from pprint import pprint

from sqlalchemy import create_engine, MetaData
from sqlalchemy.ext.automap import automap_base

engine = create_engine(my_connection_string)
metadata = MetaData()
metadata.reflect(engine, schema='MYSCHEMA')  # I'm using the WRONG letter case here.
Base = automap_base(metadata=metadata)

# prepend table name with schema name
def classname_for_table(base, tablename, table):
    return str(table.fullname.replace(".", "__"))

Base.prepare(classname_for_table=classname_for_table)

# and look what's going on
pprint(Base.classes.keys())

Object already attached to session

I'm trying to write unit tests for a module within a Flask app that uses its own database connection.
The module opens its connection thus:
engine = create_engine(SQLALCHEMY_DATABASE_URI)
Session = sessionmaker(bind=engine)
session = Session()
and I then use session throughout the module.
My unit test has a fixture in conftest.py to create a new session:
@pytest.yield_fixture(scope='module')
def test_session(app):
    """
    Creates a new database session for a test. Note you must use this fixture
    if your test connects to the db.

    Here we not only support commit calls but also rollback calls in tests.
    """
    connection = db.engine.connect()
    transaction = connection.begin()
    options = dict(bind=connection, binds={})
    db_session = db.create_scoped_session(options=options)
    db_session.begin_nested()

    # session is actually a scoped_session
    # for the `after_transaction_end` event, we need a session instance to
    # listen for, hence the `session()` call
    @sqlalchemy.event.listens_for(db_session(), 'after_transaction_end')
    def restart_savepoint(sess, trans):
        if trans.nested and not trans._parent.nested:
            db_session.expire_all()
            db_session.begin_nested()

    db.session = db_session
    yield db_session

    db_session.remove()
    transaction.rollback()
    connection.close()
and in my test I do this:
def test_schedule_orders_by_last_update(test_session, create_test_user):
    vendor = test_session.query(Vendors).filter(Vendors.name == 'Melie Bianco').first()
    amazon = AmazonRequests(vendor)
    amazon.schedule_orders_by_last_update()

    result = test_session.query(AmazonReportRequests).filter(AmazonReportRequests.vendor == vendor).all()
    assert len(result) == 1
    assert result.vendor.name == vendor.name
My problem is that when I run the test it always ends with the following error:
self = <sqlalchemy.orm.session.Session object at 0x1104fab50>, state = <sqlalchemy.orm.state.InstanceState object at 0x110863f10>, obj = <AmazonReportRequests None>

    def _before_attach(self, state, obj):
        if state.session_id == self.hash_key:
            return False

        if state.session_id and state.session_id in _sessions:
            raise sa_exc.InvalidRequestError(
                "Object '%s' is already attached to session '%s' "
                "(this is '%s')" % (state_str(state),
>                                   state.session_id, self.hash_key))
E       InvalidRequestError: Object '<AmazonReportRequests at 0x110863e90>' is already attached to session '2' (this is '1')
Shouldn't a query just retrieve the row from the database and ignore the other session?
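The clash comes from the module creating its own session at import time while the fixture builds a second one; a query on the test session then touches objects still owned by the module's session. One way around this, sketched here assuming the module under test is importable as mymodule and exposes its session at module level (both assumptions), is to point the module at the test session:

import mymodule  # hypothetical module under test

def test_schedule_orders_by_last_update(test_session, create_test_user, monkeypatch):
    # make the module use the fixture's session instead of its import-time one
    monkeypatch.setattr(mymodule, "session", test_session)
    ...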

How to solve "Process terminated because the request deadline was exceeded. (Error code 123)" in Google API?

I have a project deployed on Google App Engine exposing a Google API (Python). Every request to any of the APIs makes a database connection, executes a procedure, returns data, and closes the connection. I was not able to access any of the APIs; they were showing
"Process terminated because the request deadline was exceeded. (Error code 123)" and "This request caused a new process to be started for your application, and thus caused your application code to be loaded for the first time. This request may thus take longer and use more CPU than a typical request for your application."
The database is also on the cloud (Google Cloud SQL). When I checked, there were 900 connections and more than 150 instances up, but no API request was being handled. This happens frequently, so I restart the database server and redeploy the API code to work around it. What is the issue and how can I solve it permanently? Here is my Python code for database connectivity:
import logging
import traceback
import os
import MySQLdb
from warnings import filterwarnings
filterwarnings('ignore', category=MySQLdb.Warning)

class TalkWithDB:
    def callQueries(self, query, req_args):
        try:
            if (os.getenv('SERVER_SOFTWARE') and os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')):
                db = MySQLdb.connect(unix_socket=UNIX_SOCKET + INSTANCE_NAME, host=HOST, db=DB, user=USER, charset='utf8', use_unicode=True)
            else:
                db = MySQLdb.connect(host=HOST, port=PORT, db=DB, user=USER, passwd=PASSWORD, charset='utf8', use_unicode=True)
            cursor = db.cursor()
            cursor.connection.autocommit(True)
            try:
                sql = query + str(req_args)
                logging.info("QUERY = " + sql)
                cursor.execute(sql)
                procedureResult = cursor.fetchall()
                if str(procedureResult) == '()':
                    logging.info("Procedure Returned 0 Record")
                    procedureResult = []
                    procedureResult.append({0: "NoRecord", 1: "Error"})
                    # procedureResult = (("NoRecord","Error",),)
                elif procedureResult[0][0] == 'Session Expired'.encode(encoding='unicode-escape', errors='strict'):
                    procedureResult = []
                    procedureResult.append({0: "SessionExpired", 1: "Error"})
            except Exception, err:
                logging.info("ConnectDB.py : - Error in Procedure Calling : " + traceback.format_exc())
                # procedureResult = (('ProcedureCallError','Error',),)
                procedureResult = []
                procedureResult.append({0: "ProcedureCallError", 1: "Error"})
        except Exception, err:
            logging.info("Error In DataBase Connection : " + traceback.format_exc())
            # procedureResult = (('DataBaseConnectionError','Error',),)
            procedureResult = []
            procedureResult.append({0: "DataBaseConnectionError", 1: "Error"})
        # disconnect from server
        finally:
            try:
                cursor.close()
                db.close()
            except Exception, err:
                logging.info("Error In Closing Connection : " + traceback.format_exc())
        return procedureResult
Two possible improvements:
Your startup code for instances may take too long. Check the startup time and, if possible, use warmup requests to reduce it. Since increasing your idle instances seems to help, your startup time may be too long.
A better approach would be to call external services (e.g. talk to Google Calendar) in a Task Queue outside of the user request scope. This gives you a 10-minute deadline instead of the 60s deadline for user requests, as sketched below.
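A minimal sketch of pushing the slow work onto the (Python 2) App Engine Task Queue, where the /worker handler path and the request parameter are assumptions:

import webapp2
from google.appengine.api import taskqueue

class ApiHandler(webapp2.RequestHandler):
    def post(self):
        # hand the slow database work to a push queue; tasks get a 10-minute deadline
        taskqueue.add(url='/worker', params={'query': self.request.get('query')})
        self.response.set_status(202)  # respond right away, within the 60s user-request deadline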
