I am building some RestAPI's using python FastAPI and getting some data from MSSQL server using SQLAlchemy.
I am trying to insert data into existing stored procedure in MSSQL server.
The stored procedure AcctsCostCentersAddV001 will take ArDescription, EngDescription, ErrMsg as parameters and return Output as an output.
MSSQL Server Code:
CREATE Proc [dbo].[AcctsCostCentersAddV001] #ArDescription nvarchar(100), #EngDescription varchar(100), #ErrMsg nvarchar(100) Output
As
Insert Into AcctsCostCenters (ArDescription, EngDescription) Values(#ArDescription, #EngDescription)
Set #ErrMsg = 'Test'
GO
My Python code:
from fastapi import APIRouter, Request, Depends, HTTPException
from fastapi.responses import JSONResponse
# # # # # # # # # # SQL # # # # # # # # # #
from sqlalchemy import text
from sqlalchemy.exc import ProgrammingError
# # # # # # # # # # Files # # # # # # # # # #
from dependencies import get_db
from sqlalchemy.engine import create_engine
from internal.config import username, password, SQL_SERVER, database_name
#router.get("/create/CostCenter/")
async def forms(request: Request, db=Depends(get_db)):
try:
connection_string = f'mssql://{username}:{password}#{SQL_SERVER}/{database_name}?
driver=ODBC+Driver+17+for+SQL+Server'
engine = create_engine(connection_string, pool_pre_ping=True)
connection = engine.raw_connection()
try:
cursor_obj = connection.cursor()
query = "Declare #ErrMsg nvarchar(100) Exec AcctsCostCentersAddV001 'Test', 'Test', #ErrMsg Output Print #ErrMsg"
cursor_obj.execute(text(query))
results = list(cursor_obj.fetchall())
cursor_obj.close()
connection.commit()
print(results)
finally:
connection.close()
except IndexError:
raise HTTPException(status_code=404, detail="Not found")
except ProgrammingError as e:
print(e)
raise HTTPException(status_code=400, detail="Invalid Entry")
except Exception as e:
print(e)
return UnknownError(
"unknown error caused by CostCenter API request handler", error=e)
return JSONResponse(results)
For some reason, this code doesn't raise any exceptions but yet i keep getting
('The SQL contains 0 parameter markers, but 3 parameters were supplied', 'HY000')
I have even tried this
cursor_obj.execute("Declare #ErrMsg nvarchar(100) Exec AcctsCostCentersAddV001 'Test', 'Test', #ErrMsg")
but i get back
No results. Previous SQL was not a query.
i tried wrapping the query in text() but i got The first argument to execute must be a string or unicode query.
but when i go into MSSQL Server and run
Declare #ErrMsg nvarchar(100)
Exec AcctsCostCentersAddV001 'Test', 'Test', #ErrMsg Output
Print #ErrMsg
It runs without any problem.
My Env:
Ubuntu 21.04 VPS by OVH
I hope i am providing everything you guys need and let me know if missed anything, Thanks!
btw I know i am connecting to the db twice :3
(Edited):
I am actually connecting to the DB from a datapase.py file, and i am connecting again in the function just for testing.
My database.py:
import time
from colorama import Fore
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine import create_engine
from internal.config import username, password, SQL_SERVER, database_name
try:
connection_string = f'mssql://{username}:{password}#{SQL_SERVER}/{database_name}?driver=ODBC+Driver+17+for+SQL+Server'
engine = create_engine(connection_string, pool_pre_ping=True)
connection = engine.raw_connection()
print(f"{Fore.CYAN}DB{Fore.RESET}: Database Connected. ")
SessionLocal = sessionmaker(autocommit=True, autoflush=False, bind=engine)
Base = declarative_base()
except Exception as e:
print(f"{Fore.RED}DB{Fore.RESET}: Couldn't connect to Database.", e)
Related
I have this database connection in a fast API app.
I am using multiple binds to the same session, I followed the documentation here:
Query the db:
method1: working
db.query(Model).all()
method2: not working and throwing the following error:
db.execute("SELECT * from ...");
Exception has occurred: UnboundExecutionError
Could not locate a bind configured on SQL expression or this Session.
This is the database connection code...
Can you help me get method2 working?
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import MetaData
from app.settings import (
db1_DATABASE_URL,
db2_DATABASE_URL
)
engine = create_engine(
db1_DATABASE_URL,
echo=False,
pool_recycle=1800,
pool_timeout=20,
pool_pre_ping=True,
pool_size=50,
max_overflow=100,
)
engine2 = create_engine(db2_DATABASE_URL)
Base = declarative_base()
mroi_Base = automap_base()
mroi_Base.prepare(engine2, reflect=True)
SessionLocal = sessionmaker(autocommit=False, autoflush=False)
SessionLocal.configure(binds={Base: engine, mroi_Base: engine2})
def get_db():
db = None
try:
db = SessionLocal()
yield db
finally:
db.close()
def get_db2():
db2 = None
try:
db2 = SessionLocal()
yield db2
finally:
db2.close()
Remove the below mentioned line as we would be configuring the engine while creating the respective session instances.
SessionLocal.configure(binds={Base: engine, mroi_Base: engine2})
Initialize the session class with respective engine values.
def get_db():
db = None
try:
db = SessionLocal(engine=engine)
yield db
finally:
db.close()
def get_db2():
db2 = None
try:
db2 = SessionLocal(engine=engine2)
yield db2
finally:
db2.close()
I am using a sqlalchemy engine connection as a pytest fixture as a way to test functions that require database interaction. When I run the test cases individually by running pytest tests/filename.py::test_get_token_data or pytest tests/filename.py::test_create the test passes; however, when running the entire test case, pytest tests/filename.py, I get the following error:
E sqlalchemy.exc.OperationalError: (psycopg2.errors.AdminShutdown) terminating connection due to administrator command
E server closed the connection unexpectedly
E This probably means the server terminated abnormally
E before or while processing the request.
(Background on this error at: http://sqlalche.me/e/13/e3q8)
To my understanding, after each test, the database is suppose to be cleared (which I've confirmed), however, I do not understand the error. My code below.
conftest.py
#pytest.fixture
def db_connection(test_config, admin_db_connection):
config = test_config()
engine = sqlalchemy.create_engine(config.DB_URL)
connection = engine.connect()
yield connection
connection.close()
# clear database
from psycopg2.extensions import AsIs # handle SQL quoting
with admin_db_connection.cursor() as curs:
curs.execute("drop database %s with (force);", (AsIs(config.DB_NAME),))
curs.execute(
"create database %s template vtag_template;", (AsIs(config.DB_NAME),)
)
filename.py
import sqlalchemy as sa
#pytest.fixture
def db_injection(db_connection):
with db_connection.begin():
some_value = db_connection.execute(
sa.sql.text(
"""insert into general.some_table as t (some_id, name, description, is_active) values (:some_id, :name, :description, :is_active) returning t.some_id;
"""
),
tenant_id='1234',
description="TEST",
is_active=True,
name="TEST",
)
tenant_id_ = some_value.first()[0]
#pytest.fixture
def some_function(db_connection):
with db_connection.begin():
some_table = db_connection.execute(
sa.sql.text(
"""
SELECT e.name
FROM general.some_table e
WHERE e.id = 1234
"""
)
)
return some_table.first()
def test_get_token_data(client, headers_with_token, db_injection, some_function):
token = some_function.name
#API is using the same db connection
response = client.get(f"/api/v2/{token}", headers=headers_with_token)
assert response.status_code == 200
def test_create(client, img, b_64, headers_with_token, db_injection):
items = Items(
user_id=1234,
template_id=67,
b_64=b_64,
)
data = {
"json": json.dumps(asdict(items)),
"file": ("some_name", img, "multipart/form-data"),
}
response = client.post(
"/api/v2/create",
data=data,
follow_redirects=True,
content_type="multipart/form-data",
headers=headers_with_token
)
assert response.status_code == 200
The issue was due to the application having an unbinded Session being used. Since most of our application us using sqlalchemy's Engine API, we decide with refactor in using raw sql via with engine.begin(). Another alternative solution could have been adding the engine into the session
I've been using the same class for months for connecting to a SQL Server database, running queries, inserting data into staging tables, etc. Just yesterday whenever my code tries to insert into a staging table, I get the error:
TypeError: issubclass() arg 2 must be a class or tuple of classes
Debugging I learned that this is happening in the method _relationships_for_fks in the automap.py module (sqlalchemy library). Specifically this block fails because referred_cls is None and this is not supported in the issubclass method.
if local_cls is not referred_cls and issubclass(
local_cls, referred_cls):
I'm on Python 3.6 and sqlalchemy version 1.2.15 (and haven't upgraded or anything lately). I have changed no code and this error has just started. Below is the class I'm using for all SQL operations in my code. Any ideas are MUCH appreciated as I cannot figure out why I keep getting this error (oh yeah and it's not always consistent - every 3 or so times, the code runs just fine). The method that fails is get_table_class called from the method save_dataframe_to_table which is called in various other places throughout my code (whenever I have to save data to a table in SQL Server I use this). The specific line of code that errors in this class is Base.prepare(engine, reflect=True).
#!/usr/bin/python
""" Connect and interact with a SQL Server database
Contains a class used for connecting and interacting with a SQL Server database.
"""
from common.Util.Logging import Logging
from common.Util.OSHelpers import get_log_filepath
import pandas
import urllib
import pyodbc
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import sessionmaker
Base = automap_base()
class SqlDatabase:
"""Connect and interact with a SQL Server database"""
def __init__(self, server, database, driver, port, username, password, logging_obj=None):
""" Create a common.DataAccess.SqlDatabase.SqlDatabase object
Args:
server: (str) name of the SQL Server
database: (str) name of the database on the SQL Server
driver: (str) name of the driver for use in the connection string, e.g. '{ODBC Driver 13 for SQL Server}'
port: (str) SQL Server port number (typically this is 1433)
username: (str) SQL Server username (leave blank to use Windows Authentication)
password: (str) SQL Server password (leave blank to use Windows Authentication)
logging_obj: (common.Util.Logging.Logging) initialized logging object
"""
# Set class variables
if logging_obj is None:
log_filename = get_log_filepath('Python App')
logging_obj = Logging(name=__name__, log_filename=log_filename, log_level_str='INFO')
self.logging_obj = logging_obj
self.server = server
self.database = database
self.driver = driver
self.port = port
self.username = username
self.password = password
self.connection_string = 'Driver=' + self.driver \
+ ';SERVER=' + self.server \
+ ',' + self.port \
+ ';DATABASE=' + self.database \
+ ';UID=' + self.username \
+ ';PWD=' + self.password
# Test connection
self.logging_obj.log(self.logging_obj.DEBUG, "method='common.DataAccess.SqlDatabase.__init__' message='Testing connection'")
conn = self.open_connection()
conn.close()
# Log initialization success
log_msg = """
method='common.DataAccess.SqlDatabase.__init__'
message='Initialized a SqlDatabase object'
server='{server}'
database='{database}'
driver='{driver}'
port='{port}'
username='{username}'
password='{password}'
connection_string='{connection_string}'
""".format(server=self.server,
database=self.database,
driver=self.driver,
port=self.port,
username=self.username,
password='*'*len(self.password),
connection_string=self.connection_string)
self.logging_obj.log(self.logging_obj.INFO, log_msg)
def open_connection(self):
""" Open connection
Opens a connection to a SQL Server database.
Returns:
conn: (pyodbc.Connection) connection to a SQL Server database
"""
self.logging_obj.log(self.logging_obj.DEBUG, "method='common.DataAccess.SqlDatabase.open_connection' message='Opening SQL Server connection'")
try:
conn = pyodbc.connect(self.connection_string)
except Exception as ex:
self.logging_obj.log(self.logging_obj.ERROR,
"""
method='common.DataAccess.SqlDatabase.open_connection'
message='Error trying to open SQL Server connection'
exception_message='{ex_msg}'
connection_string='{cxn_str}'
server='{server}'
port='{port}'
username='{username}'
password='{password}'
database='{database}'""".format(ex_msg=str(ex),
cxn_str=self.connection_string,
server=self.server,
port=self.port,
username=self.username,
password='*'*len(self.password),
database=self.database))
raise ex
else:
self.logging_obj.log(self.logging_obj.DEBUG,
"""
method='common.DataAccess.SqlDatabase.open_connection'
message='Successfully opened SQL Server connection'
connection_string='{cxn_str}'
server='{server}'
username='{username}'
password='{password}'
database='{database}'""".format(cxn_str=self.connection_string,
server=self.server,
username=self.username,
password='*' * len(self.password),
database=self.database))
return conn
def get_engine(self):
""" Create a Sqlalchemy engine
Returns:
engine: ()
"""
self.logging_obj.log(self.logging_obj.DEBUG, "message='Creating a sqlalchemy engine'")
params = urllib.parse.quote_plus(self.connection_string)
try:
engine = sqlalchemy.create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
except Exception as ex:
self.logging_obj.log(self.logging_obj.ERROR,
"""
method='common.DataAccess.SqlDatabase.get_engine'
message='Error trying to create a sqlalchemy engine'
exception_message='{ex_msg}'
connection_string='{conn_str}'""".format(ex_msg=str(ex),
conn_str=self.connection_string))
raise ex
else:
self.logging_obj.log(self.logging_obj.DEBUG,
"""
method='common.DataAccess.SqlDatabase.get_engine'
message='Successfully created a sqlalchemy engine'
connection_string='{conn_str}'
""".format(conn_str=self.connection_string))
return engine
def get_result_set(self, query_str):
""" Get a result set as a Pandas dataframe
Gets a result set using the pandas.read_sql method.
Args:
query_str: (str) query string
Returns:
df: (pandas.DataFrame) result set
"""
log_msg = """
method='common.DataAccess.SqlDatabase.get_result_set'
message='Getting a result set'
query_str='{query_str}'
""".format(query_str=query_str)
self.logging_obj.log(self.logging_obj.INFO, log_msg)
conn = self.open_connection()
df = pandas.read_sql(query_str, conn)
conn.close()
log_msg = """
method='common.DataAccess.SqlDatabase.get_result_set'
message='Successfully got a result set'
query_str='{query_str}'
""".format(query_str=query_str)
self.logging_obj.log(self.logging_obj.INFO, log_msg)
return df
def execute_nonquery(self, query_str):
""" Execute a non-query
Executes a non-query such as a CREATE TABLE or UPDATE statement.
Args:
query_str: (str) non-query statement
Returns:
"""
log_msg = """
method='common.DataAccess.SqlDatabase.execute_nonquery'
message='Executing a non-query'
query_str='{query_str}'
""".format(query_str=query_str)
self.logging_obj.log(self.logging_obj.INFO, log_msg)
conn = self.open_connection()
curs = conn.execute(query_str)
curs.commit()
curs.close()
conn.close()
log_msg = """
method='common.DataAccess.SqlDatabase.execute_nonquery'
message='Successfully executed a non-query'
query_str='{query_str}'
""".format(query_str=query_str)
self.logging_obj.log(self.logging_obj.INFO, log_msg)
return None
def to_staging_table(self,
dataframe,
staging_table_name,
insert_index=True,
index_label=None,
if_table_exists='replace',
bulkcopy_chunksize=1000):
""" Puts a pandas.DataFrame into a staging table
Puts a pandas.DataFrame into a staging table.
This uses a bulk copy method to put data from a pandas.DataFrame into a SQL staging table.
Args:
dataframe: (pandas.DataFrame) dataframe with data to copy into a SQL server staging table
staging_table_name: (str) name of the staging table to copy data into
insert_index: (logical) indicates whether or not to insert an index
index_label: (str) indicates the column name of the index - if None, an auto-generated index will be used
if_table_exists: (str) indicates what pandas.DataFrame.to_sql method to use if the table already exists
bulkcopy_chunksize: (int) number of rows to bulk copy at once
Returns:
"""
log_msg = """
method='common.DataAccess.SqlDatabase.to_staging_table'
message='Copying data into a staging table'
staging_table_name='{staging_table_name}'
""".format(staging_table_name=staging_table_name)
self.logging_obj.log(self.logging_obj.INFO, log_msg)
engine = self.get_engine()
try:
pandas.DataFrame.to_sql(
self=dataframe,
name=staging_table_name,
con=engine,
if_exists=if_table_exists,
index=insert_index,
index_label=index_label,
chunksize=bulkcopy_chunksize)
except Exception as ex:
self.logging_obj.log(self.logging_obj.ERROR,
"""
method='common.DataAccess.SqlDatabase.to_staging_table'
message='Error trying to copy data into a staging table'
exception_message='{ex_msg}'
connection_string='{staging_table_name}'""".format(ex_msg=str(ex),
staging_table_name=staging_table_name))
raise ex
else:
self.logging_obj.log(self.logging_obj.DEBUG,
"""
method='common.DataAccess.SqlDatabase.to_staging_table'
message='Successfully Copied data into a staging table'
staging_table_name='{staging_table_name}'
""".format(staging_table_name=staging_table_name))
return None
def truncate_table(self, table_name, schema_name='dbo'):
""" Truncate a table in the SQL database
Usually used to truncate staging tables prior to populating them.
Args:
table_name: (str) name of the table to truncate
schema_name: (str) name of the schema of the table to truncate
Returns:
"""
query_str = "TRUNCATE TABLE {schema_name}.{table_name}".format(schema_name=schema_name, table_name=table_name)
self.execute_nonquery(query_str)
def get_table_class(self, table_name, engine=None):
""" Get a table's class
Args:
engine:
table_name:
Returns:
table_class:
"""
if engine is None:
engine = self.get_engine()
Base.prepare(engine, reflect=True)
base_classes = Base.classes
for index, value in enumerate(base_classes):
class_name = value.__name__
if class_name == table_name:
class_index = index
table_class = list(base_classes)[class_index]
return table_class
def save_dataframe_to_table(self,
dataframe,
table_name,
remove_id_column_before_insert=True):
""" Save a pandas DataFrame to a table in SQL Server
Args:
dataframe: (pandas.DataFrame)
table_name: (str)
Returns:
"""
engine = self.get_engine()
Session = sessionmaker(bind=engine)
session = Session()
table = self.get_table_class(table_name, engine)
if remove_id_column_before_insert:
delattr(table, table_name+"Id") # Id columns should always be <table_name>Id (USANA standard)
dataframe.columns = table.__table__.columns.keys()[1:] # Id columns should always be the first column in table (for simplicity people!)
else:
dataframe.columns = table.__table__.columns.keys()
dataframe = dataframe.where((pandas.notnull(dataframe)), None) # replace NaN with None for the bulk insert
try:
session.bulk_insert_mappings(table, dataframe.to_dict(orient="records"), render_nulls=True)
except IntegrityError as e:
session.rollback()
self.logging_obj.log(self.logging_obj.ERROR, """method='common.DataAccess.SqlDatabase.save_dataframe_to_table'
exception_message='{ex}'""".format(ex=str(e)))
finally:
session.commit()
session.close()
The only other hint/clue into this issue I'm having is that I also just start getting the following warnings (for a whole set of tables in our database). I haven't seen these warnings until yesterday.
SAWarning: This declarative base already contains a class with the same class name and module name as sqlalchemy.ext.automap.WeeklySocialSellingProductMetricsReport, and will be replaced in the string-lookup table.
I had similar problem with Oracle database and it turned out that the reason was difference in letter case of schema name. Automap converts Oracle schema names and table names to lowercase, but in the metadata.reflect(engine, schema='MYSCHEMA') I provided my schema name in uppercase.
As a result, some tables was discovered twice:
as MYSCHEMA.mytable, probably generated by plain table discovery
as myschema.mytable, probably generated by a relationship discovered from another table
and caused warnings:
sqlalchemy\ext\declarative\clsregistry.py:129: SAWarning: This declarative base already contains a class with the same class name and module name as sqlalchemy.ext.automap.my_table_name, and will be replaced in the string-lookup table.
followed by TypeError.
The solution was as simple as changing schema name to lowercase.
This script helped me to spot table duplicates:
engine = create_engine(my_connection_string)
metadata = MetaData()
metadata.reflect(engine, schema='MYSCHEMA') # I'm using WRONG letter case here.
Base = automap_base(metadata=metadata)
# prepend table name with schema name
def classname_for_table(base, tablename, table):
return str(table.fullname.replace(".","__"))
Base.prepare(classname_for_table=classname_for_table)
# and look what's going on
pprint(Base.classes.keys())
So,a after reading every page around for the last 1h i'm still not able to find a solution for this problem.
This is my connection.py file:
from sqlalchemy import create_engine, Table, Column, String, Integer, MetaData
from sqlalchemy.sql import select
class DatabaseConnectionManager:
def __init__(self):
self.host = 'localhost:3306'
self.db = 'xxx'
self.user = 'xxx'
self.pwd = 'xxx'
self.connect_string = 'mysql://{u}:{p}#{s}/{d}'.format(u=self.user,
p=self.pwd,
s=self.host,
d=self.db)
self.metadata = MetaData()
self.engine = create_engine(self.connect_string, echo=False,
pool_size=100, pool_recycle=3600)
self.conn = self.engine.connect()
def insert_table(self, inputs):
self.conn.execute(self.tbl_auctions().insert(), inputs)
# Update 1 : conn.close() removed.
#self.conn.close()
def select_servers(self):
try:
s = select([self.tbl_servers()])
result = self.conn.execute(s)
except:
raise
else:
return result
And this is my bulk_inserter.py file:
import sys
import time
import json
from connector import DatabaseConnectionManager
def build_auctions_list():
server_list = []
db = DatabaseConnectionManager()
# Loop over regions
for server, env in db.select_servers_table():
request_auction_data = json.loads(dump_auction_url(region, realm))
for auction in request_auction_data:
auction_list.append(auction)
db.insert_table(server_list)
if __name__ == '__main__':
start = time.time()
build_auctions_list()
print time.time() - start
So, the problem happens when i try to insert all the bulk data using db.insert_table(server_list) for 2 or more servers returned by the loop for server, env in db.select_servers_table():
But, if the result on that loop is for only one server, the flows happen normally without problems.
So, resuming :
this program retrieve a list of servers from a db table and dumps the json data into the db.
Bulk insert performs well if only one server is retrieved.
if two or more servers , the following error happens:
sqlalchemy.exc.OperationalError: (OperationalError) (2006, 'MySQL server has gone away')
Anyoen have any idea what could be happening? I allready incresed the timeout and buffer size on mysql config file. So i'm not sure what the problem could be...
Update #1 It seems i cant bulk insert array with more than 50k values. I'm still trying to figure out how to do it.
Im writting a flask api using flaskrestful,sqlalchemy, Postgres, nginx,uwsgi. Im a newbie to python.These are my configuration
database.py
from cuewords import app
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.pool import NullPool
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer, String , Text , Boolean , DateTime, MetaData, Table ,Sequence
from sqlalchemy.dialects.postgresql import JSON
Base = declarative_base()
db_name="postgresql+psycopg2://admin:password#localhost:5434/database_name"
from sqlalchemy.orm import sessionmaker
engine = create_engine(db_name,poolclass=NullPool ,echo=True)
Session = sessionmaker(autocommit=False ,bind=engine)
connection = engine.connect()
metadata = MetaData()
api.py
class Webcontent(Resource):
def post(self):
session=Session()
...assign some params...
try:
insert_data=Websitecontent(site_url,publisher_id)
session.add(insert_data)
session.commit()
Websitecontent.update_url(insert_data.id,session)
except:
session.rollback()
raise
finally:
return "Data created "
session.close()
else:
return "some value"
Here im first saving the just the url then saving all the content of the site using boilerpipe later .Idea is to move to queue later
model.py
class Websitecontent(Base):
#classmethod
def update_url(self,id,session):
existing_record=session.query(Websitecontent).filter_by(id=int(id)).first()
data=Processing.processingContent(str(existing_record.url))
#boilerpipe processing the content here
#assigning some data to existing record in session
session.add(existing_record)
session.commit()
Websitecontent.processingWords(existing_record,session)
#classmethod
def processingWords(self,record,session)
...processing
Websitecontent.saveKeywordMapping(session,keyword_url,record)
#classmethod
def saveKeywordMapping(session,keyword_url,record)
session.commit()
session.close()
So this code works perfectly in locally but its doesnt work in production .So when i check pag_stat_activity it show the state "idle in transaction". The app hangs then i have to restart the servers. i dont get it why session.close() does not close the pool connection why its keeping psql transaction state busy . Guys any help would be really appreciated.
You are returning before closing the session:
return "Data created "
session.close()
I think returning inside finally might swallow the exception, as well.