Python Postgres psycopg2 ThreadedConnectionPool exhausted

I have looked into several 'too many clients' related topics here but still can't solve my problem, so I have to ask again, for my specific case.
Basically, I set up my local Postgres server and need to do tens of thousands of queries, so I used the Python psycopg2 package. Here is my code:
import psycopg2
import pandas as pd
import numpy as np
from flashtext import KeywordProcessor
from psycopg2.pool import ThreadedConnectionPool
from concurrent.futures import ThreadPoolExecutor

df = pd.DataFrame({'S': ['California', 'Ohio', 'Texas'], 'T': ['Dispatcher', 'Zookeeper', 'Mechanics']})
# df = pd.concat([df]*10000) # repeat df 10000 times
DSN = "postgresql://User:password@localhost/db"
tcp = ThreadedConnectionPool(1, 800, DSN)

def do_one_query(inputS, inputT):
    conn = tcp.getconn()
    c = conn.cursor()
    q = r"""SELECT * from eridata where "State" = 'California' and "Title" = 'Dispatcher' limit 1;"""
    c.execute(q)
    all_results = c.fetchall()
    for row in all_results:
        return row
    tcp.putconn(conn, close=True)

cnt = 0
for idx, row in df.iterrows():
    cnt += 1
    with ThreadPoolExecutor(max_workers=1) as pool:
        ret = pool.submit(do_one_query, row["S"], row["T"])
        print(ret.result())
    print(cnt)
The code runs well with a small df. If I repeat df 10000 times, I get an error message saying the connection pool is exhausted. I thought the connections I used had been closed by this line:
tcp.putconn(conn, close=True)
But I guess they actually are not closed? How can I get around this issue?

I've struggled to find really detailed information on how the ThreadedConnectionPool works. https://bbengfort.github.io/observations/2017/12/06/psycopg2-transactions.html isn't bad, but it turns out that its claim that getconn blocks until a connection becomes available is incorrect. Checking the code, all ThreadedConnectionPool adds is a lock around the AbstractConnectionPool methods to prevent race conditions. If more than maxconn connections are in use at any point, the connection pool exhausted PoolError is raised.
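For example (a minimal sketch, assuming a reachable local database; the DSN is hypothetical), the non-blocking behaviour is easy to demonstrate: on a pool with maxconn=2, a third getconn() raises immediately instead of waiting:

from psycopg2.pool import ThreadedConnectionPool, PoolError

pool = ThreadedConnectionPool(1, 2, "dbname=test user=postgres")  # hypothetical DSN
c1 = pool.getconn()
c2 = pool.getconn()
try:
    pool.getconn()  # does not block until c1/c2 are returned
except PoolError as e:
    print(e)  # -> connection pool exhausted
finally:
    pool.putconn(c1)
    pool.putconn(c2)
    pool.closeall()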
If you want something a bit simpler than the accepted answer, wrapping the methods in a Semaphore that provides the blocking until a connection becomes available should do the trick:
from psycopg2.pool import ThreadedConnectionPool as _ThreadedConnectionPool
from threading import Semaphore

class ThreadedConnectionPool(_ThreadedConnectionPool):
    def __init__(self, minconn, maxconn, *args, **kwargs):
        self._semaphore = Semaphore(maxconn)
        super().__init__(minconn, maxconn, *args, **kwargs)

    def getconn(self, *args, **kwargs):
        self._semaphore.acquire()
        try:
            return super().getconn(*args, **kwargs)
        except:
            self._semaphore.release()
            raise

    def putconn(self, *args, **kwargs):
        try:
            super().putconn(*args, **kwargs)
        finally:
            self._semaphore.release()

    # closeall is inherited as-is. This means the Semaphore does not get
    # reset, but neither do the core structures for maintaining the pool
    # in the original ThreadedConnectionPool, so a pool is not intended
    # to be reused once closed.
Note that connection pools, both plain and threaded, only come with the three methods getconn, putconn and closeall, and nothing fancy like context management. So the above covers all existing functionality.
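If you do want context management, a thin wrapper is easy to add yourself (a sketch, not part of psycopg2's API); it also guarantees that putconn runs even if the body raises:

from contextlib import contextmanager

@contextmanager
def pooled_connection(pool):
    # with the Semaphore subclass above, getconn blocks until a connection is free
    conn = pool.getconn()
    try:
        yield conn
    finally:
        pool.putconn(conn)

pool = ThreadedConnectionPool(1, 10, "dbname=test user=postgres")  # hypothetical DSN
with pooled_connection(pool) as conn:
    with conn.cursor() as c:
        c.execute("SELECT 1;")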

You need to use a queue on top of your pool.
Something like the following should work:
import gevent, sys, random, psycopg2, logging
from contextlib import contextmanager
from gevent.queue import Queue
from gevent.socket import wait_read, wait_write
from psycopg2.pool import ThreadedConnectionPool
from psycopg2 import extensions, OperationalError

logger = logging.getLogger(__name__)
poolsize = 100  # maximum number of connections
pdsn = ''  # put your dsn here

if sys.version_info[0] >= 3:
    integer_types = (int,)
else:
    import __builtin__
    integer_types = (int, __builtin__.long)
class ConnectorError(Exception):
    """This is a base class for all CONNECTOR related exceptions."""
    pass

# simplified calls, e.g. db.fetchall(SQL, arg1, arg2...)
def cursor(): return Pcursor()
def fetchone(PSQL, *args): return Pcursor().fetchone(PSQL, *args)
def fetchall(PSQL, *args): return Pcursor().fetchall(PSQL, *args)
def execute(PSQL, *args): return Pcursor().execute(PSQL, *args)

# singleton connection pool, gets reset if a connection is bad or drops
_pgpool = None

def pgpool():
    global _pgpool
    if not _pgpool:
        try:
            _pgpool = PostgresConnectionPool(maxsize=poolsize)
        except psycopg2.OperationalError:
            _pgpool = None
    return _pgpool
class Pcursor(object):
    def __init__(self, **kwargs):
        # in case of a lost connection, sit and wait until it's back online
        global _pgpool
        if not _pgpool:
            while not _pgpool:
                try:
                    pgpool()
                except:
                    logger.debug('Attempting Connection To Postgres...')
                    gevent.sleep(1)

    def fetchone(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                cursor.execute(PSQL, args[0])
            except Exception as exc:
                print(sys._getframe().f_back.f_code)
                print(sys._getframe().f_back.f_code.co_name)
                logger.warning(str(exc))
            logger.debug(cursor.query)
            return cursor.fetchone()

    def fetchall(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                cursor.execute(PSQL, args[0])
            except Exception as exc:
                print(sys._getframe().f_back.f_code)
                print(sys._getframe().f_back.f_code.co_name)
                logger.warning(str(exc))
            logger.debug(cursor.query)
            return cursor.fetchall()

    def execute(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                cursor.execute(PSQL, args[0])
            except Exception as exc:
                print(sys._getframe().f_back.f_code)
                print(sys._getframe().f_back.f_code.co_name)
                logger.warning(str(exc))
            finally:
                logger.debug(cursor.query)
                return cursor.query

    def fetchmany(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                cursor.execute(PSQL, args[0])
            while 1:
                items = cursor.fetchmany()
                if not items:
                    break
                for item in items:
                    yield item
class AbstractDatabaseConnectionPool(object):
    def __init__(self, maxsize=poolsize):
        if not isinstance(maxsize, integer_types):
            raise TypeError('Expected integer, got %r' % (maxsize, ))
        self.maxsize = maxsize
        self.pool = Queue()
        self.size = 0

    def create_connection(self):
        # overridden by PostgresConnectionPool
        raise NotImplementedError()

    def get(self):
        pool = self.pool
        if self.size >= self.maxsize or pool.qsize():
            return pool.get()
        self.size += 1
        try:
            new_item = self.create_connection()
        except:
            self.size -= 1
            raise
        return new_item

    def put(self, item):
        self.pool.put(item)

    def closeall(self):
        while not self.pool.empty():
            conn = self.pool.get_nowait()
            try:
                conn.close()
            except Exception:
                pass

    @contextmanager
    def connection(self, isolation_level=None):
        conn = self.get()
        try:
            if isolation_level is not None:
                if conn.isolation_level == isolation_level:
                    isolation_level = None
                else:
                    conn.set_isolation_level(isolation_level)
            yield conn
        except:
            if conn.closed:
                conn = None
                self.closeall()
            raise
        else:
            if conn.closed:
                raise OperationalError("Cannot commit because connection was closed: %r" % (conn, ))
        finally:
            if conn is not None and not conn.closed:
                if isolation_level is not None:
                    conn.set_isolation_level(isolation_level)
                self.put(conn)

    @contextmanager
    def cursor(self, *args, **kwargs):
        isolation_level = kwargs.pop('isolation_level', None)
        with self.connection(isolation_level) as conn:
            try:
                yield conn.cursor(*args, **kwargs)
            except:
                global _pgpool
                _pgpool = None
                del(self)
class PostgresConnectionPool(AbstractDatabaseConnectionPool):
    def __init__(self, **kwargs):
        try:
            self.pconnect = ThreadedConnectionPool(1, poolsize, dsn=pdsn)
        except:
            global _pgpool
            _pgpool = None
            raise ConnectorError('Database Connection Failed')
        maxsize = kwargs.pop('maxsize', None)
        self.kwargs = kwargs
        AbstractDatabaseConnectionPool.__init__(self, maxsize)

    def create_connection(self):
        self.conn = self.pconnect.getconn()
        self.conn.autocommit = True
        return self.conn

def gevent_wait_callback(conn, timeout=None):
    """A wait callback useful to allow gevent to work with Psycopg."""
    while 1:
        state = conn.poll()
        if state == extensions.POLL_OK:
            break
        elif state == extensions.POLL_READ:
            wait_read(conn.fileno(), timeout=timeout)
        elif state == extensions.POLL_WRITE:
            wait_write(conn.fileno(), timeout=timeout)
        else:
            raise ConnectorError("Bad result from poll: %r" % state)

extensions.set_wait_callback(gevent_wait_callback)
Then you can run your queries through it like this:
import db
db.Pcursor().execute(PSQL, arg1, arg2, arg3)
Basically, I borrowed the gevent example of an async Postgres connection pool and modified it to support thread pooling via psycopg2:
https://github.com/gevent/gevent/blob/master/examples/psycopg2_pool.py
I added what psycogreen does inside the module, so all you need to do is import it and call the class. Each call to the class stacks a new query on the queue, but the pool is only ever used at a fixed size, so you don't run out of connections. This is essentially similar to what PgBouncer does, which I think would also eliminate your problem:
https://pgbouncer.github.io/
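For example (a sketch, assuming the module above is saved as db.py and pdsn is filled in; the query is made up), thousands of greenlets can share the fixed-size pool without exhausting it:

import gevent
import db  # the module above

def lookup(i):
    # extra arguments are passed through to cursor.execute as parameters
    return db.fetchall("SELECT %s;", i)

jobs = [gevent.spawn(lookup, i) for i in range(10000)]
gevent.joinall(jobs)
print(jobs[0].value)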

Your problem here is that you do not actually return the connection to the pool, but close it for good with
tcp.putconn(conn, close=True)
See the documentation here http://initd.org/psycopg/docs/pool.html
If close is True, discard the connection from the pool.
So if your pool holds at most 800 connections and you discard each one after use, then once all 800 have been handed out and thrown away, the next getconn() raises the "exhausted" error, because the pool has nothing left to give.
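The fix is simply to drop close=True so that the connection actually goes back into the pool:

tcp.putconn(conn)  # return the connection for reuse instead of discarding it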

I think the reason you get PoolError("exhausted connections") may be that you return before putting the connection back whenever all_results is not empty, so the connection is never returned and the pool eventually runs dry:
def do_one_query(inputS, inputT):
    ...
    for row in all_results:
        return row  # <---- returns before putconn when all_results is not empty
    tcp.putconn(conn, close=True)

for idx, row in df.iterrows():
    cnt += 1
    with ThreadPoolExecutor(max_workers=1) as pool:
        ret = pool.submit(do_one_query, row["S"], row["T"])
        print(ret.result())
    print(cnt)
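One way to fix it (a sketch): move the cleanup into a finally block, so putconn always runs even when the function returns from inside the loop, and drop close=True so the connection is reused:

def do_one_query(inputS, inputT):
    conn = tcp.getconn()
    try:
        c = conn.cursor()
        c.execute(q)
        all_results = c.fetchall()
        for row in all_results:
            return row  # the finally block still runs first
    finally:
        tcp.putconn(conn)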
Separately, I made a rough implementation that tries to reconnect and get a new connection when the pool is exhausted or the connection is lost, like below:
import uuid
from psycopg2 import OperationalError
from psycopg2.pool import ThreadedConnectionPool, PoolError

class PostgresConnectionPool:
    def __init__(self, minconn, maxconn, *args, **kwargs):
        self.pool = ThreadedConnectionPool(minconn=minconn, maxconn=maxconn, *args, **kwargs)

    def get_conn(self):
        try:
            # check if the connection is lost or the pool is exhausted
            con = self.pool.getconn()
            cur = con.cursor()
            cur.execute("select 1;")
        except (OperationalError, PoolError) as oe:
            print(f"get pg connection with err:{oe}, reconnect")
            # reconnect via the pool's internal _connect
            key = str(uuid.uuid4())
            con = self.pool._connect(key)
        return con

Related

Psycopg2 auto reconnect inside a class

I've got a class to connect to my database.
import psycopg2, psycopg2.extensions
from parseini import config
import pandas as pd, pandas.io.sql as sqlio

class MyDatabase:
    def __init__(self, name='mydb.ini'):
        self.params = config(filename=name)
        self.my_connection = psycopg2.connect(**self.params)
        self.my_cursor = self.my_connection.cursor()

    def fetch_all_as_df(self, sql_statement):
        return sqlio.read_sql_query(sql_statement, self.my_connection)

    def df_to_sql(self, df):
        table = 'sometable'
        return sqlio.to_sql(df, table, self.my_connection)

    def __del__(self):
        self.my_cursor.close()
        self.my_connection.close()
How could I reconnect to the database and handle psycopg2.OperationalError in my case?
You could make a decorator that tries to reconnect when psycopg2.InterfaceError or psycopg2.OperationalError is raised.
That's just an example of how it could work and it probably needs adjustments:
import time
from functools import wraps
import psycopg2, psycopg2.extensions
from parseini import config      # from the question
import pandas.io.sql as sqlio    # from the question

def retry(fn):
    @wraps(fn)
    def wrapper(*args, **kw):
        cls = args[0]
        for x in range(cls._reconnectTries):
            print(x, cls._reconnectTries)
            try:
                return fn(*args, **kw)
            except (psycopg2.InterfaceError, psycopg2.OperationalError) as e:
                print("\nDatabase Connection [InterfaceError or OperationalError]")
                print("Idle for %s seconds" % (cls._reconnectIdle))
                time.sleep(cls._reconnectIdle)
                cls._connect()
    return wrapper

class MyDatabase:
    _reconnectTries = 5
    _reconnectIdle = 2  # wait seconds before retrying

    def __init__(self, name='mydb.ini'):
        self.my_connection = None
        self.my_cursor = None
        self.params = config(filename=name)
        self._connect()

    def _connect(self):
        self.my_connection = psycopg2.connect(**self.params)
        self.my_cursor = self.my_connection.cursor()

    @retry
    def fetch_all_as_df(self, sql_statement):
        return sqlio.read_sql_query(sql_statement, self.my_connection)

    @retry
    def dummy(self):
        self.my_cursor.execute('select 1+2 as result')
        return self.my_cursor.fetchone()

    @retry
    def df_to_sql(self, df):
        table = 'sometable'
        return sqlio.to_sql(df, table, self.my_connection)

    def __del__(self):
        # Maybe there is a connection but no cursor, whatever, close silently!
        for c in (self.my_cursor, self.my_connection):
            try:
                c.close()
            except:
                pass
db = MyDatabase()
time.sleep(30)  # some time to shut down the database
print(db.dummy())
Output:
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
Database Connection [InterfaceError or OperationalError]
Idle for 2 seconds
(3,)
Note: _connect itself is not decorated, so this code assumes the initial connect always works!
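If the initial connect also needs to survive a down database, the same retry idea can be applied to _connect itself. A hedged sketch, reusing the class attributes from above, that would replace the _connect method:

    def _connect(self):
        for attempt in range(self._reconnectTries):
            try:
                self.my_connection = psycopg2.connect(**self.params)
                self.my_cursor = self.my_connection.cursor()
                return
            except psycopg2.OperationalError:
                if attempt == self._reconnectTries - 1:
                    raise  # give up after the last attempt
                time.sleep(self._reconnectIdle)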

psycopg2 - Unkeyed Connection error in multithreading

I am implementing connection pooling with Postgres for a multithreaded application. I create two pools and store them in a singleton instance, so the same instance can be accessed from the entire application through the class RepositoryConnPool.
The second class is for context management; I made it so that multiple queries can be executed on the same connection inside a with block. I keep two pools because SELECT queries should go to the replica database while data modifications should go to the master database. I am getting the following error when executing in a multithreaded environment. This does not happen when I run without multithreading. Here is the error log:
ERROR ;2020-06-03 11:05:42,826; ThreadPoolExecutor-0_4; modules.table_modify_module; table_modify_module:insert_host;60: trying to put unkeyed connection
ERROR ;2020-06-03 11:05:42,826; ThreadPoolExecutor-0_21; table_modify_module; table_modify_module:insert_host;60: trying to put unkeyed connection
ERROR ;2020-06-03 11:05:42,885; ThreadPoolExecutor-0_12; table_modify_module; table_modify_module:get_inst_detail;367: trying to put unkeyed connection
class RepositoryConnPool:
    __instance = None

    # This override implements the singleton pattern
    def __new__(cls):
        if cls.__instance is None:
            cls.__instance = object.__new__(cls)
        return cls.__instance

    def __init__(self):
        try:
            # I get connection details from Vault
            inventory_master = vm.get_secret_by_base_path("xxxxxx")
            inventory_repl = vm.get_secret_by_base_path("xxxxx")
            self.repo_connection_pool_master = self.setup_conn_pool(inventory_master)
            self.repo_connection_pool_repl = self.setup_conn_pool(inventory_repl)
        except Exception as e:
            logger.error("GET repository address failed: ", exc_info=e)

    def setup_conn_pool(self, inventory_data):
        repo_dbausername = 'dba_admin'
        repo_dbapassword = inventory_data['accounts']['dba_admin']['password']
        repo_host = inventory_data['fqdn']
        repo_port = inventory_data['port']
        try:
            repo_connection_pool = psycopg2.pool.ThreadedConnectionPool(10, 30, user=repo_dbausername,
                                                                        password=repo_dbapassword,
                                                                        host=repo_host,
                                                                        port=repo_port,
                                                                        database="xxxxx")
        except Exception as e:
            logger.error(e)
        return repo_connection_pool

    # These methods fetch a connection from each pool
    def get_master_connection(self):
        return self.repo_connection_pool_master.getconn()

    def get_repl_connection(self):
        return self.repo_connection_pool_repl.getconn()
class RepositoryConnection:
    # Gets connections from RepositoryConnPool
    def __init__(self):
        self.repo_conn_pool = RepositoryConnPool()
        self._repo_conn_master = self.repo_conn_pool.get_master_connection()
        self._repo_conn_repl = self.repo_conn_pool.get_repl_connection()

    def execute_query(self, query, commit=False, call_master=False, retry_count=0):
        cursor = None
        try:
            if query.strip().upper().startswith("SELECT") and call_master == False:
                repo_query = query
                cursor = self._repo_conn_repl.cursor()
                cursor.execute(repo_query)
                if commit:
                    self._repo_conn_repl.commit()
                else:
                    try:
                        result = cursor.fetchall()
                        return result
                    except:
                        logging.info(f"Query executed:{query} and no result was returned")
            else:
                repo_query = query
                cursor = self._repo_conn_master.cursor()
                cursor.execute(repo_query)
                if commit:
                    self._repo_conn_master.commit()
                else:
                    try:
                        result = cursor.fetchall()
                        return result
                    except:
                        logging.info(f"Query executed:{query} and no result was returned")
        # This needs to change to handle connection errors in a different way
        except Exception as e:
            if retry_count < 3:
                logger.warning(f"There was an error during execution of query, retry will be done {query} ")
                return self.execute_query(query, commit, call_master=True, retry_count=retry_count + 1)
            else:
                logger.fatal("All 3 connection retries failed")
        finally:
            if cursor:
                cursor.close()

    # Checks if the connections are up
    def __enter__(self):
        try:
            if self._repo_conn_master and self._repo_conn_repl:
                return self
            else:
                raise Exception()
        except Exception as e:
            logger.error(e)

    # Returns the connections to their pools
    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._repo_conn_master:
            self.repo_conn_pool.repo_connection_pool_master.putconn(self._repo_conn_master)
        if self._repo_conn_repl:
            self.repo_conn_pool.repo_connection_pool_repl.putconn(self._repo_conn_repl)
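For reference, the usage these two classes imply is roughly this (a hypothetical sketch; the table and queries are made up):

with RepositoryConnection() as rc:
    rows = rc.execute_query("SELECT * FROM hosts;")  # SELECTs go to the replica
    rc.execute_query("UPDATE hosts SET active = true;", commit=True)  # writes go to master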

Infinite BrokenPipeErrors when interrupting a custom multiprocessing pool

I like the default Python multiprocessing.Pool, but it's still a pain that there is no easy way to show the current progress being made during the pool's execution. In lieu of that, I attempted to create my own custom multiprocess pool mapper, and it looks like this:
from multiprocessing import Process, Pool, cpu_count
from iterable_queue import IterableQueue

def _proc_action(f, in_queue, out_queue):
    try:
        for val in in_queue:
            out_queue.put(f(val))
    except (KeyboardInterrupt, EOFError):
        pass

def progress_pool_map(f, ls, n_procs=cpu_count()):
    in_queue = IterableQueue()
    out_queue = IterableQueue()
    err = None
    try:
        procs = [Process(target=_proc_action, args=(f, in_queue, out_queue)) for _ in range(n_procs)]
        [p.start() for p in procs]
        for elem in ls:
            in_queue.put(elem)
        in_queue.close()
        bar = 0
        for _ in ls:
            elem = next(out_queue)
            bar += 1
            if bar % 1000 == 0:
                print(bar)
            yield elem
        out_queue.close()
    except (KeyboardInterrupt, EOFError) as e:
        in_queue.close()
        out_queue.close()
        print("Joining processes")
        [p.join() for p in procs]
        print("Closing processes")
        [p.close() for p in procs]
        err = e
    if err:
        raise err
It works fairly well, and prints a value to the console for every 1000 items processed. The progress display itself is something I can worry about in the future. Right now, however, my issue is that when cancelled, the operation does anything but fail gracefully. When I try to interrupt the map, it hangs on Joining processes and never makes it to Closing processes. If I try hitting Ctrl+C again, it causes an infinite spew of BrokenPipeErrors to fill the console until I send an EOF and stop my program.
Here's iterable_queue.py, for reference:
from multiprocessing.queues import Queue
from multiprocessing import get_context, Value
import queue

class QueueClosed(Exception):
    pass

class IterableQueue(Queue):
    def __init__(self, maxsize=0, *, ctx=None):
        super().__init__(
            maxsize=maxsize,
            ctx=ctx if ctx is not None else get_context()
        )
        self.closed = Value('b', False)

    def close(self):
        with self.closed.get_lock():
            if not self.closed.value:
                self.closed.value = True
                super().put((None, False))
                # throws BrokenPipeError in another thread without this sleep in between
                # terrible hack, must fix at some point
                import time; time.sleep(0.01)
                super().close()

    def __iter__(self):
        return self

    def __next__(self):
        try:
            return self.get()
        except QueueClosed:
            raise StopIteration

    def get(self, *args, **kwargs):
        try:
            result, is_open = super().get(*args, **kwargs)
        except OSError:
            raise QueueClosed
        if not is_open:
            super().put((None, False))
            raise QueueClosed
        return result

    def __bool__(self):
        return bool(self.closed.value)

    def put(self, val, *args, **kwargs):
        with self.closed.get_lock():
            if self.closed.value:
                raise QueueClosed
            super().put((val, True), *args, **kwargs)

    def get_nowait(self):
        return self.get(block=False)

    def put_nowait(self, val):
        return self.put(val, block=False)

    def empty_remaining(self, block=False):
        try:
            while True:
                yield self.get(block=block)
        except (queue.Empty, QueueClosed):
            pass

    def clear(self):
        for _ in self.empty_remaining():
            pass

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
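For reference, a usage sketch of the mapper above (square is a made-up worker; the __main__ guard matters because child processes may re-import the module under the spawn start method):

def square(x):
    return x * x

if __name__ == '__main__':
    # consumes the generator; prints 1000, 2000, ... along the way
    results = list(progress_pool_map(square, range(5000)))
    print(len(results))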

Python Multiprocessing Manager - Client unable to reconnect

I am running an application which cannot sit around waiting for the connection to a Python Manager to succeed or fail. The client application should try to send some info to the supposedly running server, and in case it fails, another measure is taken. The problem is that whenever the server is down, the connection attempt takes a long time to return control to the client application, and it cannot waste time waiting for it because there is other stuff to do.
I came up with a scheme where an intermediary object is in charge of the connection, but it only works once. Let's say that the first time, when there is still no connection to the server, this intermediary object handles the connecting part without blocking the client application. If, for some reason, the server goes down and comes back again, I can't get it to work anymore.
Suppose I have the following server:
# server.py
from multiprocessing import Queue, managers
from multiprocessing.queues import Empty
import select
import threading

class RServer(object):
    def __init__(self, items_buffer):
        self.items_buffer = items_buffer

    def receive_items(self):
        while True:
            (_, [], []) = select.select([self.items_buffer._reader], [], [])
            while True:
                try:
                    item = self.items_buffer.get(block=False)
                    # do something with item
                    print('item received')
                except Empty:
                    break

class SharedObjectsManager(managers.BaseManager):
    pass

if __name__ == '__main__':
    items_buffer = Queue()
    remote_server = RServer(items_buffer)
    remote_server_th = threading.Thread(target=remote_server.receive_items)
    remote_server_th.start()
    SharedObjectsManager.register('items_buffer', callable=lambda: items_buffer)
    shared_objects_manager = SharedObjectsManager(address=('localhost', 5001),
                                                  authkey=str.encode('my_server'),
                                                  serializer='xmlrpclib')
    s = shared_objects_manager.get_server()
    s.serve_forever()
And here is the intermediary object to handle the connection:
# bridge.py
from multiprocessing.managers import BaseManager
import threading
import socket

class ConnectionManager():
    def __init__(self):
        self.remote_manager = BaseManager(address=('localhost', 5001),
                                          authkey=b'my_server',
                                          serializer='xmlrpclib')
        self.remote_manager.register('items_buffer')
        self.items_buffer = None
        self.items_buffer_lock = threading.Lock()
        self.connecting = False
        self.connecting_lock = threading.Lock()
        self.connection_started_condition = threading.Condition()

    def transmit_item(self, item):
        try:
            with self.items_buffer_lock:
                self.items_buffer.put(item)
        except (AttributeError, EOFError, IOError):
            with self.connection_started_condition:
                with self.connecting_lock:
                    if not self.connecting:
                        self.connecting = True
                        connect_th = threading.Thread(target=self.connect_to_server,
                                                      name='Client Connect')
                        connect_th.start()
                self.connection_started_condition.notify()
            raise ConnectionError('Connection Error')

    def connect_to_server(self):
        with self.connection_started_condition:
            self.connection_started_condition.wait()
        try:
            self.remote_manager.connect()
        except socket.error:
            pass
        else:
            try:
                with self.items_buffer_lock:
                    self.items_buffer = self.remote_manager.items_buffer()
            except (AssertionError, socket.error):
                pass
        with self.connecting_lock:
            self.connecting = False

class ConnectionError(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
And finally the client application:
# client.py
import time
from bridge import ConnectionManager, ConnectionError

remote_buffer = ConnectionManager()
while True:
    try:
        remote_buffer.transmit_item({'rubish': None})
        print('item sent')
    except ConnectionError:
        # do something else
        print('item not sent')
    # do other stuff
    print('doing other stuff')
    time.sleep(15)
I am surely doing something wrong with the thread, but I can't figure out what. Any ideas?

How to do the Hollywood principle between processes in Python?

I was planning to change my project to multiple processes so I can use more resources. Here's my database module code:
import pymysql
import threading

class tdb:
    def __init__(self):
        self.totalEffected = 0

    def start(self):
        self.conn = pymysql.connect(host='xxxx', port=3306, user='root', passwd='xxxx', db='xxxx', charset='utf8')

    def select(self, sql, args=None):
        cur = self.conn.cursor()
        cur.execute(sql, args)
        result = cur.fetchall()
        cur.close()
        return result

    def execute(self, sql, args=None):
        cur = self.conn.cursor()
        result = cur.execute(sql, args)
        cur.close()
        self.totalEffected += result
        return result

    # def __commit(self, callback):

    def __commitCallback(self, result):
        print('commit result:', result)
        self.conn.close()

    def errorc(self, *args):
        print('error')

    def end(self):
        # init()
        # p.apply_async(self.conn.commit, callback=self.__commitCallback, error_callback=self.errorc)
        if self.totalEffected != 0:
            thread = threading.Thread(target=self.t)
            thread.start()
        else:
            self.conn.close()
        # p.apply(self.conn.commit)
        # self.conn.close()
        # print('result:', result.get())

    def t(self):
        self.conn.commit()
        self.conn.close()
The only operation that really needs handling is conn.commit(). I use a thread to do it so that end() can return immediately. I once tried Pool.apply_async(), but the callback never fired, so I want to know how to make the other process call back into mine, so I don't have to spend time waiting to receive the result.
