Python Postgres psycopg2 ThreadedConnectionPool exhausted


Solution 1

You need to use a queue on top of your pool.

Something like the following should work:

import gevent, sys, psycopg2, logging
from contextlib import contextmanager
from gevent.queue import Queue
from gevent.socket import wait_read, wait_write
from psycopg2.pool import ThreadedConnectionPool
from psycopg2 import extensions, OperationalError

logger = logging.getLogger(__name__)

poolsize = 100  # maximum number of connections in the pool
pdsn = ''  # put your dsn here

if sys.version_info[0] >= 3:
    integer_types = (int,)
else:
    import __builtin__
    integer_types = (int, __builtin__.long)

   
class ConnectorError(Exception):
    """ This is a base class for all CONNECTOR related exceptions """
    pass

# simplified module-level helpers, e.g. db.fetchall(SQL, arg1, arg2...)
def cursor(): return Pcursor()
def fetchone(PSQL, *args): return Pcursor().fetchone(PSQL, *args)
def fetchall(PSQL, *args): return Pcursor().fetchall(PSQL, *args)
def execute(PSQL, *args): return Pcursor().execute(PSQL, *args)


# singleton connection pool; gets reset if a connection goes bad or drops
_pgpool = None

def pgpool():
    global _pgpool
    if not _pgpool:
        try:
            # PostgresConnectionPool raises ConnectorError on failure,
            # so catch that alongside OperationalError
            _pgpool = PostgresConnectionPool(maxsize=poolsize)
        except (OperationalError, ConnectorError):
            _pgpool = None
    return _pgpool

class Pcursor(object):

    def __init__(self, **kwargs):
        # if the database is down, sit here and retry until it comes back
        while not pgpool():
            logger.debug('Attempting Connection To Postgres...')
            gevent.sleep(1)

    def fetchone(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                # fall back for callers that passed a single sequence/dict of parameters
                cursor.execute(PSQL, args[0])
            except Exception as exc:
                logger.warning('%s: %s',
                               sys._getframe().f_back.f_code.co_name, exc)
            logger.debug(cursor.query)
            return cursor.fetchone()

    def fetchall(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                cursor.execute(PSQL, args[0])
            except Exception as exc:
                logger.warning('%s: %s',
                               sys._getframe().f_back.f_code.co_name, exc)
            logger.debug(cursor.query)
            return cursor.fetchall()

    def execute(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                cursor.execute(PSQL, args[0])
            except Exception as exc:
                logger.warning('%s: %s',
                               sys._getframe().f_back.f_code.co_name, exc)
            # returning from inside a finally block would swallow any
            # in-flight exception, so log and return outside of it
            logger.debug(cursor.query)
            return cursor.query

    def fetchmany(self, PSQL, *args):
        with _pgpool.cursor() as cursor:
            try:
                cursor.execute(PSQL, args)
            except TypeError:
                cursor.execute(PSQL, args[0])
            while True:
                items = cursor.fetchmany()
                if not items:
                    break
                for item in items:
                    yield item

class AbstractDatabaseConnectionPool(object):

    def __init__(self, maxsize=poolsize):
        if not isinstance(maxsize, integer_types):
            raise TypeError('Expected integer, got %r' % (maxsize, ))
        self.maxsize = maxsize
        self.pool = Queue()
        self.size = 0

    def create_connection(self):
        #overridden by PostgresConnectionPool
        raise NotImplementedError()

    def get(self):
        pool = self.pool
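        # hand out an existing connection when the pool is at capacity,
        # or when one is already sitting idle in the queue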
        if self.size >= self.maxsize or pool.qsize():
            return pool.get()

        self.size += 1
        try:
            new_item = self.create_connection()
        except:
            self.size -= 1
            raise
        return new_item

    def put(self, item):
        self.pool.put(item)

    def closeall(self):
        while not self.pool.empty():
            conn = self.pool.get_nowait()
            try:
                conn.close()
            except Exception:
                pass
        # reset the counter so get() can create fresh connections afterwards
        self.size = 0

    @contextmanager
    def connection(self, isolation_level=None):
        conn = self.get()
        try:
            if isolation_level is not None:
                if conn.isolation_level == isolation_level:
                    isolation_level = None
                else:
                    conn.set_isolation_level(isolation_level)
            yield conn
        except:
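            # if this connection died, assume the server went away and
            # drop every pooled connection so fresh ones get created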
            if conn.closed:
                conn = None
                self.closeall()
            raise
        else:
            if conn.closed:
                raise OperationalError("Cannot commit because connection was closed: %r" % (conn, ))
        finally:
            if conn is not None and not conn.closed:
                if isolation_level is not None:
                    conn.set_isolation_level(isolation_level)
                self.put(conn)

    @contextmanager
    def cursor(self, *args, **kwargs):
        isolation_level = kwargs.pop('isolation_level', None)
        with self.connection(isolation_level) as conn:
            try:
                yield conn.cursor(*args, **kwargs)
            except Exception:
                # reset the singleton so the next caller rebuilds the pool,
                # then re-raise instead of silently swallowing the error
                global _pgpool
                _pgpool = None
                raise


class PostgresConnectionPool(AbstractDatabaseConnectionPool):

    def __init__(self, **kwargs):
        try:
            self.pconnect = ThreadedConnectionPool(1, poolsize, dsn=pdsn)
        except Exception:
            global _pgpool
            _pgpool = None
            raise ConnectorError('Database Connection Failed')
        maxsize = kwargs.pop('maxsize', poolsize)
        self.kwargs = kwargs
        AbstractDatabaseConnectionPool.__init__(self, maxsize)

    def create_connection(self):
        # use a local variable so concurrent greenlets don't clobber
        # a shared attribute on the pool object
        conn = self.pconnect.getconn()
        conn.autocommit = True
        return conn


def gevent_wait_callback(conn, timeout=None):
    """A wait callback useful to allow gevent to work with Psycopg."""
    while True:
        state = conn.poll()
        if state == extensions.POLL_OK:
            break
        elif state == extensions.POLL_READ:
            wait_read(conn.fileno(), timeout=timeout)
        elif state == extensions.POLL_WRITE:
            wait_write(conn.fileno(), timeout=timeout)
        else:
            raise ConnectorError("Bad result from poll: %r" % state)

extensions.set_wait_callback(gevent_wait_callback)

Then you can run your queries through the pool like this:

import db
db.Pcursor().execute(PSQL, arg1, arg2, arg3)
# or use the module-level shortcuts defined at the top of the module
db.fetchall(PSQL, arg1, arg2)

Basically I borrowed the gevent example of async Postgres and modified it to support thread pooling via psycopg2.

https://github.com/gevent/gevent/blob/master/examples/psycopg2_pool.py

I added what psycogreen does inside the module, so all you need to do is import it and call the class. Each call stacks a new query onto the queue, but the pool itself never grows beyond its configured size, so you don't run out of connections. This is essentially similar to what PGBouncer does, which I think would also eliminate your problem.

https://pgbouncer.github.io/
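
For comparison, a hypothetical pgbouncer.ini sketch (names and sizes here are placeholders): clients queue up for a small pool of real server connections, much like the Queue in the module above.

[databases]
db = host=localhost dbname=db

[pgbouncer]
listen_addr = 127.0.0.1
listen_port = 6432
auth_type = md5
pool_mode = transaction
max_client_conn = 1000   ; clients that may connect and wait
default_pool_size = 100  ; actual server connections, like poolsize above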

Solution 2

I've struggled to find really detailed information on how the ThreadedConnectionPool works. https://bbengfort.github.io/observations/2017/12/06/psycopg2-transactions.html isn't bad, but it turns out that its claim that getconn blocks until a connection becomes available is incorrect. Checking the code, all ThreadedConnectionPool adds is a lock around the AbstractConnectionPool methods to prevent race conditions. If more than maxconn connections are in use at any point, a "connection pool exhausted" PoolError is raised.
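
A minimal sketch of that failure mode (the DSN is a placeholder); the third getconn fails immediately rather than waiting:

from psycopg2.pool import ThreadedConnectionPool

pool = ThreadedConnectionPool(1, 2, dsn="postgresql://user:password@localhost/db")
a = pool.getconn()
b = pool.getconn()  # both connections now checked out
c = pool.getconn()  # raises PoolError: connection pool exhausted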

If you want something a bit simpler than the accepted answer, further wrapping the methods in a Semaphore providing the blocking until a connection becomes available should do the trick:

from psycopg2.pool import ThreadedConnectionPool
from threading import Semaphore

class ReallyThreadedConnectionPool(ThreadedConnectionPool):
    def __init__(self, minconn, maxconn, *args, **kwargs):
        self._semaphore = Semaphore(maxconn)
        super().__init__(minconn, maxconn, *args, **kwargs)

    def getconn(self, *args, **kwargs):
        self._semaphore.acquire()
        return super().getconn(*args, **kwargs)

    def putconn(self, *args, **kwargs):
        super().putconn(*args, **kwargs)
        self._semaphore.release()
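
A hedged usage sketch (the DSN and query are placeholders): worker threads now block inside getconn until a connection frees up, instead of crashing with PoolError.

from concurrent.futures import ThreadPoolExecutor

pool = ReallyThreadedConnectionPool(1, 8, "postgresql://user:password@localhost/db")

def work(i):
    conn = pool.getconn()  # blocks while all 8 connections are in use
    try:
        cur = conn.cursor()
        cur.execute("SELECT %s", (i,))
        return cur.fetchone()[0]
    finally:
        pool.putconn(conn)  # hands the connection back and releases the semaphore

with ThreadPoolExecutor(max_workers=32) as executor:
    print(sum(executor.map(work, range(100))))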

Solution 3

Your problem here is that you never actually return the connection to the pool; you discard it for good with

tcp.putconn(conn, close=True)

See the documentation here http://initd.org/psycopg/docs/pool.html

If close is True, discard the connection from the pool.

So, if your pool allows at most 800 connections, then after 801 loops you will get the "exhausted" error, because the pool has no connections left to hand out.
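
Note also that in the code from the question, the putconn call comes after the return statement, so it is never reached at all. A minimal sketch of a fixed do_one_query, reusing the tcp pool and query q from the question:

def do_one_query(inputS, inputT):
    conn = tcp.getconn()
    try:
        c = conn.cursor()
        c.execute(q)
        return c.fetchone()
    finally:
        tcp.putconn(conn)  # no close=True: hand the connection back to the pool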

Comments

  • user3768495
    user3768495 over 1 year

    I have looked into several 'too many clients' related topics here but still can't solve my problem, so I have to ask this again, for my specific case.

    Basically, I set up my local Postgres server and need to do tens of thousands of queries, so I used the Python psycopg2 package. Here is my code:

    import psycopg2
    import pandas as pd
    import numpy as np
    from flashtext import KeywordProcessor
    from psycopg2.pool import ThreadedConnectionPool
    from concurrent.futures import ThreadPoolExecutor
    
    df = pd.DataFrame({'S':['California', 'Ohio', 'Texas'], 'T':['Dispatcher', 'Zookeeper', 'Mechanics']})
    # df = pd.concat([df]*10000) # repeat df 10000 times
    
    DSN = "postgresql://User:password@localhost/db"
    tcp = ThreadedConnectionPool(1, 800, DSN)
    
    def do_one_query(inputS, inputT):
        conn = tcp.getconn()
        c = conn.cursor()
    
        q = r"SELECT * from eridata where "State" = 'California' and "Title" = 'Dispatcher' limit 1;"   
    
        c.execute(q)
        all_results = c.fetchall()
        for row in all_results:
            return row
        tcp.putconn(conn, close=True)
    
    cnt=0
    for idx, row in df.iterrows():
    
        cnt+=1
        with ThreadPoolExecutor(max_workers=1) as pool:
            ret = pool.submit(do_one_query,  row["S"], row["T"])
            print(ret.result())
        print(cnt)
    

    The code runs well with a small df. If I repeat df 10000 times, I get an error message saying connection pool exhausted. I thought the connections I used had been closed by this line:

    tcp.putconn(conn, close=True)

    But I guess they actually are not closed? How can I get around this issue?

  • Sam Palmer
    Sam Palmer over 4 years
    just want to bump this answer up some more, was a perfect solution to the misleading 'ThreadedConnectionPool'
  • eatmeimadanish
    eatmeimadanish over 4 years
    which is why I like the queue solution. Your connection doesn't fail if it's blocked waiting, and you still get your response faster than you would by polling for open connections.
  • pasztorpisti
    pasztorpisti over 4 years
    If super().getconn raises an exception then getconn should release the semaphore and re-raise (see the sketch after these comments). With high traffic there is a possibility of having starvation/fairness related issues with the semaphore, but this probably isn't an issue with most applications.
  • Rune Lyngsoe
    Rune Lyngsoe about 4 years
    @pasztorpisti It all depends... you can wrap it in a try/except that releases the semaphore, but if you check the code, that may leave you having made a connection while releasing the semaphore anyway (not that there is much that can go wrong). So the question is which is worse: having too many connections open, or unreleased semaphores. Or you can take the view that it's not your problem; your code does it right, and you can't be blamed for the module not doing likewise.
  • pasztorpisti
    pasztorpisti about 4 years
    @RuneLyngsoe In general I think it's reasonable to assume a "transactional" behaviour in case of functions that allocate resources. When I open a file the open function shouldn't (or mustn't?) leak resources if it raises an exception. I expect a similar behaviour from super().getconn and your getconn too. super().getconn might be buggy (and might leak resources in case of exceptions) but why would you put/leave a bug in your code and brush it off by saying "whatever, super().getconn MIGHT be buggy anyway..." :-D A semaphore increment is also a resource that shouldn't be leaked.
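
For reference, a minimal sketch of the exception-safe variant pasztorpisti describes, applied to the class from Solution 2 (the try/except in getconn is the only change):

from psycopg2.pool import ThreadedConnectionPool
from threading import Semaphore

class ReallyThreadedConnectionPool(ThreadedConnectionPool):
    def __init__(self, minconn, maxconn, *args, **kwargs):
        self._semaphore = Semaphore(maxconn)
        super().__init__(minconn, maxconn, *args, **kwargs)

    def getconn(self, *args, **kwargs):
        self._semaphore.acquire()
        try:
            return super().getconn(*args, **kwargs)
        except Exception:
            self._semaphore.release()  # don't leak the permit if getconn fails
            raise

    def putconn(self, *args, **kwargs):
        super().putconn(*args, **kwargs)
        self._semaphore.release()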