Hi! I have two Postgres clients, and both of them use asyncpg. The first creates a new connection for each request; the second uses a pool of connections. The second, pool-based client looks like this:

import asyncpg
from asyncpg import Pool

class PostgresConnection:
    def __init__(
        self,
        DSN: str = DSN,  # default comes from a module-level DSN connection string
    ):
        self.DSN = DSN
        self.con = None
        self._cursor = None
        self._connection_pool = None

    async def create_pool(self) -> None:
        self._connection_pool = await asyncpg.create_pool(
            dsn=self.DSN,
        )

    async def get_pool(self) -> Pool:
        # Create the pool lazily on first use
        if not self._connection_pool:
            await self.create_pool()
        return self._connection_pool

    async def fetch_all(self, query: str) -> list[dict]:
        pool = await self.get_pool()
        async with pool.acquire() as conn:
            return [dict(row) for row in await conn.fetch(query)]
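I haven't included the first client's code; roughly, it looks like this (a simplified sketch rather than the exact code — it just opens a plain asyncpg connection, and only the names PostgresClient, get_connection() and fetch_all() come from the test below, the rest is assumed):

import asyncpg

class PostgresClient:
    # Hypothetical reconstruction: wraps a single asyncpg connection
    def __init__(self, conn):
        self._conn = conn

    @classmethod
    async def get_connection(cls) -> "PostgresClient":
        # DSN is assumed to be the same module-level connection string as above
        conn = await asyncpg.connect(dsn=DSN)
        return cls(conn)

    async def fetch_all(self, query: str) -> list[dict]:
        return [dict(row) for row in await self._conn.fetch(query)]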
And I made a simple test: run 'SELECT * FROM "user"' 100 times with each client.

First (new-connection client), about 5 seconds:

conn = await PostgresClient.get_connection()
for i in range(100):
    await conn.fetch_all('SELECT * FROM "user"')
Second (pool client), about 10 seconds:

pg_conn = PostgresConnection()
for i in range(100):
    await pg_conn.fetch_all('SELECT * FROM "user"')
In one of the tests I set the maximum pool size to 100 and checked how many connections were actually used: it was 16 or 17 (I checked the IDs of the connections the pool returned). But I don't think that is important in my case; I probably just did something wrong there.
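Roughly, that check looked like this (a sketch, not the exact code I ran: it assumes the PostgresConnection class above, uses get_server_pid() as the connection ID, and fires the queries concurrently):

import asyncio

async def count_used_connections(pg_conn: PostgresConnection, n: int = 100) -> int:
    pool = await pg_conn.get_pool()
    seen_pids = set()

    async def one_query() -> None:
        async with pool.acquire() as conn:
            # each pooled connection maps to one server backend process
            seen_pids.add(conn.get_server_pid())
            await conn.fetch('SELECT * FROM "user"')

    await asyncio.gather(*(one_query() for _ in range(n)))
    return len(seen_pids)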
Why does the connection pool work 2x slower? Is the problem on the db side?