pudo / dataset

Easy-to-use data handling for SQL data stores with support for implicit table creation, bulk loading, and transactions.
https://dataset.readthedocs.org/
MIT License

Why doesn't it automatically recycle connections in the connection pool? #425

Open zhang570221322 opened 1 year ago

zhang570221322 commented 1 year ago
#%%
import threading
import time
import dataset

engine_kwargs={"echo":False, "pool_size":3, "max_overflow":2, "pool_recycle":180,"pool_pre_ping":True,"pool_use_lifo":True}
config_str='postgresql://postgres:123456@127.0.0.1:5432/test'
db = dataset.connect(config_str,engine_kwargs=engine_kwargs)

print(len(db.connections))  # 0: no thread has executed a statement yet
def insert_data():
    with db as tx:  # dataset transaction: begins on enter, commits on exit
        print(len(db.connections), "insert", threading.current_thread().name)
        tx['user'].insert(dict(name='John Doe', age=46, country='China'))
        print(len(db.connections), "end", threading.current_thread().name)

threads = []
for _ in range(5):
    thread = threading.Thread(target=insert_data)
    threads.append(thread)
    thread.start()

for thread in threads:
    thread.join()

time.sleep(0.5)
print(len(db.connections))  # still 5: no thread's connection was released
print("start block")

# QueuePool limit of size 3 overflow 2 reached, connection timed out, timeout 30.00 
print(db['user'].count())
# %%

0
1 insert Thread-1
2 insert Thread-2
3 insert Thread-3
4 insert Thread-4
5 insert Thread-5
5 end Thread-5
5 end Thread-1
5 end Thread-2
5 end Thread-3
5 end Thread-4
5
start block
Traceback (most recent call last):
sqlalchemy.exc.TimeoutError: QueuePool limit of size 3 overflow 2 reached, connection timed out, timeout 30.00 (Background on this error at: https://sqlalche.me/e/14/3o7r)
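What the counters show: dataset opens one SQLAlchemy connection per thread (cached in db.connections, keyed by thread ident) the first time that thread executes a statement, and it never checks that connection back into the pool; only Database.close() releases them. The five worker threads therefore pin all pool_size + max_overflow = 3 + 2 = 5 connections, and the main thread's count() blocks for the full 30-second pool timeout waiting for a sixth checkout. For contrast, here is a minimal sketch in plain SQLAlchemy 1.4 (no dataset layer; same table and credentials as the report above) showing that the pool itself recycles fine when each unit of work checks its connection back in:

import threading
from sqlalchemy import create_engine, text

# Same pool limits as in the report above, but no dataset layer.
engine = create_engine(
    'postgresql://postgres:123456@127.0.0.1:5432/test',
    pool_size=3, max_overflow=2, pool_recycle=180, pool_pre_ping=True,
)

def insert_data():
    # engine.begin() checks a connection out of the pool, commits on
    # success, and checks the connection back in when the block exits.
    with engine.begin() as conn:
        conn.execute(
            text('INSERT INTO "user" (name, age, country) VALUES (:n, :a, :c)'),
            {"n": "John Doe", "a": 46, "c": "China"},
        )

threads = [threading.Thread(target=insert_data) for _ in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()

# No timeout: every connection was returned to the pool, so this sixth
# checkout succeeds immediately.
with engine.connect() as conn:
    print(conn.execute(text('SELECT count(*) FROM "user"')).scalar())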

rrr2rrr commented 10 months ago

https://github.com/pudo/dataset/issues/418
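
Until dataset releases per-thread connections itself, one stop-gap is to scope each Database to a unit of work and close it explicitly, since Database.close() closes the cached connections and disposes the engine. A minimal sketch (note that each dataset.connect() builds its own engine and pool, so this trades pool starvation for per-call connection setup cost):

import dataset

CONFIG_STR = 'postgresql://postgres:123456@127.0.0.1:5432/test'

def insert_data():
    # A short-lived Database per unit of work; close() is the only call
    # that releases dataset's cached per-thread connection.
    db = dataset.connect(CONFIG_STR)
    try:
        with db as tx:
            tx['user'].insert(dict(name='John Doe', age=46, country='China'))
    finally:
        db.close()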