diff --git a/backend/danswer/db/engine.py b/backend/danswer/db/engine.py
index 94b5d0123..49042dd64 100644
--- a/backend/danswer/db/engine.py
+++ b/backend/danswer/db/engine.py
@@ -137,8 +137,8 @@ def get_sqlalchemy_engine() -> Engine:
         )
         _SYNC_ENGINE = create_engine(
             connection_string,
-            pool_size=40,
-            max_overflow=10,
+            pool_size=5,
+            max_overflow=0,
             pool_pre_ping=POSTGRES_POOL_PRE_PING,
             pool_recycle=POSTGRES_POOL_RECYCLE,
         )
@@ -156,8 +156,8 @@ def get_sqlalchemy_async_engine() -> AsyncEngine:
             connect_args={
                 "server_settings": {"application_name": POSTGRES_APP_NAME + "_async"}
             },
-            pool_size=40,
-            max_overflow=10,
+            pool_size=5,
+            max_overflow=0,
             pool_pre_ping=POSTGRES_POOL_PRE_PING,
             pool_recycle=POSTGRES_POOL_RECYCLE,
         )
diff --git a/backend/danswer/main.py b/backend/danswer/main.py
index a5abb8f28..d064f3b72 100644
--- a/backend/danswer/main.py
+++ b/backend/danswer/main.py
@@ -51,7 +51,6 @@ from danswer.db.credentials import create_initial_public_credential
 from danswer.db.document import check_docs_exist
 from danswer.db.engine import get_sqlalchemy_engine
 from danswer.db.engine import init_sqlalchemy_engine
-from danswer.db.engine import warm_up_connections
 from danswer.db.index_attempt import cancel_indexing_attempts_past_model
 from danswer.db.index_attempt import expire_index_attempts
 from danswer.db.llm import fetch_default_provider
@@ -369,7 +368,7 @@ async def lifespan(app: FastAPI) -> AsyncGenerator:
     logger.notice("Generative AI Q&A disabled")
 
     # fill up Postgres connection pools
-    await warm_up_connections()
+    # await warm_up_connections()
 
     # We cache this at the beginning so there is no delay in the first telemetry
     get_or_generate_uuid()