Mirror of https://github.com/danswer-ai/danswer.git (synced 2025-04-08 11:58:34 +02:00)

Fix slowness due to hitting async Postgres driver pool limit

parent 6e9d7acb9c
commit 1d0ce49c05
@@ -59,7 +59,7 @@ def get_sqlalchemy_engine() -> Engine:
     global _SYNC_ENGINE
     if _SYNC_ENGINE is None:
         connection_string = build_connection_string(db_api=SYNC_DB_API)
-        _SYNC_ENGINE = create_engine(connection_string, pool_size=50, max_overflow=25)
+        _SYNC_ENGINE = create_engine(connection_string, pool_size=40, max_overflow=10)
     return _SYNC_ENGINE

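For context on the two knobs being tuned: in SQLAlchemy, pool_size is the number of connections the QueuePool keeps open persistently, and max_overflow is how many additional connections may be opened temporarily under load, so this change caps the sync engine at 40 + 10 = 50 concurrent connections. A minimal sketch (placeholder DSN, not from this commit) of what those arguments do:

from sqlalchemy import create_engine, text

# pool_size + max_overflow is the hard cap on concurrent checkouts (40 + 10 = 50);
# one more checkout waits up to pool_timeout seconds and then raises a timeout error.
engine = create_engine(
    "postgresql+psycopg2://user:pass@localhost/db",  # placeholder DSN
    pool_size=40,
    max_overflow=10,
    pool_timeout=30,
)

with engine.connect() as conn:
    conn.execute(text("SELECT 1"))

print(engine.pool.status())  # e.g. "Pool size: 40  Connections in pool: 1 ..."
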
@@ -67,7 +67,9 @@ def get_sqlalchemy_async_engine() -> AsyncEngine:
     global _ASYNC_ENGINE
     if _ASYNC_ENGINE is None:
         connection_string = build_connection_string()
-        _ASYNC_ENGINE = create_async_engine(connection_string)
+        _ASYNC_ENGINE = create_async_engine(
+            connection_string, pool_size=40, max_overflow=10
+        )
     return _ASYNC_ENGINE

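The slowness in the commit title comes from the async engine previously being created with no pool arguments at all: SQLAlchemy then falls back to its pool defaults (pool_size=5, max_overflow=10), so only about 15 concurrent async connections were available before checkouts started queueing behind each other. A minimal sketch, with a placeholder DSN, of creating an async engine with the same explicit 40/10 limits:

import asyncio

from sqlalchemy import text
from sqlalchemy.ext.asyncio import create_async_engine


async def main() -> None:
    # explicit pool limits instead of the 5/10 defaults
    engine = create_async_engine(
        "postgresql+asyncpg://user:pass@localhost/db",  # placeholder DSN
        pool_size=40,
        max_overflow=10,
    )
    async with engine.connect() as conn:
        await conn.execute(text("SELECT 1"))
    await engine.dispose()


asyncio.run(main())
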
@@ -90,4 +92,27 @@ async def get_async_session() -> AsyncGenerator[AsyncSession, None]:
         yield async_session


+async def warm_up_connections(
+    sync_connections_to_warm_up: int = 10, async_connections_to_warm_up: int = 10
+) -> None:
+    sync_postgres_engine = get_sqlalchemy_engine()
+    connections = [
+        sync_postgres_engine.connect() for _ in range(sync_connections_to_warm_up)
+    ]
+    for conn in connections:
+        conn.execute(text("SELECT 1"))
+    for conn in connections:
+        conn.close()
+
+    async_postgres_engine = get_sqlalchemy_async_engine()
+    async_connections = [
+        await async_postgres_engine.connect()
+        for _ in range(async_connections_to_warm_up)
+    ]
+    for async_conn in async_connections:
+        await async_conn.execute(text("SELECT 1"))
+    for async_conn in async_connections:
+        await async_conn.close()
+
+
 SessionFactory = sessionmaker(bind=get_sqlalchemy_engine())

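The warm-up works because Engine.connect() establishes a real database connection on first checkout, while close() returns the connection to the pool rather than closing it on the server. Opening and exercising a batch of connections at startup therefore leaves the pools pre-populated, so the first real requests skip TCP/auth connection setup. A minimal sketch of that mechanism on the sync side (placeholder DSN, not from this commit):

from sqlalchemy import create_engine, text

engine = create_engine(
    "postgresql+psycopg2://user:pass@localhost/db",  # placeholder DSN
    pool_size=40,
    max_overflow=10,
)

conns = [engine.connect() for _ in range(10)]  # establishes 10 real connections
for conn in conns:
    conn.execute(text("SELECT 1"))  # make sure each connection is fully usable
for conn in conns:
    conn.close()  # returned to the pool, not closed on the server

print(engine.pool.checkedin())  # 10 idle connections ready for future checkouts
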
@@ -43,6 +43,7 @@ from danswer.db.credentials import create_initial_public_credential
 from danswer.db.embedding_model import get_current_db_embedding_model
 from danswer.db.embedding_model import get_secondary_db_embedding_model
 from danswer.db.engine import get_sqlalchemy_engine
+from danswer.db.engine import warm_up_connections
 from danswer.db.index_attempt import cancel_indexing_attempts_past_model
 from danswer.db.index_attempt import expire_index_attempts
 from danswer.db.swap_index import check_index_swap

@@ -167,6 +168,9 @@ async def lifespan(app: FastAPI) -> AsyncGenerator:
             f"Using multilingual flow with languages: {MULTILINGUAL_QUERY_EXPANSION}"
         )

+    # fill up Postgres connection pools
+    await warm_up_connections()
+
     with Session(engine) as db_session:
         check_index_swap(db_session=db_session)
         db_embedding_model = get_current_db_embedding_model(db_session)

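FastAPI runs everything before the yield of the lifespan handler once at startup, so the pools are filled before the first request is served. A minimal, self-contained sketch of that pattern (only warm_up_connections is from this commit; the rest is illustrative):

from collections.abc import AsyncGenerator
from contextlib import asynccontextmanager

from fastapi import FastAPI

from danswer.db.engine import warm_up_connections


@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
    # fill up Postgres connection pools before serving traffic
    await warm_up_connections()
    yield  # application handles requests here


app = FastAPI(lifespan=lifespan)
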
@@ -81,6 +81,7 @@ services:
       # If set to `true` will enable additional logs about Vespa query performance
       # (time spent on finding the right docs + time spent fetching summaries from disk)
       - LOG_VESPA_TIMING_INFORMATION=${LOG_VESPA_TIMING_INFORMATION:-}
+      - LOG_ENDPOINT_LATENCY=${LOG_ENDPOINT_LATENCY:-}
     extra_hosts:
       - "host.docker.internal:host-gateway"
     logging:

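The docker-compose change only threads a new LOG_ENDPOINT_LATENCY environment variable through to the container; this diff does not show how the backend consumes it. A hypothetical sketch (names and behavior assumed, not danswer's code) of the kind of per-endpoint latency logging such a flag typically gates:

import logging
import os
import time

from fastapi import FastAPI, Request

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI()

# only install the timing middleware when the flag is set, so the default
# configuration pays no extra logging cost
if os.environ.get("LOG_ENDPOINT_LATENCY"):

    @app.middleware("http")
    async def log_endpoint_latency(request: Request, call_next):
        start = time.monotonic()
        response = await call_next(request)
        elapsed = time.monotonic() - start
        logger.info("%s %s took %.3fs", request.method, request.url.path, elapsed)
        return response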