From fb931ee4de239b8c32fe831aef3ad5e102860d85 Mon Sep 17 00:00:00 2001
From: "Richard Kuo (Danswer)"
Date: Fri, 7 Feb 2025 17:28:17 -0800
Subject: [PATCH] fixes

---
 backend/model_server/encoders.py | 2 +-
 backend/model_server/main.py     | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/backend/model_server/encoders.py b/backend/model_server/encoders.py
index 502eeecef..52e1ddf8b 100644
--- a/backend/model_server/encoders.py
+++ b/backend/model_server/encoders.py
@@ -412,7 +412,7 @@ async def embed_text(
         f"event=embedding_model "
         f"texts={len(texts)} "
         f"chars={total_chars} "
-        f"model={provider_type} "
+        f"model={model_name} "
         f"gpu={gpu_type} "
         f"elapsed={elapsed:.2f}"
     )
diff --git a/backend/model_server/main.py b/backend/model_server/main.py
index 2031d69ea..0a6b56be1 100644
--- a/backend/model_server/main.py
+++ b/backend/model_server/main.py
@@ -60,7 +60,7 @@ def _move_files_recursively(source: Path, dest: Path, overwrite: bool = False) -
 @asynccontextmanager
 async def lifespan(app: FastAPI) -> AsyncGenerator:
     gpu_type = get_gpu_type()
-    logger.notice(f"gpu_type={gpu_type}")
+    logger.notice(f"Torch GPU Detection: gpu_type={gpu_type}")
     app.state.gpu_type = gpu_type