From 866bc803b1944b4af6478e3b2348e73f65a8a9d2 Mon Sep 17 00:00:00 2001
From: hagen-danswer
Date: Tue, 23 Jul 2024 16:12:51 -0700
Subject: [PATCH] Implemented LLM disabling for api call (#1905)

---
 backend/danswer/one_shot_answer/answer_question.py | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/backend/danswer/one_shot_answer/answer_question.py b/backend/danswer/one_shot_answer/answer_question.py
index bfcc85043..fc14b2b8c 100644
--- a/backend/danswer/one_shot_answer/answer_question.py
+++ b/backend/danswer/one_shot_answer/answer_question.py
@@ -256,6 +256,9 @@ def stream_answer_objects(
             )
             yield initial_response
 
+        elif packet.id == SEARCH_DOC_CONTENT_ID:
+            yield packet.response
+
         elif packet.id == SECTION_RELEVANCE_LIST_ID:
             chunk_indices = packet.response
 
@@ -267,9 +270,12 @@ def stream_answer_objects(
                 )
 
             yield LLMRelevanceFilterResponse(relevant_chunk_indices=packet.response)
-
-        elif packet.id == SEARCH_DOC_CONTENT_ID:
-            yield packet.response
+            if query_req.skip_gen_ai_answer_generation:
+                # Exit early if only source docs + contexts are requested
+                # Putting exit here assumes that a packet with the ID
+                # SECTION_RELEVANCE_LIST_ID is the last one yielded before
+                # calling the LLM
+                return
 
         elif packet.id == SEARCH_EVALUATION_ID:
             evaluation_response = LLMRelevanceSummaryResponse(
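
Note: the control flow this patch adds can be reduced to the self-contained
sketch below (illustrative only, not danswer code). Only the packet-ID
constant names mirror the diff; their values, the Packet type, and the
stream_packets helper are hypothetical stand-ins for the real
stream_answer_objects machinery.

    from dataclasses import dataclass
    from typing import Any, Iterator

    # Constant names taken from the diff; string values are made up here.
    SEARCH_DOC_CONTENT_ID = "search_doc_content"
    SECTION_RELEVANCE_LIST_ID = "section_relevance_list"
    SEARCH_EVALUATION_ID = "search_evaluation"

    @dataclass
    class Packet:
        id: str
        response: Any

    def stream_packets(
        packets: list[Packet], skip_gen_ai_answer_generation: bool
    ) -> Iterator[Any]:
        for packet in packets:
            if packet.id == SEARCH_DOC_CONTENT_ID:
                yield packet.response
            elif packet.id == SECTION_RELEVANCE_LIST_ID:
                yield packet.response
                if skip_gen_ai_answer_generation:
                    # Same assumption as the patch: the relevance-list packet
                    # is the last one emitted before the LLM would be called.
                    return
            elif packet.id == SEARCH_EVALUATION_ID:
                yield packet.response

    packets = [
        Packet(SEARCH_DOC_CONTENT_ID, "top document contents"),
        Packet(SECTION_RELEVANCE_LIST_ID, [0, 2]),
        Packet(SEARCH_EVALUATION_ID, "LLM relevance summary"),
    ]

    # With the flag set, the stream ends after the relevance list; the
    # evaluation packet (and any LLM answer that would follow) never appears.
    assert list(stream_packets(packets, skip_gen_ai_answer_generation=True)) == [
        "top document contents",
        [0, 2],
    ]
    assert len(list(stream_packets(packets, skip_gen_ai_answer_generation=False))) == 3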