Mirror of https://github.com/danswer-ai/danswer.git (synced 2025-09-22 17:16:20 +02:00)
reworked history messages in agent config
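build_history_prompt now takes the agent's AnswerPromptBuilder rather than a raw list[PreviousMessage]. Each graph node passes agent_a_config.prompt_builder, and the helper derives the history text from the builder's single_message_history or raw_message_history. As part of the cleanup, agent_path_decision stops re-deriving the config from config["metadata"]["config"] and uses the already-extracted agent_a_config.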
@@ -33,7 +33,7 @@ def agent_path_decision(state: MainState, config: RunnableConfig) -> RoutingDeci
         agent_a_config.perform_initial_search_path_decision
     )

-    history = build_history_prompt(config["metadata"]["config"].message_history)
+    history = build_history_prompt(agent_a_config.prompt_builder)

     logger.debug(f"--------{now_start}--------DECIDING TO SEARCH OR GO TO LLM---")

@@ -62,7 +62,7 @@ def generate_initial_answer(
     question = agent_a_config.search_request.query
     persona_prompt = get_persona_prompt(agent_a_config.search_request.persona)

-    history = build_history_prompt(agent_a_config.message_history)
+    history = build_history_prompt(agent_a_config.prompt_builder)

     sub_question_docs = state.documents
     all_original_question_documents = state.all_original_question_documents

@@ -59,7 +59,7 @@ def generate_refined_answer(
     question = agent_a_config.search_request.query
     persona_prompt = get_persona_prompt(agent_a_config.search_request.persona)

-    history = build_history_prompt(agent_a_config.message_history)
+    history = build_history_prompt(agent_a_config.prompt_builder)

     initial_documents = state.documents
     revised_documents = state.refined_documents

@@ -50,7 +50,7 @@ def initial_sub_question_creation(
     perform_initial_search_path_decision = (
         agent_a_config.perform_initial_search_path_decision
     )
-    history = build_history_prompt(agent_a_config.message_history)
+    history = build_history_prompt(agent_a_config.prompt_builder)

     # Use the initial search results to inform the decomposition
     sample_doc_str = state.sample_doc_str if hasattr(state, "sample_doc_str") else ""

@@ -50,7 +50,7 @@ def refined_sub_question_creation(

     question = agent_a_config.search_request.query
     base_answer = state.initial_answer
-    history = build_history_prompt(agent_a_config.message_history)
+    history = build_history_prompt(agent_a_config.prompt_builder)
     # get the entity term extraction dict and properly format it
     entity_retlation_term_extractions = state.entity_retlation_term_extractions

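All five call sites above make the same one-line substitution, passing agent_a_config.prompt_builder where a message_history list used to go; the rewrite of build_history_prompt that motivates the change follows in the last two hunks.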
@@ -5,9 +5,9 @@ from langchain_core.messages.tool import ToolMessage

 from onyx.agents.agent_search.shared_graph_utils.prompts import BASE_RAG_PROMPT_v2
 from onyx.agents.agent_search.shared_graph_utils.prompts import HISTORY_PROMPT
+from onyx.chat.prompt_builder.answer_prompt_builder import AnswerPromptBuilder
 from onyx.context.search.models import InferenceSection
 from onyx.llm.interfaces import LLMConfig
-from onyx.llm.models import PreviousMessage
 from onyx.llm.utils import get_max_input_tokens
 from onyx.natural_language_processing.utils import get_tokenizer
 from onyx.natural_language_processing.utils import tokenizer_trim_content
@@ -67,12 +67,16 @@ def trim_prompt_piece(config: LLMConfig, prompt_piece: str, reserved_str: str) -
     )


-def build_history_prompt(message_history: list[PreviousMessage] | None) -> str:
-    if message_history is None:
+def build_history_prompt(prompt_builder: AnswerPromptBuilder | None) -> str:
+    if prompt_builder is None:
         return ""

-    history = ""
+    if prompt_builder.single_message_history is not None:
+        history = prompt_builder.single_message_history
+    else:
+        history = ""
+
     previous_message_type = None
-    for message in message_history:
+    for message in prompt_builder.raw_message_history:
         if "user" in message.message_type:
             history += f"User: {message.message}\n"
             previous_message_type = "user"
@@ -83,7 +87,4 @@ def build_history_prompt(message_history: list[PreviousMessage] | None) -> str:
             previous_message_type = "assistant"
         else:
             continue
-    if len(history) > 0:
-        return HISTORY_PROMPT.format(history=history)
-    else:
-        return ""
+    return HISTORY_PROMPT.format(history=history) if history else ""
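Taken together, the last two hunks rewrite the helper around the prompt builder: it seeds history from a pre-rendered single_message_history when present, then replays raw_message_history into User:/Assistant: turns, and collapses the old if/else return into a single ternary. Below is a runnable sketch of the resulting behavior; the stub types, the HISTORY_PROMPT text, and the assistant branch (which falls between the two hunks and is not visible in the diff) are assumptions, not the actual onyx code.

from dataclasses import dataclass, field

# Assumed template text; the real HISTORY_PROMPT is imported from
# onyx.agents.agent_search.shared_graph_utils.prompts.
HISTORY_PROMPT = "Here is the history of the conversation so far:\n{history}"


@dataclass
class MessageStub:
    # Stand-in for the PreviousMessage type this commit stops importing.
    message_type: str  # "user", "assistant", or something else (skipped)
    message: str


@dataclass
class PromptBuilderStub:
    # Stand-in for onyx.chat.prompt_builder.answer_prompt_builder.AnswerPromptBuilder;
    # only the two fields the helper reads are modeled.
    raw_message_history: list[MessageStub] = field(default_factory=list)
    single_message_history: str | None = None


def build_history_prompt(prompt_builder: PromptBuilderStub | None) -> str:
    if prompt_builder is None:
        return ""

    # Prefer a pre-rendered history string when the builder carries one.
    if prompt_builder.single_message_history is not None:
        history = prompt_builder.single_message_history
    else:
        history = ""

    previous_message_type = None
    for message in prompt_builder.raw_message_history:
        if "user" in message.message_type:
            history += f"User: {message.message}\n"
            previous_message_type = "user"
        elif "assistant" in message.message_type:
            # Assumed shape of the branch between the two hunks: keep only the
            # first assistant message of a consecutive run.
            if previous_message_type != "assistant":
                history += f"Assistant: {message.message}\n"
            previous_message_type = "assistant"
        else:
            continue
    return HISTORY_PROMPT.format(history=history) if history else ""


# Usage: no builder or no history yields "", otherwise the filled template.
builder = PromptBuilderStub(
    raw_message_history=[
        MessageStub("user", "What changed in the agent config?"),
        MessageStub("assistant", "History handling was reworked."),
    ]
)
print(repr(build_history_prompt(None)))  # ''
print(build_history_prompt(builder))     # User/Assistant turns in the template

Note that the sketch still replays raw_message_history even when single_message_history is set, matching the hunk layout above, where the loop sits outside the else branch.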