mirror of https://github.com/danswer-ai/danswer.git (synced 2025-09-19 03:58:30 +02:00)
reworked history messages in agent config
@@ -33,7 +33,7 @@ def agent_path_decision(state: MainState, config: RunnableConfig) -> RoutingDeci
         agent_a_config.perform_initial_search_path_decision
     )
 
-    history = build_history_prompt(config["metadata"]["config"].message_history)
+    history = build_history_prompt(agent_a_config.prompt_builder)
 
     logger.debug(f"--------{now_start}--------DECIDING TO SEARCH OR GO TO LLM---")
 
@@ -62,7 +62,7 @@ def generate_initial_answer(
     question = agent_a_config.search_request.query
     persona_prompt = get_persona_prompt(agent_a_config.search_request.persona)
 
-    history = build_history_prompt(agent_a_config.message_history)
+    history = build_history_prompt(agent_a_config.prompt_builder)
 
     sub_question_docs = state.documents
     all_original_question_documents = state.all_original_question_documents
@@ -59,7 +59,7 @@ def generate_refined_answer(
     question = agent_a_config.search_request.query
     persona_prompt = get_persona_prompt(agent_a_config.search_request.persona)
 
-    history = build_history_prompt(agent_a_config.message_history)
+    history = build_history_prompt(agent_a_config.prompt_builder)
 
     initial_documents = state.documents
     revised_documents = state.refined_documents
@@ -50,7 +50,7 @@ def initial_sub_question_creation(
     perform_initial_search_path_decision = (
         agent_a_config.perform_initial_search_path_decision
     )
-    history = build_history_prompt(agent_a_config.message_history)
+    history = build_history_prompt(agent_a_config.prompt_builder)
 
     # Use the initial search results to inform the decomposition
     sample_doc_str = state.sample_doc_str if hasattr(state, "sample_doc_str") else ""
@@ -50,7 +50,7 @@ def refined_sub_question_creation(
 
     question = agent_a_config.search_request.query
     base_answer = state.initial_answer
-    history = build_history_prompt(agent_a_config.message_history)
+    history = build_history_prompt(agent_a_config.prompt_builder)
     # get the entity term extraction dict and properly format it
     entity_retlation_term_extractions = state.entity_retlation_term_extractions
 
@@ -5,9 +5,9 @@ from langchain_core.messages.tool import ToolMessage
 
 from onyx.agents.agent_search.shared_graph_utils.prompts import BASE_RAG_PROMPT_v2
 from onyx.agents.agent_search.shared_graph_utils.prompts import HISTORY_PROMPT
+from onyx.chat.prompt_builder.answer_prompt_builder import AnswerPromptBuilder
 from onyx.context.search.models import InferenceSection
 from onyx.llm.interfaces import LLMConfig
-from onyx.llm.models import PreviousMessage
 from onyx.llm.utils import get_max_input_tokens
 from onyx.natural_language_processing.utils import get_tokenizer
 from onyx.natural_language_processing.utils import tokenizer_trim_content
@@ -67,23 +67,24 @@ def trim_prompt_piece(config: LLMConfig, prompt_piece: str, reserved_str: str) -
     )
 
 
-def build_history_prompt(message_history: list[PreviousMessage] | None) -> str:
-    if message_history is None:
+def build_history_prompt(prompt_builder: AnswerPromptBuilder | None) -> str:
+    if prompt_builder is None:
         return ""
-    history = ""
-    previous_message_type = None
-    for message in message_history:
-        if "user" in message.message_type:
-            history += f"User: {message.message}\n"
-            previous_message_type = "user"
-        elif "assistant" in message.message_type:
-            # only use the initial agent answer for the history
-            if previous_message_type != "assistant":
-                history += f"You/Agent: {message.message}\n"
-                previous_message_type = "assistant"
-        else:
-            continue
-    if len(history) > 0:
-        return HISTORY_PROMPT.format(history=history)
-    else:
-        return ""
+
+    if prompt_builder.single_message_history is not None:
+        history = prompt_builder.single_message_history
+    else:
+        history = ""
+        previous_message_type = None
+        for message in prompt_builder.raw_message_history:
+            if "user" in message.message_type:
+                history += f"User: {message.message}\n"
+                previous_message_type = "user"
+            elif "assistant" in message.message_type:
+                # only use the initial agent answer for the history
+                if previous_message_type != "assistant":
+                    history += f"You/Agent: {message.message}\n"
+                    previous_message_type = "assistant"
+            else:
+                continue
+    return HISTORY_PROMPT.format(history=history) if history else ""
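Taken together, the change threads the shared AnswerPromptBuilder through every graph node instead of raw PreviousMessage lists, and lets a pre-rendered single_message_history short-circuit the rebuild. Below is a minimal, self-contained sketch of the new behavior; the dataclasses are hypothetical stand-ins for onyx's real AnswerPromptBuilder and PreviousMessage, and the HISTORY_PROMPT text is an assumed placeholder, not the actual template from shared_graph_utils.prompts.

```python
from dataclasses import dataclass, field


# Hypothetical stand-ins for onyx.llm.models.PreviousMessage and
# onyx.chat.prompt_builder.answer_prompt_builder.AnswerPromptBuilder.
@dataclass
class PreviousMessage:
    message_type: str  # "user" or "assistant" in this sketch
    message: str


@dataclass
class AnswerPromptBuilder:
    raw_message_history: list[PreviousMessage] = field(default_factory=list)
    single_message_history: str | None = None


# Assumed placeholder text; the real template lives in shared_graph_utils.prompts.
HISTORY_PROMPT = "Here is the history of the conversation so far:\n{history}"


def build_history_prompt(prompt_builder: AnswerPromptBuilder | None) -> str:
    if prompt_builder is None:
        return ""

    if prompt_builder.single_message_history is not None:
        # Fast path: reuse the pre-rendered history string as-is.
        history = prompt_builder.single_message_history
    else:
        # Rebuild from raw messages, keeping only the first assistant turn
        # in any consecutive run of assistant messages.
        history = ""
        previous_message_type = None
        for message in prompt_builder.raw_message_history:
            if "user" in message.message_type:
                history += f"User: {message.message}\n"
                previous_message_type = "user"
            elif "assistant" in message.message_type:
                if previous_message_type != "assistant":
                    history += f"You/Agent: {message.message}\n"
                    previous_message_type = "assistant"
    return HISTORY_PROMPT.format(history=history) if history else ""


if __name__ == "__main__":
    builder = AnswerPromptBuilder(
        raw_message_history=[
            PreviousMessage("user", "What changed in the agent config?"),
            PreviousMessage("assistant", "Initial answer."),
            PreviousMessage("assistant", "Refined answer."),  # dropped below
        ]
    )
    print(build_history_prompt(builder))
```

Running the example shows the deduplication rule the diff preserves: consecutive assistant turns collapse to the first one, so only the initial agent answer enters the history.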