Mirror of https://github.com/danswer-ai/danswer.git (synced 2025-04-08 11:58:34 +02:00)
history added to agent flow
parent e4c93bed8b
commit b9bd2ea4e2
@@ -8,6 +8,9 @@ from onyx.agents.agent_search.deep_search_a.main.operations import logger
 from onyx.agents.agent_search.deep_search_a.main.states import MainState
 from onyx.agents.agent_search.deep_search_a.main.states import RoutingDecision
 from onyx.agents.agent_search.models import AgentSearchConfig
+from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
+    build_history_prompt,
+)
 from onyx.agents.agent_search.shared_graph_utils.prompts import AGENT_DECISION_PROMPT
 from onyx.agents.agent_search.shared_graph_utils.prompts import (
     AGENT_DECISION_PROMPT_AFTER_SEARCH,
@@ -29,6 +32,8 @@ def agent_path_decision(state: MainState, config: RunnableConfig) -> RoutingDecision:
         agent_a_config.perform_initial_search_path_decision
     )
 
+    history = build_history_prompt(config["metadata"]["config"].message_history)
+
     logger.debug(f"--------{now_start}--------DECIDING TO SEARCH OR GO TO LLM---")
 
     if perform_initial_search_path_decision:
@@ -53,12 +58,14 @@ def agent_path_decision(state: MainState, config: RunnableConfig) -> RoutingDecision:
         )
 
         agent_decision_prompt = AGENT_DECISION_PROMPT_AFTER_SEARCH.format(
-            question=question, sample_doc_str=sample_doc_str
+            question=question, sample_doc_str=sample_doc_str, history=history
         )
 
     else:
         sample_doc_str = ""
-        agent_decision_prompt = AGENT_DECISION_PROMPT.format(question=question)
+        agent_decision_prompt = AGENT_DECISION_PROMPT.format(
+            question=question, history=history
+        )
 
     msg = [HumanMessage(content=agent_decision_prompt)]
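A minimal sketch of the prompt-assembly pattern these hunks extend: the history block is interpolated with plain str.format and collapses to nothing when the conversation is fresh. The toy template below is invented for illustration and is not the real AGENT_DECISION_PROMPT.

# Toy stand-in for AGENT_DECISION_PROMPT; the real template lives in
# onyx.agents.agent_search.shared_graph_utils.prompts.
TOY_DECISION_PROMPT = """Decide whether to search the document store or answer directly.
{history}
Here is the initial question:
-------
{question}
"""

def build_decision_prompt(question: str, history: str) -> str:
    # history is either a pre-formatted block or "" for a fresh
    # conversation, so the template degrades gracefully.
    return TOY_DECISION_PROMPT.format(question=question, history=history)

print(build_decision_prompt("What changed in the agent flow?", ""))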
@@ -19,6 +19,9 @@ from onyx.agents.agent_search.deep_search_a.main.operations import (
 from onyx.agents.agent_search.deep_search_a.main.states import InitialAnswerUpdate
 from onyx.agents.agent_search.deep_search_a.main.states import MainState
 from onyx.agents.agent_search.models import AgentSearchConfig
+from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
+    build_history_prompt,
+)
 from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
     trim_prompt_piece,
 )
@@ -58,6 +61,9 @@ def generate_initial_answer(
     agent_a_config = cast(AgentSearchConfig, config["metadata"]["config"])
     question = agent_a_config.search_request.query
     persona_prompt = get_persona_prompt(agent_a_config.search_request.persona)
+
+    history = build_history_prompt(agent_a_config.message_history)
+
     sub_question_docs = state["documents"]
     all_original_question_documents = state["all_original_question_documents"]
 
@@ -160,7 +166,7 @@ def generate_initial_answer(
     doc_context = trim_prompt_piece(
         model.config,
         doc_context,
-        base_prompt + sub_question_answer_str + persona_specification,
+        base_prompt + sub_question_answer_str + persona_specification + history,
     )
 
     msg = [
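The `+ history` above matters for token budgeting: everything passed as the reserved string is subtracted from the model's input budget before `doc_context` is trimmed, so a longer history leaves less room for documents. A hedged sketch of that arithmetic with a toy whitespace tokenizer (the real `trim_prompt_piece` uses the model's tokenizer and `LLMConfig`; the numbers here are invented):

def toy_trim(prompt_piece: str, reserved_str: str, max_tokens: int = 40) -> str:
    # Whitespace tokens stand in for real tokenizer output; max_tokens
    # is an invented budget, not a real model limit.
    budget = max_tokens - len(reserved_str.split())
    return " ".join(prompt_piece.split()[: max(budget, 0)])

base = "Answer the question using only the documents below."
history = "User: earlier question You/Agent: earlier answer"
docs = "doc " * 100

print(len(toy_trim(docs, base).split()))                  # more docs survive
print(len(toy_trim(docs, base + " " + history).split()))  # fewer docs survive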
@@ -172,6 +178,7 @@ def generate_initial_answer(
             ),
             relevant_docs=format_docs(relevant_docs),
             persona_specification=persona_specification,
+            history=history,
         )
     )
     ]
@@ -16,6 +16,9 @@ from onyx.agents.agent_search.deep_search_a.main.operations import (
 from onyx.agents.agent_search.deep_search_a.main.states import MainState
 from onyx.agents.agent_search.deep_search_a.main.states import RefinedAnswerUpdate
 from onyx.agents.agent_search.models import AgentSearchConfig
+from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
+    build_history_prompt,
+)
 from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
     trim_prompt_piece,
 )
@@ -56,6 +59,8 @@ def generate_refined_answer(
     question = agent_a_config.search_request.query
     persona_prompt = get_persona_prompt(agent_a_config.search_request.persona)
 
+    history = build_history_prompt(agent_a_config.message_history)
+
     initial_documents = state["documents"]
     revised_documents = state["refined_documents"]
@@ -169,13 +174,15 @@ def generate_refined_answer(
         + sub_question_answer_str
         + relevant_docs
         + initial_answer
-        + persona_specification,
+        + persona_specification
+        + history,
     )
 
     msg = [
         HumanMessage(
             content=base_prompt.format(
                 question=question,
+                history=history,
                 answered_sub_questions=remove_document_citations(
                     sub_question_answer_str
                 ),
@@ -12,6 +12,9 @@ from onyx.agents.agent_search.deep_search_a.main.operations import logger
 from onyx.agents.agent_search.deep_search_a.main.states import BaseDecompUpdate
 from onyx.agents.agent_search.deep_search_a.main.states import MainState
 from onyx.agents.agent_search.models import AgentSearchConfig
+from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
+    build_history_prompt,
+)
 from onyx.agents.agent_search.shared_graph_utils.prompts import (
     INITIAL_DECOMPOSITION_PROMPT_QUESTIONS,
 )
@@ -47,6 +50,7 @@ def initial_sub_question_creation(
     perform_initial_search_path_decision = (
         agent_a_config.perform_initial_search_path_decision
     )
+    history = build_history_prompt(agent_a_config.message_history)
 
     # Use the initial search results to inform the decomposition
     sample_doc_str = state.get("sample_doc_str", "")
@@ -83,13 +87,13 @@ def initial_sub_question_creation(
 
         decomposition_prompt = (
             INITIAL_DECOMPOSITION_PROMPT_QUESTIONS_AFTER_SEARCH.format(
-                question=question, sample_doc_str=sample_doc_str
+                question=question, sample_doc_str=sample_doc_str, history=history
             )
         )
 
     else:
         decomposition_prompt = INITIAL_DECOMPOSITION_PROMPT_QUESTIONS.format(
-            question=question
+            question=question, history=history
        )
 
     # Start decomposition
@@ -14,6 +14,9 @@ from onyx.agents.agent_search.deep_search_a.main.states import (
 )
 from onyx.agents.agent_search.deep_search_a.main.states import MainState
 from onyx.agents.agent_search.models import AgentSearchConfig
+from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
+    build_history_prompt,
+)
 from onyx.agents.agent_search.shared_graph_utils.prompts import DEEP_DECOMPOSE_PROMPT
 from onyx.agents.agent_search.shared_graph_utils.utils import dispatch_separated
 from onyx.agents.agent_search.shared_graph_utils.utils import (
@@ -47,7 +50,7 @@ def refined_sub_question_creation(
 
     question = agent_a_config.search_request.query
     base_answer = state["initial_answer"]
-
+    history = build_history_prompt(agent_a_config.message_history)
     # get the entity term extraction dict and properly format it
     entity_retlation_term_extractions = state["entity_retlation_term_extractions"]
 
@@ -69,6 +72,7 @@ def refined_sub_question_creation(
         HumanMessage(
             content=DEEP_DECOMPOSE_PROMPT.format(
                 question=question,
+                history=history,
                 entity_term_extraction_str=entity_term_extraction_str,
                 base_answer=base_answer,
                 answered_sub_questions="\n - ".join(addressed_question_list),
@@ -4,8 +4,10 @@ from langchain.schema import SystemMessage
 from langchain_core.messages.tool import ToolMessage
 
 from onyx.agents.agent_search.shared_graph_utils.prompts import BASE_RAG_PROMPT_v2
+from onyx.agents.agent_search.shared_graph_utils.prompts import HISTORY_PROMPT
 from onyx.context.search.models import InferenceSection
 from onyx.llm.interfaces import LLMConfig
+from onyx.llm.models import PreviousMessage
 from onyx.llm.utils import get_max_input_tokens
 from onyx.natural_language_processing.utils import get_tokenizer
 from onyx.natural_language_processing.utils import tokenizer_trim_content
@@ -63,3 +65,25 @@ def trim_prompt_piece(config: LLMConfig, prompt_piece: str, reserved_str: str) -> str:
         desired_length=max_tokens - len(llm_tokenizer.encode(reserved_str)),
         tokenizer=llm_tokenizer,
     )
+
+
+def build_history_prompt(message_history: list[PreviousMessage] | None) -> str:
+    if message_history is None:
+        return ""
+    history = ""
+    previous_message_type = None
+    for message in message_history:
+        if "user" in message.message_type:
+            history += f"User: {message.message}\n"
+            previous_message_type = "user"
+        elif "assistant" in message.message_type:
+            # only use the initial agent answer for the history
+            if previous_message_type != "assistant":
+                history += f"You/Agent: {message.message}\n"
+            previous_message_type = "assistant"
+        else:
+            continue
+    if len(history) > 0:
+        return HISTORY_PROMPT.format(history=history)
+    else:
+        return ""
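A self-contained sketch of how the new `build_history_prompt` folds a conversation into the prompt. `Msg` is a hypothetical stand-in for `onyx.llm.models.PreviousMessage`, assumed here to expose only `message` and `message_type`; the filtering logic mirrors the function above:

from dataclasses import dataclass

# Stand-in for PreviousMessage; the real model may carry more fields.
@dataclass
class Msg:
    message: str
    message_type: str

def sketch_history(messages: list[Msg] | None) -> str:
    # Mirrors build_history_prompt: user turns are always kept, but only
    # the first assistant message in a consecutive run (the initial
    # agent answer) is included.
    if messages is None:
        return ""
    history, prev = "", None
    for m in messages:
        if "user" in m.message_type:
            history += f"User: {m.message}\n"
            prev = "user"
        elif "assistant" in m.message_type:
            if prev != "assistant":
                history += f"You/Agent: {m.message}\n"
            prev = "assistant"
    return history

print(sketch_history([
    Msg("How do connectors work?", "user"),
    Msg("Initial agent answer", "assistant"),
    Msg("Refined agent answer", "assistant"),  # dropped: not the initial answer
]))

Collapsing each assistant run to its first message keeps the history focused on the initial agent answers rather than every refinement pass.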
@@ -2,6 +2,13 @@ UNKNOWN_ANSWER = "I do not have enough information to answer this question."
 
 NO_RECOVERED_DOCS = "No relevant documents recovered"
 
+HISTORY_PROMPT = """\n
+For more context, here is the history of the conversation so far that preceded this question:
+\n ------- \n
+{history}
+\n ------- \n\n
+"""
+
 REWRITE_PROMPT_MULTI_ORIGINAL = """ \n
 Please convert an initial user question into 2-3 more appropriate short and pointed search queries for retrieval from a
 document store. Particularly, try to think about resolving ambiguities and make the search queries more specific,
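For concreteness, here is what the template above renders to for a short history (the `\n` escapes inside the triple-quoted string become real newlines; the history text is invented):

# Copied from the hunk above for a self-contained demo.
HISTORY_PROMPT = """\n
For more context, here is the history of the conversation so far that preceded this question:
\n ------- \n
{history}
\n ------- \n\n
"""

print(HISTORY_PROMPT.format(history="User: How do connectors work?\nYou/Agent: Initial agent answer"))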
@@ -309,6 +316,7 @@ DEEP_DECOMPOSE_PROMPT = """ \n
 \n ------- \n
 {question}
 \n ------- \n
+{history}
 
 Here is the initial sub-optimal answer:
 \n ------- \n
@@ -453,6 +461,8 @@ Here is the initial question:
 -------
 {question}
 -------
+{history}
+
 Please formulate your answer as a newline-separated list of questions like so:
 <sub-question>
 <sub-question>
@@ -490,6 +500,7 @@ And here is the initial question that you should think about decomposing:
 {question}
 -------
 
+{history}
 
 Please formulate your answer as a newline-separated list of questions like so:
 <sub-question>
@@ -560,6 +571,7 @@ or address the request, you should choose the 'research' option.
 - If you think the question is very general and does not refer to the contents of a document store, you should choose
 the 'LLM' option.
 - Otherwise, you should choose the 'research' option.
+{history}
 
 Here is the initial question:
 -------
@@ -584,6 +596,7 @@ store to answer or materially help with the request, you should choose the 'research'
 you know the answer/can handle the request, you should choose the 'LLM' option.
 - If the question asks you to do something ('please create...', 'write for me...', etc.), you should choose the 'LLM' option.
 - If in doubt, choose the 'research' option.
+{history}
 
 Here is the initial question:
 -------
@@ -688,14 +701,14 @@ INITIAL_RAG_PROMPT = (
 """ \n
 {persona_specification}
 
-Use the information provided below - and only the
-provided information - to answer the provided question.
+Use the information provided below - and only the provided information - to answer the provided question.
 
 The information provided below consists of:
 1) a number of answered sub-questions - these are very important(!) and definitely should be
 considered to answer the question.
 2) a number of documents that were also deemed relevant for the question.
 
+{history}
 IMPORTANT RULES:
 - If you cannot reliably answer the question solely using the provided information, say that you cannot reliably answer.
 You may give some additional facts you learned, but do not try to invent an answer.
@@ -739,6 +752,7 @@ INITIAL_RAG_PROMPT_NO_SUB_QUESTIONS = (
 Use the information provided below
 - and only the provided information - to answer the provided question.
 The information provided below consists of a number of documents that were deemed relevant for the question.
+{history}
 
 IMPORTANT RULES:
 - If you cannot reliably answer the question solely using the provided information, say that you cannot reliably answer.
@@ -781,7 +795,7 @@ The information provided below consists of:
 particular to update/extend/correct the initial answer!
 information from the revised sub-questions
 3) a number of documents that were also deemed relevant for the question.
-
+{history}
 IMPORTANT RULES:
 - If you cannot reliably answer the question solely using the provided information, say that you cannot reliably answer.
 You may give some additional facts you learned, but do not try to invent an answer.
@@ -838,6 +852,7 @@ provided information - to answer the provided question.
 The information provided below consists of:
 1) an initial answer that was given but found to be lacking in some way.
 2) a number of documents that were also deemed relevant for the question.
+{history}
 
 IMPORTANT RULES:
 - If you cannot reliably answer the question solely using the provided information, say that you cannot reliably answer.