agent default changes/restructuring

parent 12d1186888
commit 69e8c5f0fc
@@ -37,7 +37,7 @@ def answer_generation(
     agent_search_config = cast(AgentSearchConfig, config["metadata"]["config"])
     question = state.question
-    docs = state.documents
+    state.documents
     level, question_nr = parse_question_id(state.question_id)
     context_docs = state.context_documents[:AGENT_MAX_ANSWER_CONTEXT_DOCS]
     persona = get_persona_expressions(agent_search_config.search_request.persona)
@@ -54,7 +54,7 @@ def answer_generation(
             ),
         )
     else:
-        logger.debug(f"Number of verified retrieval docs: {len(docs)}")
+        logger.debug(f"Number of verified retrieval docs: {len(context_docs)}")

         fast_llm = agent_search_config.fast_llm
         msg = build_sub_question_answer_prompt(
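For orientation, here is a minimal, self-contained sketch of the corrected path in answer_generation: the sub-answer context is capped at AGENT_MAX_ANSWER_CONTEXT_DOCS, and the logged count now comes from that capped context_docs list instead of the broader docs list. The value 8, the string documents, and the helper name log_context_docs are stand-ins for illustration, not the real state object or configuration.

```python
AGENT_MAX_ANSWER_CONTEXT_DOCS = 8  # stand-in for the configured cap


def log_context_docs(context_documents: list[str]) -> list[str]:
    # Mirror of the hunks above: cap the context documents, then log the
    # capped count (the diff switches the logged length from `docs` to
    # `context_docs`).
    context_docs = context_documents[:AGENT_MAX_ANSWER_CONTEXT_DOCS]
    print(f"Number of verified retrieval docs: {len(context_docs)}")
    return context_docs


log_context_docs([f"doc_{i}" for i in range(20)])  # logs a count of 8
```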
@@ -39,7 +39,6 @@ def parallelize_initial_sub_question_answering(
                 log_messages=[
                     f"{now_start} -- Main Edge - Parallelize Initial Sub-question Answering"
                 ],
-                request_start_time=[],
             ),
         )
         for question_nr, question in enumerate(state.initial_decomp_questions)
@@ -71,7 +71,6 @@ def generate_initial_answer(

     date_str = get_today_prompt()

-    sub_question_docs = state.context_documents
     sub_questions_cited_docs = state.cited_docs
     all_original_question_documents = state.all_original_question_documents

@@ -135,10 +134,6 @@ def generate_initial_answer(
         )

     else:
-        net_new_original_question_docs = []
-        for all_original_question_doc in all_original_question_documents:
-            if all_original_question_doc not in sub_question_docs:
-                net_new_original_question_docs.append(all_original_question_doc)

     decomp_answer_results = state.decomp_answer_results

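For context on the four deleted lines in generate_initial_answer: they collected the original-question documents that had not already surfaced among the sub-question context documents. As a standalone sketch with plain strings standing in for document objects, the removed loop was equivalent to this list comprehension:

```python
all_original_question_documents = ["doc_1", "doc_2", "doc_3", "doc_4"]
sub_question_docs = ["doc_2", "doc_4"]

# Equivalent of the removed loop: keep only the original-question docs that
# sub-question answering did not already surface.
net_new_original_question_docs = [
    doc
    for doc in all_original_question_documents
    if doc not in sub_question_docs
]
print(net_new_original_question_docs)  # ['doc_1', 'doc_3']
```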
@@ -24,7 +24,7 @@ from onyx.chat.models import StreamStopInfo
 from onyx.chat.models import SubQueryPiece
 from onyx.chat.models import SubQuestionPiece
 from onyx.chat.models import ToolResponse
-from onyx.configs.agent_configs import GRAPH_NAME
+from onyx.configs.agent_configs import GRAPH_VERSION_NAME
 from onyx.context.search.models import SearchRequest
 from onyx.db.engine import get_session_context_manager
 from onyx.tools.tool_runner import ToolCallKickoff
@@ -199,7 +199,7 @@ if __name__ == "__main__":
     now_start = datetime.now()
     logger.debug(f"Start at {now_start}")

-    if GRAPH_NAME == "a":
+    if GRAPH_VERSION_NAME == "a":
         graph = main_graph_builder_a()
     else:
         graph = main_graph_builder_a()
@@ -226,7 +226,7 @@ if __name__ == "__main__":
     config.use_persistence = True
     # config.perform_initial_search_path_decision = False
     config.perform_initial_search_decomposition = True
-    if GRAPH_NAME == "a":
+    if GRAPH_VERSION_NAME == "a":
         input = MainInput_a(
             base_question=config.search_request.query, log_messages=[]
         )
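The run-script hunks only rename the flag from GRAPH_NAME to GRAPH_VERSION_NAME; as the diff shows, both branches still build the "a" graph. A reduced sketch of that selection, with a stub in place of the real main_graph_builder_a:

```python
GRAPH_VERSION_NAME: str = "a"  # renamed from GRAPH_NAME in this commit


def main_graph_builder_a() -> str:
    # Stub standing in for the real graph builder.
    return "graph-a"


# Both branches currently resolve to the same builder, matching the diff.
if GRAPH_VERSION_NAME == "a":
    graph = main_graph_builder_a()
else:
    graph = main_graph_builder_a()
print(graph)  # graph-a
```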
@@ -1,7 +1,14 @@
 import os

-from .chat_configs import NUM_RETURNED_HITS
+AGENT_DEFAULT_RETRIEVAL_HITS = 25
+AGENT_DEFAULT_RERANKING_HITS = 15
+AGENT_DEFAULT_SUB_QUESTION_MAX_CONTEXT_HITS = 8
+AGENT_DEFAULT_NUM_DOCS_FOR_INITIAL_DECOMPOSITION = 3
+AGENT_DEFAULT_NUM_DOCS_FOR_REFINED_DECOMPOSITION = 5
+AGENT_DEFAULT_EXPLORATORY_SEARCH_RESULTS = 3
+AGENT_DEFAULT_MIN_ORIG_QUESTION_DOCS = 5
+AGENT_DEFAULT_MAX_ANSWER_CONTEXT_DOCS = 10
+AGENT_DEFAULT_MAX_STATIC_HISTORY_CHAR_LENGTH = 10000

 #####
 # Agent Configs
@@ -18,10 +25,10 @@ elif isinstance(agent_retrieval_stats_os, bool) and agent_retrieval_stats_os:
     AGENT_RETRIEVAL_STATS = True

 agent_max_query_retrieval_results_os: int | str = os.environ.get(
-    "AGENT_MAX_QUERY_RETRIEVAL_RESULTS", NUM_RETURNED_HITS
+    "AGENT_MAX_QUERY_RETRIEVAL_RESULTS", AGENT_DEFAULT_RETRIEVAL_HITS
 )

-AGENT_MAX_QUERY_RETRIEVAL_RESULTS: int = NUM_RETURNED_HITS
+AGENT_MAX_QUERY_RETRIEVAL_RESULTS: int = AGENT_DEFAULT_RETRIEVAL_HITS
 try:
     atmqrr = int(agent_max_query_retrieval_results_os)
     AGENT_MAX_QUERY_RETRIEVAL_RESULTS = atmqrr
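Each config hunk in this file follows the same shape: a module-level AGENT_DEFAULT_* constant replaces the old NUM_RETURNED_HITS import or hard-coded string, os.environ.get falls back to that constant, and the int() conversion is guarded by try/except ValueError. Below is a self-contained sketch of the pattern; the except body is an assumption (the real handler lies outside these hunks) and simply keeps the default here.

```python
import os

AGENT_DEFAULT_RETRIEVAL_HITS = 25  # default introduced at the top of the module

# When the env var is unset, .get() returns the int default itself; when it is
# set, it returns a string. That is why the module annotates these as int | str.
agent_max_query_retrieval_results_os: int | str = os.environ.get(
    "AGENT_MAX_QUERY_RETRIEVAL_RESULTS", AGENT_DEFAULT_RETRIEVAL_HITS
)

AGENT_MAX_QUERY_RETRIEVAL_RESULTS: int = AGENT_DEFAULT_RETRIEVAL_HITS
try:
    AGENT_MAX_QUERY_RETRIEVAL_RESULTS = int(agent_max_query_retrieval_results_os)
except ValueError:
    # Assumption: keep the default when the override is not an integer.
    pass

print(AGENT_MAX_QUERY_RETRIEVAL_RESULTS)  # 25 unless the env var overrides it
```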
@@ -32,6 +39,7 @@ except ValueError:


 # Reranking agent configs
+# Reranking stats - no influence on flow outside of stats collection
 agent_reranking_stats_os: bool | str | None = os.environ.get(
     "AGENT_RERANKING_TEST", False
 )
@@ -43,12 +51,12 @@ elif isinstance(agent_reranking_stats_os, bool) and agent_reranking_stats_os:


 agent_reranking_max_query_retrieval_results_os: int | str = os.environ.get(
-    "AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS", NUM_RETURNED_HITS
+    "AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS", AGENT_DEFAULT_RERANKING_HITS
 )

-AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS: int = NUM_RETURNED_HITS
+AGENT_RERANKING_MAX_QUERY_RETRIEVAL_RESULTS: int = AGENT_DEFAULT_RERANKING_HITS

-GRAPH_NAME: str = "a"
+GRAPH_VERSION_NAME: str = "a"

 try:
     atmqrr = int(agent_reranking_max_query_retrieval_results_os)
@@ -59,7 +67,7 @@ except ValueError:
     )

 AGENT_NUM_DOCS_FOR_DECOMPOSITION_OS: int | str = os.environ.get(
-    "AGENT_NUM_DOCS_FOR_DECOMPOSITION", "3"
+    "AGENT_NUM_DOCS_FOR_DECOMPOSITION", AGENT_DEFAULT_NUM_DOCS_FOR_INITIAL_DECOMPOSITION
 )

 try:
@@ -70,7 +78,8 @@ except ValueError:
     )

 AGENT_NUM_DOCS_FOR_REFINED_DECOMPOSITION_OS: int | str = os.environ.get(
-    "AGENT_NUM_DOCS_FOR_REFINED_DECOMPOSITION", "10"
+    "AGENT_NUM_DOCS_FOR_REFINED_DECOMPOSITION",
+    AGENT_DEFAULT_NUM_DOCS_FOR_REFINED_DECOMPOSITION,
 )

 try:
@@ -83,7 +92,7 @@ except ValueError:
     )

 AGENT_EXPLORATORY_SEARCH_RESULTS_OS: int | str = os.environ.get(
-    "AGENT_EXPLORATORY_SEARCH_RESULTS", "3"
+    "AGENT_EXPLORATORY_SEARCH_RESULTS", AGENT_DEFAULT_EXPLORATORY_SEARCH_RESULTS
 )

 try:
@@ -94,7 +103,7 @@ except ValueError:
     )

 AGENT_MIN_ORIG_QUESTION_DOCS_OS: int | str = os.environ.get(
-    "AGENT_MIN_ORIG_QUESTION_DOCS", "5"
+    "AGENT_MIN_ORIG_QUESTION_DOCS", AGENT_DEFAULT_MIN_ORIG_QUESTION_DOCS
 )

 try:
@@ -105,7 +114,7 @@ except ValueError:
     )

 AGENT_MAX_ANSWER_CONTEXT_DOCS_OS: int | str = os.environ.get(
-    "AGENT_MAX_ANSWER_CONTEXT_DOCS", "10"
+    "AGENT_MAX_ANSWER_CONTEXT_DOCS", AGENT_DEFAULT_SUB_QUESTION_MAX_CONTEXT_HITS
 )

 try:
@@ -116,7 +125,8 @@ except ValueError:
     )

 AGENT_MAX_STATIC_HISTORY_CHAR_LENGTH_OS: int | str = os.environ.get(
-    "AGENT_MAX_STATIC_HISTORY_CHAR_LENGTH_OS", "10000"
+    "AGENT_MAX_STATIC_HISTORY_CHAR_LENGTH_OS",
+    AGENT_DEFAULT_MAX_STATIC_HISTORY_CHAR_LENGTH,
 )

 try:
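Finally, a short usage sketch of how an environment override wins over the new defaults. The helper _read_exploratory_search_results is hypothetical; it only wraps the same lookup the module performs once at import time.

```python
import os

AGENT_DEFAULT_EXPLORATORY_SEARCH_RESULTS = 3


def _read_exploratory_search_results() -> int:
    # Hypothetical helper wrapping the module's import-time lookup.
    raw = os.environ.get(
        "AGENT_EXPLORATORY_SEARCH_RESULTS", AGENT_DEFAULT_EXPLORATORY_SEARCH_RESULTS
    )
    try:
        return int(raw)
    except ValueError:
        return AGENT_DEFAULT_EXPLORATORY_SEARCH_RESULTS


print(_read_exploratory_search_results())  # 3 when the env var is unset
os.environ["AGENT_EXPLORATORY_SEARCH_RESULTS"] = "7"
print(_read_exploratory_search_results())  # 7: the override wins over the default
```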