renamed directories and prompts; small citation fix

joachim-danswer
2025-01-29 20:40:24 -08:00
committed by Evan Lohn
parent e3e855c526
commit 3ca4d532b4
61 changed files with 727 additions and 653 deletions

View File

@@ -3,10 +3,10 @@ from datetime import datetime
from langgraph.types import Send
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionInput,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger
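The renames visible across the hunks in this commit follow one pattern: the flat, double-underscore "subgraph" packages become nested packages. A non-exhaustive summary of that mapping, expressed as a plain Python dict for reference (paths copied exactly as they appear in the import lines, including the "refininement" spelling):

# Old dotted package path -> new dotted package path, as shown in the import
# hunks of this commit (not exhaustive).
PACKAGE_RENAMES = {
    "deep_search_a.initial__individual_sub_answer__subgraph": "deep_search_a.initial.individual_sub_answer_generation",
    "deep_search_a.initial__consolidate_sub_answers__subgraph": "deep_search_a.initial.sub_answer_consolidation",
    "deep_search_a.initial__retrieval__subgraph": "deep_search_a.initial.retrieval",
    "deep_search_a.initial__retrieval_sub_answers__subgraph": "deep_search_a.initial.initial_answer_generation",
    "deep_search_a.refinement__consolidate_sub_answers__subgraph": "deep_search_a.refininement.sub_answer_consolidation",
    "deep_search_a.util__expanded_retrieval__subgraph": "deep_search_a.shared.expanded_retrieval",
    "deep_search_a.main__graph": "deep_search_a.main",
}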

View File

@@ -2,31 +2,31 @@ from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.edges import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.edges import (
send_to_expanded_retrieval,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.nodes.answer_check import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.nodes.answer_check import (
answer_check,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.nodes.answer_generation import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.nodes.answer_generation import (
answer_generation,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.nodes.format_answer import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.nodes.format_answer import (
format_answer,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.nodes.ingest_retrieval import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.nodes.ingest_retrieval import (
ingest_retrieval,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionInput,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionOutput,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionState,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.graph_builder import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.graph_builder import (
expanded_retrieval_graph_builder,
)
from onyx.agents.agent_search.shared_graph_utils.utils import get_test_config

View File

@@ -5,10 +5,10 @@ from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs
from langchain_core.runnables.config import RunnableConfig
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionState,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
QACheckUpdate,
)
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -6,10 +6,10 @@ from langchain_core.callbacks.manager import dispatch_custom_event
from langchain_core.messages import merge_message_runs
from langchain_core.runnables.config import RunnableConfig
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionState,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
QAGenerationUpdate,
)
from onyx.agents.agent_search.models import AgentSearchConfig
@@ -102,7 +102,9 @@ def answer_generation(
)
answer_citation_ids = get_answer_citation_ids(answer_str)
cited_docs = [context_docs[id] for id in answer_citation_ids]
cited_docs = [
context_docs[id] for id in answer_citation_ids if id < len(context_docs)
]
stop_event = StreamStopInfo(
stop_reason=StreamStopReason.FINISHED,
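This hunk is the "small citation fix" from the commit message: citation IDs emitted by the model can point past the end of context_docs, so out-of-range indices are now skipped instead of raising an IndexError. A minimal sketch with made-up values:

# Self-contained sketch of the bounds check added in the hunk above
# (variable names reused from the hunk; values are invented for illustration).
context_docs = ["doc_a", "doc_b"]   # documents available as answer context
answer_citation_ids = [0, 1, 5]     # the LLM may cite an index that does not exist
cited_docs = [
    context_docs[id] for id in answer_citation_ids if id < len(context_docs)
]
assert cited_docs == ["doc_a", "doc_b"]  # index 5 is skipped instead of raising IndexError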

View File

@@ -1,7 +1,7 @@
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionOutput,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionState,
)
from onyx.agents.agent_search.shared_graph_utils.models import (

View File

@@ -1,7 +1,7 @@
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
RetrievalIngestionUpdate,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalOutput,
)
from onyx.agents.agent_search.shared_graph_utils.models import AgentChunkStats

View File

@@ -3,13 +3,13 @@ from datetime import datetime
from langgraph.types import Send
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionInput,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionOutput,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.states import (
SearchSQState,
)
from onyx.agents.agent_search.shared_graph_utils.utils import make_question_id

View File

@@ -2,27 +2,27 @@ from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agents.agent_search.deep_search_a.initial__consolidate_sub_answers__subgraph.graph_builder import (
initial_sq_subgraph_builder,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval__subgraph.graph_builder import (
base_raw_search_graph_builder,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.nodes.generate_initial_answer import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.nodes.generate_initial_answer import (
generate_initial_answer,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.nodes.initial_answer_quality_check import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.nodes.initial_answer_quality_check import (
initial_answer_quality_check,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.nodes.retrieval_consolidation import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.nodes.retrieval_consolidation import (
retrieval_consolidation,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.states import (
SearchSQInput,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.states import (
SearchSQState,
)
from onyx.agents.agent_search.deep_search_a.initial.retrieval.graph_builder import (
base_raw_search_graph_builder,
)
from onyx.agents.agent_search.deep_search_a.initial.sub_answer_consolidation.graph_builder import (
initial_sq_subgraph_builder,
)
from onyx.utils.logger import setup_logger
logger = setup_logger()

View File

@@ -7,19 +7,19 @@ from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_content
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.states import (
SearchSQState,
)
from onyx.agents.agent_search.deep_search_a.main__graph.models import AgentBaseMetrics
from onyx.agents.agent_search.deep_search_a.main__graph.operations import (
from onyx.agents.agent_search.deep_search_a.main.models import AgentBaseMetrics
from onyx.agents.agent_search.deep_search_a.main.operations import (
calculate_initial_agent_stats,
)
from onyx.agents.agent_search.deep_search_a.main__graph.operations import get_query_info
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.operations import (
from onyx.agents.agent_search.deep_search_a.main.operations import get_query_info
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.operations import (
remove_document_citations,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import (
InitialAnswerUpdate,
)
from onyx.agents.agent_search.models import AgentSearchConfig
@@ -127,7 +127,7 @@ def generate_initial_answer(
)
else:
decomp_answer_results = state.decomp_answer_results
decomp_answer_results = state.sub_question_results
good_qa_list: list[str] = []
@@ -220,7 +220,7 @@ def generate_initial_answer(
answer = cast(str, response)
initial_agent_stats = calculate_initial_agent_stats(
state.decomp_answer_results, state.original_question_retrieval_stats
state.sub_question_results, state.original_question_retrieval_stats
)
logger.debug(

View File

@@ -1,10 +1,10 @@
from datetime import datetime
from onyx.agents.agent_search.deep_search_a.initial__retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.retrieval.states import (
BaseRawSearchOutput,
)
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import (
ExpandedRetrievalUpdate,
)
from onyx.agents.agent_search.shared_graph_utils.models import AgentChunkStats

View File

@@ -1,10 +1,10 @@
from datetime import datetime
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.states import (
SearchSQState,
)
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import (
InitialAnswerQualityUpdate,
)

View File

@@ -1,9 +1,9 @@
from datetime import datetime
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.states import (
SearchSQState,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import LoggerUpdate
from onyx.agents.agent_search.deep_search_a.main.states import LoggerUpdate
def retrieval_consolidation(

View File

@@ -3,23 +3,23 @@ from typing import Annotated
from typing import TypedDict
from onyx.agents.agent_search.core_state import CoreState
from onyx.agents.agent_search.deep_search_a.main__graph.states import BaseDecompUpdate
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import BaseDecompUpdate
from onyx.agents.agent_search.deep_search_a.main.states import (
DecompAnswersUpdate,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import (
ExpandedRetrievalUpdate,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import (
ExploratorySearchUpdate,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import (
InitialAnswerQualityUpdate,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import (
InitialAnswerUpdate,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.models import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.models import (
ExpandedRetrievalResult,
)

View File

@@ -2,25 +2,25 @@ from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agents.agent_search.deep_search_a.initial__retrieval__subgraph.nodes.format_raw_search_results import (
from onyx.agents.agent_search.deep_search_a.initial.retrieval.nodes.format_raw_search_results import (
format_raw_search_results,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval__subgraph.nodes.generate_raw_search_data import (
from onyx.agents.agent_search.deep_search_a.initial.retrieval.nodes.generate_raw_search_data import (
generate_raw_search_data,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval__subgraph.nodes.ingest_initial_base_retrieval import (
from onyx.agents.agent_search.deep_search_a.initial.retrieval.nodes.ingest_initial_base_retrieval import (
ingest_initial_base_retrieval,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.retrieval.states import (
BaseRawSearchInput,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.retrieval.states import (
BaseRawSearchOutput,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.retrieval.states import (
BaseRawSearchState,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.graph_builder import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.graph_builder import (
expanded_retrieval_graph_builder,
)

View File

@@ -1,7 +1,7 @@
from onyx.agents.agent_search.deep_search_a.initial__retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.retrieval.states import (
BaseRawSearchOutput,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalOutput,
)
from onyx.utils.logger import setup_logger

View File

@@ -3,7 +3,7 @@ from typing import cast
from langchain_core.runnables.config import RunnableConfig
from onyx.agents.agent_search.core_state import CoreState
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalInput,
)
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -1,10 +1,10 @@
from datetime import datetime
from onyx.agents.agent_search.deep_search_a.initial__retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.retrieval.states import (
BaseRawSearchOutput,
)
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import (
ExpandedRetrievalUpdate,
)
from onyx.agents.agent_search.shared_graph_utils.models import AgentChunkStats

View File

@@ -1,12 +1,12 @@
from pydantic import BaseModel
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import (
ExpandedRetrievalUpdate,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.models import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.models import (
ExpandedRetrievalResult,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalInput,
)

View File

@@ -3,13 +3,13 @@ from datetime import datetime
from langgraph.types import Send
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionInput,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionOutput,
)
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.states import (
SearchSQState,
)
from onyx.agents.agent_search.shared_graph_utils.utils import make_question_id

View File

@@ -2,24 +2,24 @@ from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agents.agent_search.deep_search_a.initial__consolidate_sub_answers__subgraph.edges import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.graph_builder import (
answer_query_graph_builder,
)
from onyx.agents.agent_search.deep_search_a.initial.sub_answer_consolidation.edges import (
parallelize_initial_sub_question_answering,
)
from onyx.agents.agent_search.deep_search_a.initial__consolidate_sub_answers__subgraph.nodes.ingest_initial_sub_answers import (
from onyx.agents.agent_search.deep_search_a.initial.sub_answer_consolidation.nodes.ingest_initial_sub_answers import (
ingest_initial_sub_answers,
)
from onyx.agents.agent_search.deep_search_a.initial__consolidate_sub_answers__subgraph.nodes.initial_decomposition import (
from onyx.agents.agent_search.deep_search_a.initial.sub_answer_consolidation.nodes.initial_decomposition import (
initial_sub_question_creation,
)
from onyx.agents.agent_search.deep_search_a.initial__consolidate_sub_answers__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.sub_answer_consolidation.states import (
SQInput,
)
from onyx.agents.agent_search.deep_search_a.initial__consolidate_sub_answers__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.sub_answer_consolidation.states import (
SQState,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.graph_builder import (
answer_query_graph_builder,
)
from onyx.utils.logger import setup_logger
logger = setup_logger()

View File

@@ -1,10 +1,10 @@
from datetime import datetime
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionOutput,
)
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import (
DecompAnswersUpdate,
)
from onyx.agents.agent_search.shared_graph_utils.operators import (
@@ -38,7 +38,7 @@ def ingest_initial_sub_answers(
documents=dedup_inference_sections(documents, []),
context_documents=dedup_inference_sections(context_documents, []),
cited_docs=dedup_inference_sections(cited_docs, []),
decomp_answer_results=answer_results,
sub_question_results=answer_results,
log_messages=[
f"{now_start} -- Main - Ingest initial processed sub questions, Time taken: {now_end - now_start}"
],

View File

@@ -6,17 +6,17 @@ from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_content
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.states import (
SearchSQState,
)
from onyx.agents.agent_search.deep_search_a.main__graph.models import (
from onyx.agents.agent_search.deep_search_a.main.models import (
AgentRefinedMetrics,
)
from onyx.agents.agent_search.deep_search_a.main__graph.operations import (
from onyx.agents.agent_search.deep_search_a.main.operations import (
dispatch_subquestion,
)
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import BaseDecompUpdate
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import BaseDecompUpdate
from onyx.agents.agent_search.models import AgentSearchConfig
from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
build_history_prompt,

View File

@@ -1,11 +1,11 @@
from typing import TypedDict
from onyx.agents.agent_search.core_state import CoreState
from onyx.agents.agent_search.deep_search_a.main__graph.states import BaseDecompUpdate
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import BaseDecompUpdate
from onyx.agents.agent_search.deep_search_a.main.states import (
DecompAnswersUpdate,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import (
InitialAnswerUpdate,
)

View File

@@ -6,14 +6,14 @@ from typing import Literal
from langchain_core.runnables import RunnableConfig
from langgraph.types import Send
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionInput,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionOutput,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.deep_search_a.main.states import (
RequireRefinedAnswerUpdate,
)
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -2,45 +2,45 @@ from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agents.agent_search.deep_search_a.initial__retrieval_sub_answers__subgraph.graph_builder import (
from onyx.agents.agent_search.deep_search_a.initial.initial_answer_generation.graph_builder import (
initial_search_sq_subgraph_builder,
)
from onyx.agents.agent_search.deep_search_a.main__graph.edges import (
from onyx.agents.agent_search.deep_search_a.main.edges import (
continue_to_refined_answer_or_end,
)
from onyx.agents.agent_search.deep_search_a.main__graph.edges import (
from onyx.agents.agent_search.deep_search_a.main.edges import (
parallelize_refined_sub_question_answering,
)
from onyx.agents.agent_search.deep_search_a.main__graph.edges import (
from onyx.agents.agent_search.deep_search_a.main.edges import (
route_initial_tool_choice,
)
from onyx.agents.agent_search.deep_search_a.main__graph.nodes.agent_logging import (
from onyx.agents.agent_search.deep_search_a.main.nodes.agent_logging import (
agent_logging,
)
from onyx.agents.agent_search.deep_search_a.main__graph.nodes.agent_search_start import (
from onyx.agents.agent_search.deep_search_a.main.nodes.agent_search_start import (
agent_search_start,
)
from onyx.agents.agent_search.deep_search_a.main__graph.nodes.answer_comparison import (
from onyx.agents.agent_search.deep_search_a.main.nodes.answer_comparison import (
answer_comparison,
)
from onyx.agents.agent_search.deep_search_a.main__graph.nodes.entity_term_extraction_llm import (
from onyx.agents.agent_search.deep_search_a.main.nodes.entity_term_extraction_llm import (
entity_term_extraction_llm,
)
from onyx.agents.agent_search.deep_search_a.main__graph.nodes.generate_refined_answer import (
from onyx.agents.agent_search.deep_search_a.main.nodes.generate_refined_answer import (
generate_refined_answer,
)
from onyx.agents.agent_search.deep_search_a.main__graph.nodes.ingest_refined_answers import (
from onyx.agents.agent_search.deep_search_a.main.nodes.ingest_refined_answers import (
ingest_refined_answers,
)
from onyx.agents.agent_search.deep_search_a.main__graph.nodes.refined_answer_decision import (
from onyx.agents.agent_search.deep_search_a.main.nodes.refined_answer_decision import (
refined_answer_decision,
)
from onyx.agents.agent_search.deep_search_a.main__graph.nodes.refined_sub_question_creation import (
from onyx.agents.agent_search.deep_search_a.main.nodes.refined_sub_question_creation import (
refined_sub_question_creation,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainInput
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.refinement__consolidate_sub_answers__subgraph.graph_builder import (
from onyx.agents.agent_search.deep_search_a.main.states import MainInput
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.deep_search_a.refininement.sub_answer_consolidation.graph_builder import (
answer_refined_query_graph_builder,
)
from onyx.agents.agent_search.orchestration.nodes.basic_use_tool_response import (

View File

@@ -3,13 +3,13 @@ from typing import cast
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.main__graph.models import (
from onyx.agents.agent_search.deep_search_a.main.models import (
AgentAdditionalMetrics,
)
from onyx.agents.agent_search.deep_search_a.main__graph.models import AgentTimings
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainOutput
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main.models import AgentTimings
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import MainOutput
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.models import AgentSearchConfig
from onyx.agents.agent_search.shared_graph_utils.models import CombinedAgentMetrics
from onyx.db.chat import log_agent_metrics
@@ -85,7 +85,7 @@ def agent_logging(state: MainState, config: RunnableConfig) -> MainOutput:
db_session = agent_a_config.db_session
chat_session_id = agent_a_config.chat_session_id
primary_message_id = agent_a_config.message_id
sub_question_answer_results = state.decomp_answer_results
sub_question_answer_results = state.sub_question_results
log_agent_sub_question_results(
db_session=db_session,

View File

@@ -3,9 +3,9 @@ from typing import cast
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main__graph.states import RoutingDecision
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.deep_search_a.main.states import RoutingDecision
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -3,7 +3,7 @@ from typing import Literal
from langgraph.types import Command
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main.states import MainState
def agent_path_routing(

View File

@@ -3,11 +3,11 @@ from typing import cast
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import (
ExploratorySearchUpdate,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.models import AgentSearchConfig
from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
build_history_prompt,

View File

@@ -5,9 +5,9 @@ from langchain_core.callbacks.manager import dispatch_custom_event
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import AnswerComparison
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import AnswerComparison
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.models import AgentSearchConfig
from onyx.agents.agent_search.shared_graph_utils.prompts import ANSWER_COMPARISON_PROMPT
from onyx.chat.models import RefinedAnswerImprovement

View File

@@ -7,11 +7,11 @@ from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_content
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import (
InitialAnswerUpdate,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.models import AgentSearchConfig
from onyx.agents.agent_search.shared_graph_utils.prompts import DIRECT_LLM_PROMPT
from onyx.agents.agent_search.shared_graph_utils.utils import (

View File

@@ -6,11 +6,11 @@ from typing import cast
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import (
EntityTermExtractionUpdate,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.models import AgentSearchConfig
from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
trim_prompt_piece,

View File

@@ -4,11 +4,11 @@ from typing import cast
from langchain_core.messages import HumanMessage
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import (
InitialAnswerBASEUpdate,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.models import AgentSearchConfig
from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
trim_prompt_piece,

View File

@@ -7,16 +7,16 @@ from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_content
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.main__graph.models import (
from onyx.agents.agent_search.deep_search_a.main.models import (
AgentRefinedMetrics,
)
from onyx.agents.agent_search.deep_search_a.main__graph.operations import get_query_info
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.operations import (
from onyx.agents.agent_search.deep_search_a.main.operations import get_query_info
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.operations import (
remove_document_citations,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.deep_search_a.main.states import (
RefinedAnswerUpdate,
)
from onyx.agents.agent_search.models import AgentSearchConfig
@@ -36,7 +36,7 @@ from onyx.agents.agent_search.shared_graph_utils.prompts import (
REVISED_RAG_PROMPT_NO_SUB_QUESTIONS,
)
from onyx.agents.agent_search.shared_graph_utils.prompts import (
SUB_QUESTION_ANSWER_TEMPLATE,
SUB_QUESTION_ANSWER_TEMPLATE_REVISED,
)
from onyx.agents.agent_search.shared_graph_utils.prompts import UNKNOWN_ANSWER
from onyx.agents.agent_search.shared_graph_utils.utils import (
@@ -119,7 +119,7 @@ def generate_refined_answer(
else:
revision_doc_effectiveness = 10.0
decomp_answer_results = state.decomp_answer_results
decomp_answer_results = state.sub_question_results
# revised_answer_results = state.refined_decomp_answer_results
answered_qa_list: list[str] = []
@@ -141,17 +141,20 @@ def generate_refined_answer(
and len(decomp_answer_result.answer) > 0
and decomp_answer_result.answer != UNKNOWN_ANSWER
):
if question_level == 0:
initial_good_sub_questions.append(decomp_answer_result.question)
sub_question_type = "initial"
else:
new_revised_good_sub_questions.append(decomp_answer_result.question)
sub_question_type = "refined"
answered_qa_list.append(
SUB_QUESTION_ANSWER_TEMPLATE.format(
SUB_QUESTION_ANSWER_TEMPLATE_REVISED.format(
sub_question=decomp_answer_result.question,
sub_answer=decomp_answer_result.answer,
sub_question_nr=sub_question_nr,
sub_question_type=sub_question_type,
)
)
if question_level == 0:
initial_good_sub_questions.append(decomp_answer_result.question)
else:
new_revised_good_sub_questions.append(decomp_answer_result.question)
sub_question_nr += 1
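Compared to SUB_QUESTION_ANSWER_TEMPLATE, the revised template also receives a sub_question_type of "initial" or "refined". The template text itself is not part of this diff; a hypothetical stand-in that is compatible with the format call above:

# Hypothetical stand-in for SUB_QUESTION_ANSWER_TEMPLATE_REVISED; the real prompt
# text lives in shared_graph_utils/prompts.py and is not shown in this diff.
SUB_QUESTION_ANSWER_TEMPLATE_REVISED = (
    "Sub-question {sub_question_nr} ({sub_question_type}): {sub_question}\n"
    "Sub-answer: {sub_answer}\n"
)

print(
    SUB_QUESTION_ANSWER_TEMPLATE_REVISED.format(
        sub_question="What is the name of the river that flows through Paris?",
        sub_answer="The Seine.",
        sub_question_nr=1,
        sub_question_type="initial",
    )
)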

View File

@@ -1,10 +1,10 @@
from datetime import datetime
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionOutput,
)
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import (
DecompAnswersUpdate,
)
from onyx.agents.agent_search.shared_graph_utils.operators import (
@@ -34,7 +34,7 @@ def ingest_refined_answers(
# Deduping is done by the documents operator for the main graph
# so we might not need to dedup here
documents=dedup_inference_sections(documents, []),
decomp_answer_results=answer_results,
sub_question_results=answer_results,
log_messages=[
f"{now_start} -- Main - Ingest refined answers, Time taken: {now_end - now_start}"
],

View File

@@ -3,9 +3,9 @@ from typing import cast
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.deep_search_a.main.states import (
RequireRefinedAnswerUpdate,
)
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -6,17 +6,17 @@ from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_content
from langchain_core.runnables import RunnableConfig
from onyx.agents.agent_search.deep_search_a.main__graph.models import (
from onyx.agents.agent_search.deep_search_a.main.models import (
FollowUpSubQuestion,
)
from onyx.agents.agent_search.deep_search_a.main__graph.operations import (
from onyx.agents.agent_search.deep_search_a.main.operations import (
dispatch_subquestion,
)
from onyx.agents.agent_search.deep_search_a.main__graph.operations import logger
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.operations import logger
from onyx.agents.agent_search.deep_search_a.main.states import (
FollowUpSubQuestionsUpdate,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import MainState
from onyx.agents.agent_search.deep_search_a.main.states import MainState
from onyx.agents.agent_search.models import AgentSearchConfig
from onyx.agents.agent_search.shared_graph_utils.agent_prompt_ops import (
build_history_prompt,
@@ -64,7 +64,7 @@ def refined_sub_question_creation(
entity_retlation_term_extractions
)
initial_question_answers = state.decomp_answer_results
initial_question_answers = state.sub_question_results
addressed_question_list = [
x.question for x in initial_question_answers if x.verified_high_quality

View File

@@ -6,14 +6,14 @@ from typing import TypedDict
from pydantic import BaseModel
from onyx.agents.agent_search.core_state import CoreState
from onyx.agents.agent_search.deep_search_a.main__graph.models import AgentBaseMetrics
from onyx.agents.agent_search.deep_search_a.main__graph.models import (
from onyx.agents.agent_search.deep_search_a.main.models import AgentBaseMetrics
from onyx.agents.agent_search.deep_search_a.main.models import (
AgentRefinedMetrics,
)
from onyx.agents.agent_search.deep_search_a.main__graph.models import (
from onyx.agents.agent_search.deep_search_a.main.models import (
FollowUpSubQuestion,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.models import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.models import (
ExpandedRetrievalResult,
)
from onyx.agents.agent_search.orchestration.states import ToolCallUpdate
@@ -107,7 +107,7 @@ class DecompAnswersUpdate(LoggerUpdate):
cited_docs: Annotated[
list[InferenceSection], dedup_inference_sections
] = [] # cited docs from sub-answers are used for answer context
decomp_answer_results: Annotated[
sub_question_results: Annotated[
list[QuestionAnswerResults], dedup_question_answer_results
] = []
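Only the field name changes here; the Annotated reducer stays, and it is what lets parallel sub-answer nodes fan their results back into one deduplicated list on the main state. A rough, self-contained sketch of how such a reducer shape works, with simplified stand-in types (the repo's real QuestionAnswerResults model and dedup_question_answer_results function are defined elsewhere and may differ):

from typing import Annotated

from pydantic import BaseModel


class QuestionAnswerResult(BaseModel):
    # simplified stand-in for the repo's QuestionAnswerResults model
    question_id: str
    answer: str


def dedup_question_answer_results(
    existing: list[QuestionAnswerResult], new: list[QuestionAnswerResult]
) -> list[QuestionAnswerResult]:
    # keep the first result seen per question_id; the graph framework applies
    # this reducer whenever a node returns a value for sub_question_results
    seen = {result.question_id: result for result in existing}
    for result in new:
        seen.setdefault(result.question_id, result)
    return list(seen.values())


class DecompAnswersUpdate(BaseModel):
    sub_question_results: Annotated[
        list[QuestionAnswerResult], dedup_question_answer_results
    ] = []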

View File

@@ -3,10 +3,10 @@ from datetime import datetime
from langgraph.types import Send
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionInput,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalInput,
)
from onyx.utils.logger import setup_logger

View File

@@ -2,31 +2,31 @@ from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.nodes.answer_check import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.nodes.answer_check import (
answer_check,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.nodes.answer_generation import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.nodes.answer_generation import (
answer_generation,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.nodes.format_answer import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.nodes.format_answer import (
format_answer,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.nodes.ingest_retrieval import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.nodes.ingest_retrieval import (
ingest_retrieval,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionInput,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionOutput,
)
from onyx.agents.agent_search.deep_search_a.initial__individual_sub_answer__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.initial.individual_sub_answer_generation.states import (
AnswerQuestionState,
)
from onyx.agents.agent_search.deep_search_a.refinement__consolidate_sub_answers__subgraph.edges import (
from onyx.agents.agent_search.deep_search_a.refininement.sub_answer_consolidation.edges import (
send_to_expanded_refined_retrieval,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.graph_builder import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.graph_builder import (
expanded_retrieval_graph_builder,
)
from onyx.utils.logger import setup_logger

View File

@@ -4,10 +4,10 @@ from typing import cast
from langchain_core.runnables.config import RunnableConfig
from langgraph.types import Send
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalState,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
RetrievalInput,
)
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -2,37 +2,37 @@ from langgraph.graph import END
from langgraph.graph import START
from langgraph.graph import StateGraph
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.edges import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.edges import (
parallel_retrieval_edge,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.nodes.doc_reranking import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.nodes.doc_reranking import (
doc_reranking,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.nodes.doc_retrieval import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.nodes.doc_retrieval import (
doc_retrieval,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.nodes.doc_verification import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.nodes.doc_verification import (
doc_verification,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.nodes.dummy import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.nodes.dummy import (
dummy,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.nodes.expand_queries import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.nodes.expand_queries import (
expand_queries,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.nodes.format_results import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.nodes.format_results import (
format_results,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.nodes.verification_kickoff import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.nodes.verification_kickoff import (
verification_kickoff,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalInput,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalOutput,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalState,
)
from onyx.agents.agent_search.shared_graph_utils.utils import get_test_config

View File

@@ -3,13 +3,13 @@ from typing import cast
from langchain_core.runnables.config import RunnableConfig
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.operations import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.operations import (
logger,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
DocRerankingUpdate,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalState,
)
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -3,13 +3,13 @@ from typing import cast
from langchain_core.runnables.config import RunnableConfig
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.operations import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.operations import (
logger,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
DocRetrievalUpdate,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
RetrievalInput,
)
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -3,10 +3,10 @@ from typing import cast
from langchain_core.messages import HumanMessage
from langchain_core.runnables.config import RunnableConfig
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
DocVerificationInput,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
DocVerificationUpdate,
)
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -1,9 +1,9 @@
from langchain_core.runnables.config import RunnableConfig
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalState,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
QueryExpansionUpdate,
)

View File

@@ -5,16 +5,16 @@ from langchain_core.messages import HumanMessage
from langchain_core.messages import merge_message_runs
from langchain_core.runnables.config import RunnableConfig
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.operations import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.operations import (
dispatch_subquery,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.operations import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.operations import (
logger,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalInput,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
QueryExpansionUpdate,
)
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -3,16 +3,16 @@ from typing import cast
from langchain_core.callbacks.manager import dispatch_custom_event
from langchain_core.runnables.config import RunnableConfig
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.models import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.models import (
ExpandedRetrievalResult,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.operations import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.operations import (
calculate_sub_question_retrieval_stats,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalState,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalUpdate,
)
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -4,10 +4,10 @@ from langchain_core.runnables.config import RunnableConfig
from langgraph.types import Command
from langgraph.types import Send
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
DocVerificationInput,
)
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.states import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.states import (
ExpandedRetrievalState,
)

View File

@@ -4,7 +4,7 @@ from typing import Annotated
from pydantic import BaseModel
from onyx.agents.agent_search.core_state import SubgraphCoreState
from onyx.agents.agent_search.deep_search_a.util__expanded_retrieval__subgraph.models import (
from onyx.agents.agent_search.deep_search_a.shared.expanded_retrieval.models import (
ExpandedRetrievalResult,
)
from onyx.agents.agent_search.shared_graph_utils.models import QueryResult

View File

@@ -9,10 +9,10 @@ from langgraph.graph.state import CompiledStateGraph
from onyx.agents.agent_search.basic.graph_builder import basic_graph_builder
from onyx.agents.agent_search.basic.states import BasicInput
from onyx.agents.agent_search.deep_search_a.main__graph.graph_builder import (
from onyx.agents.agent_search.deep_search_a.main.graph_builder import (
main_graph_builder as main_graph_builder_a,
)
from onyx.agents.agent_search.deep_search_a.main__graph.states import (
from onyx.agents.agent_search.deep_search_a.main.states import (
MainInput as MainInput_a,
)
from onyx.agents.agent_search.models import AgentSearchConfig

View File

@@ -2,14 +2,14 @@ from typing import Literal
from pydantic import BaseModel
from onyx.agents.agent_search.deep_search_a.main__graph.models import (
from onyx.agents.agent_search.deep_search_a.main.models import (
AgentAdditionalMetrics,
)
from onyx.agents.agent_search.deep_search_a.main__graph.models import AgentBaseMetrics
from onyx.agents.agent_search.deep_search_a.main__graph.models import (
from onyx.agents.agent_search.deep_search_a.main.models import AgentBaseMetrics
from onyx.agents.agent_search.deep_search_a.main.models import (
AgentRefinedMetrics,
)
from onyx.agents.agent_search.deep_search_a.main__graph.models import AgentTimings
from onyx.agents.agent_search.deep_search_a.main.models import AgentTimings
from onyx.context.search.models import InferenceSection
from onyx.tools.models import SearchQueryInfo

View File

@@ -1,9 +1,14 @@
# The prompts for the agentic framework:
UNKNOWN_ANSWER = "I do not have enough information to answer this question."
NO_RECOVERED_DOCS = "No relevant information recovered"
DATE_PROMPT = """Today is {date}.\n\n"""
HISTORY_PROMPT = """\n
For more context, here is the history of the conversation so far that preceded this question:
\n ------- \n
@@ -11,6 +16,7 @@ For more context, here is the history of the conversation so far that preceded t
\n ------- \n\n
"""
REWRITE_PROMPT_MULTI_ORIGINAL = """ \n
Please convert an initial user question into 2-3 more appropriate short and pointed search queries for retrieval from a
document store. Particularly, try to think about resolving ambiguities and make the search queries more specific,
@@ -26,6 +32,7 @@ REWRITE_PROMPT_MULTI_ORIGINAL = """ \n
...
queries: """
REWRITE_PROMPT_MULTI = """ \n
Please create a list of 2-3 sample documents that could answer an original question. Each document
should be about as long as the original question. \n
@@ -35,6 +42,7 @@ REWRITE_PROMPT_MULTI = """ \n
\n ------- \n
Formulate the sample documents separated by '--' (Do not say 'Document 1: ...', just write the text): """
# The prompt is only used if there is no persona prompt, so the placeholder is ''
BASE_RAG_PROMPT = (
""" \n
@@ -48,6 +56,7 @@ BASE_RAG_PROMPT = (
question based on the context, say """
+ f'"{UNKNOWN_ANSWER}"'
+ """.
It is a matter of life and death that you do NOT use your internal knowledge, just the provided
information!
@@ -63,6 +72,7 @@ BASE_RAG_PROMPT = (
"""
)
BASE_RAG_PROMPT_v2 = (
""" \n
{date_prompt}
@@ -84,8 +94,10 @@ It is important that the citation is close to the information it supports.
Proper citations are very important to the user!\n\n\n
For your general information, here is the ultimate motivation:
\n--\n {original_question} \n--\n
\n\n
\n--\n
{original_question}
\n--\n
\n
And here is the actual question I want you to answer based on the context above (with the motivation in mind):
\n--\n {question} \n--\n
@@ -97,11 +109,13 @@ Answer:
"""
)
SUB_CHECK_YES = "yes"
SUB_CHECK_NO = "no"
SUB_CHECK_PROMPT = (
"""
"""\n
Your task is to see whether a given answer addresses a given question.
Please do not use any internal knowledge you may have - just focus on whether the answer
as given seems to largely address the question as given, or at least addresses part of the question.
@@ -135,23 +149,32 @@ BASE_CHECK_PROMPT = """ \n
\n ------- \n
Please answer with yes or no:"""
VERIFIER_PROMPT = """
You are supposed to judge whether a document text contains data or information that is potentially relevant
for a question. It does not have to be fully relevant, but check whether it has some information that
would help - possibly in conjunction with other documents - to address the question.
Be careful that you do not use a document where you are not sure whether the text applies to the objects
or entities that are relevant for the question. For example, a book about chess could have a long passage
discussing the psychology of chess without - within the passage - mentioning chess. If a question is now
asked about the psychology of football, one could be tempted to use the document as it does discuss
psychology in sports. However, it is NOT about football and should not be deemed relevant. Please
consider this logic.
Here is a document text that you can take as a fact:
--
DOCUMENT INFORMATION:
\n ------- \n
{document_content}
--
\n ------- \n
Do you think that this document text is useful and relevant to answer the following question?
--
QUESTION:
\n ------- \n
{question}
--
\n ------- \n
Please answer with 'yes' or 'no':
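# Illustrative sketch, not part of this change: a defensive way to read the VERIFIER_PROMPT
# verdict, assuming the model replies with a bare 'yes' or 'no'. `example_verdict` is made up.
example_verdict = "Yes."
document_is_relevant = example_verdict.strip().lower().startswith("yes")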
@@ -201,8 +224,14 @@ MODIFIED_RAG_PROMPT = (
Also pay particular attention to the sub-questions and their answers, as they may enrich the answer.
Again, only use the provided context and do not use your internal knowledge!
\nQuestion: {question}
\nContext: {combined_context} \n
\nQuestion:
\n ------- \n
{question}
\n ------- \n
\nContext:
\n ------- \n
{combined_context}
\n ------- \n
Answer:"""
)
@@ -230,7 +259,6 @@ ERT_INFORMED_DEEP_DECOMPOSE_PROMPT = """ \n
Additional Guidelines:
- The sub-questions should be specific to the question and provide richer context for the question,
resolve ambiguities, or address shortcomings of the initial answer
- Each sub-question - when answered - should be relevant for the answer to the original question
- The sub-questions should be free from comparisons, ambiguities, judgements, aggregations, or any
other complications that may require extra context.
@@ -240,7 +268,8 @@ ERT_INFORMED_DEEP_DECOMPOSE_PROMPT = """ \n
- initial question: "What is the capital of France?"
- bad sub-question: "What is the name of the river there?"
- good sub-question: "What is the name of the river that flows through Paris?"
- For each sub-question, please provide a short explanation for why it is a good sub-question. So
For each sub-question, please provide a short explanation for why it is a good sub-question. So
generate a list of dictionaries with the following format:
[{{"sub_question": <sub-question>, "explanation": <explanation>, "search_term": <rewrite the
sub-question for use as a search phrase in the document store>}}, ...]
@@ -284,6 +313,7 @@ ERT_INFORMED_DEEP_DECOMPOSE_PROMPT = """ \n
"search_term": <rewrite the sub-question using as a search phrase for the document store>}},
...]}} """
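# Illustrative sketch, not part of this change: the decomposition prompts above request a
# JSON-like list of dicts; parsing could look roughly like this. The exact response shape
# is an assumption, and `example_decomposition` is a made-up value.
import json

example_decomposition = (
    '[{"sub_question": "What is the name of the river that flows through Paris?", '
    '"explanation": "resolves the ambiguous reference", '
    '"search_term": "river flowing through Paris"}]'
)
parsed_sub_questions = json.loads(example_decomposition)
search_terms = [entry["search_term"] for entry in parsed_sub_questions]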
DOC_INFORMED_DEEP_DECOMPOSE_PROMPT = """ \n
An initial user question needs to be answered. An initial answer has been provided but it wasn't quite
good enough. Also, some sub-questions had been answered and this information has been used to provide
@@ -370,6 +400,7 @@ DOC_INFORMED_DEEP_DECOMPOSE_PROMPT = """ \n
...
"""
DEEP_DECOMPOSE_PROMPT_WITH_ENTITIES = """ \n
An initial user question needs to be answered. An initial answer has been provided but it wasn't quite
good enough. Also, some sub-questions had been answered and this information has been used to provide
@@ -525,10 +556,14 @@ SUB_QUESTION_EXPLANATION_RANKER_PROMPT = """-------
motivation comes first.
Here is the original question:
\n\n {original_question} \n\n
\n\n Here is the list of sub-question motivations:
\n\n {sub_question_explanations} \n\n
----------------
\n -------\n
{original_question}
\n -------\n
Here is the list of sub-question motivations:
\n -------\n
{sub_question_explanations}
\n -------\n
Please think step by step and then generate the ranked list of motivations.
@@ -554,9 +589,9 @@ answer the original question. The purpose for this decomposition may be to
If you think that a decomposition is not needed or helpful, please just return an empty string. That is ok too.
Here is the initial question:
-------
\n -------\n
{question}
-------
\n -------\n
{history}
Please formulate your answer as a newline-separated list of questions like so:
@@ -566,6 +601,7 @@ Please formulate your answer as a newline-separated list of questions like so:
Answer:"""
INITIAL_DECOMPOSITION_PROMPT_QUESTIONS_AFTER_SEARCH = """
If you think it is helpful, please decompose an initial user question into no more than 3 appropriate sub-questions that help to
answer the original question. The purpose for this decomposition may be to
@@ -589,14 +625,14 @@ disambiguations are most important!
2) If you think that a decomposition is not needed or helpful, please just return an empty string. That is very much ok too.
Here are the sample docs to give you some context:
-------
\n -------\n
{sample_doc_str}
-------
\n -------\n
And here is the initial question that you should think about decomposing:
-------
\n -------\n
{question}
-------
\n -------\n
{history}
@@ -608,6 +644,7 @@ Please formulate your answer as a newline-separated list of questions like so:
Answer:"""
INITIAL_DECOMPOSITION_PROMPT = """ \n
Please decompose an initial user question into 2 or 3 appropriate sub-questions that help to
answer the original question. The purpose for this decomposition is to isolate individual entities
@@ -631,6 +668,7 @@ INITIAL_DECOMPOSITION_PROMPT = """ \n
Answer:
"""
INITIAL_RAG_BASE_PROMPT = (
""" \n
You are an assistant for question-answering tasks. Use the information provided below - and only the
@@ -653,7 +691,8 @@ Here is the contextual information from the document store:
{context} \n\n\n
\n -------\n
And here is the question I want you to answer based on the context above (with the motivation in mind):
\n--\n {question} \n--\n
\n -------\n
{question} \n-------\n
Answer:"""
)
@@ -673,9 +712,9 @@ the 'LLM' option.
{history}
Here is the initial question:
-------
\n -------\n
{question}
-------
\n -------\n
Please decide whether to use the agent search or the LLM to answer the question. Choose from two choices,
'research' or 'LLM'.
@@ -698,14 +737,14 @@ you know the answer/can handle the request, you should choose the 'LLM' option.
{history}
Here is the initial question:
-------
\n -------\n
{question}
-------
\n -------\n
Here is the sample of documents that were retrieved from a document store:
-------
\n -------\n
{sample_doc_str}
-------
\n -------\n
Please decide whether to use the agent search ('research') or the LLM to answer the question. Choose from two choices,
'research' or 'LLM'.
@@ -725,16 +764,34 @@ You are an assistant for question-answering tasks. Here is more information abou
\n-------\n
"""
SUB_QUESTION_ANSWER_TEMPLATE = """
SUB_QUESTION_ANSWER_TEMPLATE = """\n
Sub-Question: Q{sub_question_nr}\n Sub-Question:\n - \n{sub_question}\n --\nAnswer:\n -\n {sub_answer}\n\n
"""
SUB_QUESTION_ANSWER_TEMPLATE_REVISED = """
Sub-Question: Q{sub_question_nr}\n Type: {level_type}\n Sub-Question:\n
SUB_QUESTION_ANSWER_TEMPLATE_REVISED = """\n
Sub-Question: Q{sub_question_nr}\n
Type:
\n----\n
{sub_question_type}
\n----\n
Sub-Question:
\n----\n
{sub_question}
\n----\n
\nAnswer:
\n----\n
{sub_answer}
\n----\n
\n
"""
SUB_QUESTION_ANSWER_TEMPLATE_REVISED = """\n
Sub-Question: Q{sub_question_nr}\n Type: {sub_question_type}\n Sub-Question:\n
- \n{sub_question}\n --\nAnswer:\n -\n {sub_answer}\n\n
"""
SUB_QUESTION_SEARCH_RESULTS_TEMPLATE = """
SUB_QUESTION_SEARCH_RESULTS_TEMPLATE = """\n
Sub-Question: Q{sub_question_nr}\n Sub-Question:\n - \n{sub_question}\n --\nRelevant Documents:\n
-\n {formatted_sub_question_docs}\n\n
"""
@@ -770,8 +827,10 @@ Try to keep your answer concise. But also highlight uncertainties you may have s
or assumptions you made.
Here is the contextual information:
\n-------\n
---------------
*Answered Sub-questions (these should really help to organize your thoughts):
\n-------\n
{answered_sub_questions}
And here is relevant document information that supports the sub-question answers, or that is relevant for the actual question:\n
@@ -781,9 +840,9 @@ And here are relevant document information that supports the sub-question answer
\n-------\n
\n
And here is the main question I want you to answer based on the information above:
\n--\n
\n-------\n
{question}
\n--\n\n
\n-------\n\n
Answer:"""
)
@@ -792,9 +851,9 @@ DIRECT_LLM_PROMPT = """ \n
{persona_specification}
Please answer the following question/address the request:
\n--\n
\n-------\n
{question}
\n--\n\n
\n-------\n\n
Answer:"""
INITIAL_RAG_PROMPT = (
@@ -804,8 +863,7 @@ INITIAL_RAG_PROMPT = (
Use the information provided below - and only the provided information - to answer the provided main question.
The information provided below consists of:
1) a number of answered sub-questions - these are very important to help you organize your thoughts and your
answer
1) a number of answered sub-questions - these are very important to help you organize your thoughts and your answer
2) a number of documents that were deemed relevant for the question.
{history}
@@ -832,20 +890,22 @@ Try to keep your answer concise. But also highlight uncertainties you may have s
or assumptions you made.
Here is the contextual information:
\n-------\n
---------------
*Answered Sub-questions (these should really matter!):
\n-------\n
{answered_sub_questions}
\n-------\n
And here is relevant document information that supports the sub-question answers, or that is relevant for the actual question:\n
{relevant_docs}
\n-------\n
\n
{relevant_docs}
\n-------\n
And here is the question I want you to answer based on the information above:
\n--\n
\n-------\n
{question}
\n--\n\n
\n-------\n\n
Please keep your answer brief and concise, and focus on facts and data.
@@ -854,7 +914,8 @@ Answer:"""
# sub_question_answer_str is empty
INITIAL_RAG_PROMPT_NO_SUB_QUESTIONS = (
"""{answered_sub_questions}
"""\n
{answered_sub_questions}
{persona_specification}
{date_prompt}
@@ -885,9 +946,9 @@ Here are is the relevant context information:
\n-------\n
And here is the question I want you to answer based on the context above
\n--\n
\n-------\n
{question}
\n--\n
\n-------\n
Please keep your answer brief and concise, and focus on facts and data.
@@ -904,11 +965,11 @@ Use the information provided below - and only the provided information - to writ
The information provided below consists of:
1) an initial answer that was given but found to be lacking in some way.
2) a number of answered sub-questions - these are very important(!) and definitely should help you to answer
the main question. Note that the sub-questions have a type, 'initial' and 'revised'. The 'initial'
ones were available for the initial answer, and the 'revised' were not, they are new. So please use
the 'revised' sub-questions in particular to update/extend/correct the initial answer!
the main question. Note that the sub-questions have a type, 'initial' and 'refined'. The 'initial'
ones were available for the creation of the initial answer, but the 'refined' ones were not; they are new. So please use
the 'refined' sub-questions in particular to update/extend/correct/enrich the initial answer and to add
more details/new facts!
3) a number of documents that were deemed relevant for the question. This is the context that you use largely for
citations (see below). So consider the answers to the sub-questions as guidelines to construct your new answer, but
@@ -942,25 +1003,27 @@ Try to keep your answer concise. But also highlight uncertainties you may have s
or assumptions you made.
Here is the contextual information:
\n-------\n
---------------
*Initial Answer that was found to be lacking:
\n-------\n
{initial_answer}
\n-------\n
*Answered Sub-questions (these should really help you to research your answer! They also contain questions/answers
that were not available when the original answer was constructed):
{answered_sub_questions}
And here are the relevant documents that support the sub-question answers, and that are relevant for the actual question:\n
{relevant_docs}
\n-------\n
{relevant_docs}
\n-------\n
\n
Lastly, here is the main question I want you to answer based on the information above:
\n--\n
\n-------\n
{question}
\n--\n\n
\n-------\n
Please keep your answer brief and concise, and focus on facts and data.
@@ -969,7 +1032,8 @@ Answer:"""
# sub_question_answer_str is empty
REVISED_RAG_PROMPT_NO_SUB_QUESTIONS = (
"""{answered_sub_questions}\n
"""\n
{answered_sub_questions}\n
{persona_specification}
{date_prompt}
Use the information provided below - and only the
@@ -982,9 +1046,9 @@ The information provided below consists of:
It is critical that you provide proper inline citations to documents in the format [[D1]](), [[D2]](), [[D3]](), etc!
It is important that the citation is close to the information it supports. If you have multiple
citations, please cite for example as [[D1]]()[[D3]](), or [[D2]]()[[D4]](), etc. Citations are very important for the user!\n\n
\n-------\n
{history}
\n-------\n
IMPORTANT RULES:
- If you cannot reliably answer the question solely using the provided information, say that you cannot reliably answer.
You may give some additional facts you learned, but do not try to invent an answer.
@@ -1000,21 +1064,22 @@ Try to keep your answer concise. But also highlight uncertainties you may have s
or assumptions you made.
Here is the contextual information:
\n-------\n
---------------
*Initial Answer that was found to be lacking:
\n-------\n
{initial_answer}
\n-------\n
And here is relevant document information that supports the sub-question answers, or that is relevant for the actual question:\n
\n-------\n
{relevant_docs}
\n-------\n
\n
Lastly, here is the question I want you to answer based on the information above:
\n--\n
\n-------\n
{question}
\n--\n\n
\n-------\n\n
Please keep your answer brief and concise, and focus on facts and data.
Answer:"""
@@ -1056,53 +1121,57 @@ Please format your answer as a json object in the following format:
}}
}}
"""
ANSWER_COMPARISON_PROMPT = """
For the given question, please compare the initial answer and the refined answer and determine if
the refined answer is substantially better than the initial answer. Better could mean:
the refined answer is substantially better than the initial answer, not just a bit better. Better could mean:
- additional information
- more comprehensive information
- more concise information
- more structured information
- more details
- new bullet points
- substantially more document citations ([[D1]](), [[D2]](), [[D3]](), etc.)
Put yourself in the shoes of the user and think about whether the refined answer is really substantially
better than the initial answer.
better than the initial answer and delivers genuinely new insights.
Here is the question:
--
\n-------\n
{question}
--
\n-------\n
Here is the initial answer:
--
\n-------\n
{initial_answer}
--
\n-------\n
Here is the refined answer:
--
\n-------\n
{refined_answer}
--
\n-------\n
With these criteria in mind, is the refined answer substantially better than the initial answer?
Please answer with a simple 'yes' or 'no'.
"""
HISTORY_CONTEXT_SUMMARY_PROMPT = """\n
{persona_specification}
You need to summarize the key parts of the history of a conversation between a user and an agent
strictly for the purposed of providing the suitable context for a question.
Here is the question:
\n--\n
You need to summarize the key parts of the history of a conversation between a user and an agent. The
summary has two purposes:
1) providing suitable context for a new question, and
2) capturing the key information that was discussed and that the user may have a follow-up question about.
\n-------\n
{question}
\n--\n
\n-------\n
And here is the history:
\n--\n
\n-------\n
{history}
\n--\n
\n-------\n
Please provide a summarized context from the history so that the question makes sense and can - with
suitable extra information - be answered.