cleanup
@@ -125,10 +125,10 @@ class OneShotQARequest(ChunkContext):
     # will also disable Thread-based Rewording if specified
     query_override: str | None = None

-    # If True, skips generative an AI response to the search query
+    # If True, skips generating an AI response to the search query
    skip_gen_ai_answer_generation: bool = False

-    # If True, uses pro search instead of basic search
+    # If True, uses agentic search instead of basic search
    use_agentic_search: bool = False

     @model_validator(mode="after")
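The two comment fixes above sit on a Pydantic model that also carries an after-validator. For orientation, a minimal self-contained sketch of that pattern follows (Pydantic v2); the class name and the cross-field rule are invented for illustration, and only the three fields plus the @model_validator(mode="after") hook mirror the hunk:

# A minimal sketch of the pattern above; QARequestSketch and its
# validation rule are illustrative inventions, not the onyx model.
from pydantic import BaseModel, model_validator


class QARequestSketch(BaseModel):
    query_override: str | None = None
    # If True, skips generating an AI response to the search query
    skip_gen_ai_answer_generation: bool = False
    # If True, uses agentic search instead of basic search
    use_agentic_search: bool = False

    @model_validator(mode="after")
    def check_flags(self) -> "QARequestSketch":
        # Hypothetical rule, purely for illustration: agentic search
        # is pointless when answer generation is skipped.
        if self.use_agentic_search and self.skip_gen_ai_answer_generation:
            raise ValueError("agentic search requires answer generation")
        return self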
@@ -12,9 +12,8 @@ from onyx.agents.agent_search.orchestration.states import ToolChoiceUpdate
 # If you are using a value from the config and realize it needs to change,
 # you should add it to the state and use/update the version in the state.

-
 ## Graph Input State

 class BasicInput(BaseModel):
     # Langgraph needs a nonempty input, but we pass in all static
     # data through a RunnableConfig.
@@ -22,18 +21,11 @@ class BasicInput(BaseModel):
-

 ## Graph Output State
-

 class BasicOutput(TypedDict):
     tool_call_chunk: AIMessageChunk

-
-## Update States
-
-
 ## Graph State
-

 class BasicState(
     BasicInput,
     ToolChoiceInput,
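The comment in BasicInput describes a LangGraph constraint worth spelling out: graph input must be non-empty, so static data travels in the RunnableConfig rather than in the state. A minimal runnable sketch of that pattern follows; the placeholder field name is hypothetical, and the config["metadata"]["config"] key is assumed from the format_results hunk later in this diff:

# A sketch of the comment's point: a throwaway input state, with the
# static data passed through the RunnableConfig at invoke time.
from langchain_core.runnables import RunnableConfig
from langgraph.graph import END, START, StateGraph
from pydantic import BaseModel


class SketchInput(BaseModel):
    unused: bool = True  # non-empty placeholder; real data lives in the config


def show_static_data(state: SketchInput, config: RunnableConfig) -> dict:
    static_cfg = config["metadata"]["config"]  # static data from invoke()
    print(static_cfg)
    return {}  # no state update


builder = StateGraph(SketchInput)
builder.add_node("show_static_data", show_static_data)
builder.add_edge(START, "show_static_data")
builder.add_edge("show_static_data", END)

builder.compile().invoke(
    {"unused": True},
    config={"metadata": {"config": {"example_setting": 1}}},
)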
@@ -26,7 +26,6 @@ def process_llm_stream(
     displayed_search_results: list[LlmDoc] | None = None,
 ) -> AIMessageChunk:
     tool_call_chunk = AIMessageChunk(content="")
-    # for response in response_handler_manager.handle_llm_response(stream):

     if final_search_results and displayed_search_results:
         answer_handler: AnswerResponseHandler = CitationResponseHandler(
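Background for the tool_call_chunk = AIMessageChunk(content="") line above: LangChain message chunks support "+", so a token stream can be folded into one aggregate message. A small demonstration:

# AIMessageChunk objects concatenate with "+", merging content and
# tool-call deltas; this is how a stream collapses into one message.
from langchain_core.messages import AIMessageChunk

stream = [
    AIMessageChunk(content="Hello"),
    AIMessageChunk(content=", "),
    AIMessageChunk(content="world"),
]

aggregate = AIMessageChunk(content="")
for chunk in stream:
    aggregate = aggregate + chunk

print(aggregate.content)  # -> Hello, world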
@@ -25,16 +25,6 @@ def parallelize_initial_sub_question_answering(
     """
     edge_start_time = datetime.now()
     if len(state.initial_sub_questions) > 0:
-        # sub_question_record_ids = [subq_record.id for subq_record in state["sub_question_records"]]
-        # if len(state["sub_question_records"]) == 0:
-        #     if state["config"].use_persistence:
-        #         raise ValueError("No sub-questions found for initial decompozed questions")
-        #     else:
-        #         # in this case, we are doing retrieval on the original question.
-        #         # to make all the logic consistent, we create a new sub-question
-        #         # with the same content as the original question
-        #         sub_question_record_ids = [1] * len(state["initial_decomp_questions"])
-
         return [
             Send(
                 "answer_query_subgraph",
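The Send objects returned above are LangGraph's fan-out primitive: each Send(node_name, payload) schedules one invocation of the named node with that payload, so every sub-question is answered in parallel. A hedged sketch of the edge function's contract; the payload shape and the fallback node name are assumptions, not the onyx definitions:

# One Send per sub-question -> one parallel node invocation each.
from typing import Hashable

from langgraph.types import Send  # found in langgraph.constants in older releases


def fan_out_sub_questions(state) -> list[Send | Hashable]:
    if state.initial_sub_questions:
        return [
            Send("answer_query_subgraph", {"question": q})
            for q in state.initial_sub_questions
        ]
    # A plain node name (a Hashable) routes without fanning out.
    return ["format_initial_sub_answers"]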
@@ -83,11 +83,6 @@ def generate_initial_answer_graph_builder(test_mode: bool = False) -> StateGraph
         end_key="generate_initial_answer",
     )

-    # graph.add_edge(
-    #     start_key="retrieval_consolidation",
-    #     end_key="generate_initial_answer",
-    # )
-
     graph.add_edge(
         start_key="generate_initial_answer",
         end_key="validate_initial_answer",
@@ -23,16 +23,6 @@ def parallelize_initial_sub_question_answering(
     """
     edge_start_time = datetime.now()
     if len(state.initial_sub_questions) > 0:
-        # sub_question_record_ids = [subq_record.id for subq_record in state["sub_question_records"]]
-        # if len(state["sub_question_records"]) == 0:
-        #     if state["config"].use_persistence:
-        #         raise ValueError("No sub-questions found for initial decompozed questions")
-        #     else:
-        #         # in this case, we are doing retrieval on the original question.
-        #         # to make all the logic consistent, we create a new sub-question
-        #         # with the same content as the original question
-        #         sub_question_record_ids = [1] * len(state["initial_decomp_questions"])
-
         return [
             Send(
                 "answer_sub_question_subgraphs",
@@ -58,30 +58,11 @@ def generate_sub_answers_graph_builder() -> StateGraph:
         action=format_initial_sub_answers,
     )

-    ### Add edges ###
-
-    # raph.add_edge(start_key=START, end_key="base_raw_search_subgraph")
-
-    # graph.add_edge(
-    #     start_key="start_agent_search",
-    #     end_key="extract_entity_term",
-    # )
-
     graph.add_edge(
         start_key=START,
         end_key="decompose_orig_question",
     )

-    # graph.add_edge(
-    #     start_key="LLM",
-    #     end_key=END,
-    # )
-
-    # graph.add_edge(
-    #     start_key=START,
-    #     end_key="initial_sub_question_creation",
-    # )
-
     graph.add_conditional_edges(
         source="decompose_orig_question",
         path=parallelize_initial_sub_question_answering,
@@ -97,14 +78,4 @@ def generate_sub_answers_graph_builder() -> StateGraph:
         end_key=END,
     )

-    # graph.add_edge(
-    #     start_key="generate_refined_answer",
-    #     end_key="check_refined_answer",
-    # )
-
-    # graph.add_edge(
-    #     start_key="check_refined_answer",
-    #     end_key=END,
-    # )
-
     return graph
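Put together, the builder above is left with the shape START -> decompose_orig_question -> fan-out -> format_initial_sub_answers -> END. A condensed, runnable sketch of that wiring follows; the node names mirror the diff, while the node bodies, state fields, and payloads are simplified stand-ins:

# A self-contained sketch of the post-cleanup graph shape.
from typing import Hashable

from langgraph.graph import END, START, StateGraph
from langgraph.types import Send
from pydantic import BaseModel


class SketchState(BaseModel):
    initial_sub_questions: list[str] = []


def decompose_orig_question(state: SketchState) -> dict:
    return {"initial_sub_questions": ["sub-question 1", "sub-question 2"]}


def answer_sub_question_subgraphs(state: SketchState) -> dict:
    return {}  # each parallel branch would answer one sub-question here


def format_initial_sub_answers(state: SketchState) -> dict:
    return {}


def parallelize_initial_sub_question_answering(
    state: SketchState,
) -> list[Send | Hashable]:
    return [
        Send("answer_sub_question_subgraphs", {"initial_sub_questions": [q]})
        for q in state.initial_sub_questions
    ]


graph = StateGraph(SketchState)
graph.add_node("decompose_orig_question", decompose_orig_question)
graph.add_node("answer_sub_question_subgraphs", answer_sub_question_subgraphs)
graph.add_node("format_initial_sub_answers", format_initial_sub_answers)

graph.add_edge(start_key=START, end_key="decompose_orig_question")
graph.add_conditional_edges(
    source="decompose_orig_question",
    path=parallelize_initial_sub_question_answering,
    path_map=["answer_sub_question_subgraphs"],
)
graph.add_edge("answer_sub_question_subgraphs", "format_initial_sub_answers")
graph.add_edge("format_initial_sub_answers", END)

graph.compile().invoke({"initial_sub_questions": []})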
@@ -5,19 +5,13 @@ from onyx.agents.agent_search.deep_search.shared.expanded_retrieval.states import (
     ExpandedRetrievalInput,
 )

-## Update States
-
-
 ## Graph Input State
-

 class BaseRawSearchInput(ExpandedRetrievalInput):
     pass
-

 ## Graph Output State
-

 class BaseRawSearchOutput(OrigQuestionRetrievalUpdate):
     """
     This is a list of results even though each call of this subgraph only returns one result.
@@ -29,8 +23,6 @@ class BaseRawSearchOutput(OrigQuestionRetrievalUpdate):
-

 ## Graph State
-

 class BaseRawSearchState(
     BaseRawSearchInput, BaseRawSearchOutput, OrigQuestionRetrievalUpdate
 ):
@@ -48,16 +48,6 @@ def parallelize_initial_sub_question_answering(
 ) -> list[Send | Hashable]:
     edge_start_time = datetime.now()
     if len(state.initial_sub_questions) > 0:
-        # sub_question_record_ids = [subq_record.id for subq_record in state["sub_question_records"]]
-        # if len(state["sub_question_records"]) == 0:
-        #     if state["config"].use_persistence:
-        #         raise ValueError("No sub-questions found for initial decompozed questions")
-        #     else:
-        #         # in this case, we are doing retrieval on the original question.
-        #         # to make all the logic consistent, we create a new sub-question
-        #         # with the same content as the original question
-        #         sub_question_record_ids = [1] * len(state["initial_decomp_questions"])
-
         return [
             Send(
                 "answer_query_subgraph",
@@ -34,11 +34,8 @@ from onyx.agents.agent_search.shared_graph_utils.operators import (
 )
 from onyx.context.search.models import InferenceSection

 ### States ###

-## Update States
-
-
 class LoggerUpdate(BaseModel):
     log_messages: Annotated[list[str], add] = []

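The Annotated[list[str], add] annotation above is LangGraph's reducer pattern: updates to log_messages are combined with operator.add (list concatenation) rather than overwritten, which is what lets parallel branches all append logs safely. A self-contained sketch:

# Two nodes run in the same superstep; their log_messages updates are
# merged by the add reducer instead of conflicting.
from operator import add
from typing import Annotated

from langgraph.graph import END, START, StateGraph
from pydantic import BaseModel


class LogState(BaseModel):
    log_messages: Annotated[list[str], add] = []


def step_a(state: LogState) -> dict:
    return {"log_messages": ["a ran"]}


def step_b(state: LogState) -> dict:
    return {"log_messages": ["b ran"]}


builder = StateGraph(LogState)
builder.add_node("a", step_a)
builder.add_node("b", step_b)
builder.add_edge(START, "a")  # a and b run in parallel
builder.add_edge(START, "b")
builder.add_edge("a", END)
builder.add_edge("b", END)

result = builder.compile().invoke({"log_messages": ["start"]})
print(result["log_messages"])  # all three entries survive the parallel merge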
@@ -147,15 +144,11 @@ class RefinedQuestionDecompositionUpdate(RefinedAgentStartStats, LoggerUpdate):
-

 ## Graph Input State
-

 class MainInput(CoreState):
     pass
-

 ## Graph State
-

 class MainState(
     # This includes the core state
     MainInput,
@@ -181,7 +174,5 @@ class MainState(
-

 ## Graph Output State - presently not used
-

 class MainOutput(TypedDict):
     log_messages: list[str]
@@ -1,3 +0,0 @@
-### Models ###
-# class AnswerRetrievalStats(BaseModel):
-#     answer_retrieval_stats: dict[str, float | int]
@@ -37,8 +37,8 @@ def format_results(
     query_info = get_query_info(state.query_retrieval_results)

     graph_config = cast(GraphConfig, config["metadata"]["config"])
-    # main question docs will be sent later after aggregation and deduping with sub-question docs

+    # Main question docs will be sent later after aggregation and deduping with sub-question docs
     reranked_documents = state.reranked_documents

     if not (level == 0 and question_num == 0):
@@ -79,8 +79,6 @@ def format_results(

     if sub_question_retrieval_stats is None:
         sub_question_retrieval_stats = AgentChunkRetrievalStats()
-    # else:
-    #     sub_question_retrieval_stats = [sub_question_retrieval_stats]

     return ExpandedRetrievalUpdate(
         expanded_retrieval_result=QuestionRetrievalResult(
@@ -17,11 +17,6 @@ class GraphInputs(BaseModel):
     """Input data required for the graph execution"""

     search_request: SearchRequest
-    # contains message history for the current chat session
-    # has the following (at most one is non-None)
-    # TODO: unify this into a single message history
-    # message_history: list[PreviousMessage] | None = None
-    # single_message_history: str | None = None
     prompt_builder: AnswerPromptBuilder
     files: list[InMemoryChatFile] | None = None
     structured_response_format: dict | None = None
@@ -68,7 +68,7 @@ def llm_tool_choice(
     if chosen_tool_and_args:
         tool, tool_args = chosen_tool_and_args

-    # If we have a tool and tool args, we are redy to request a tool call.
+    # If we have a tool and tool args, we are ready to request a tool call.
     # This only happens if the tool call was forced or we are using a non-tool calling LLM.
     if tool and tool_args:
         return ToolChoiceUpdate(
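On the "ready to request a tool call" path, the update ultimately carries a tool-call message. For reference, this is roughly what a synthetic tool call looks like at the LangChain message level; the tool name and arguments are invented, and this is not the onyx ToolChoiceUpdate itself:

# A synthetic AIMessageChunk carrying one tool_call_chunks entry, as a
# non-tool-calling LLM path might construct it.
import json

from langchain_core.messages import AIMessageChunk, ToolCallChunk

tool_call_message = AIMessageChunk(
    content="",
    tool_call_chunks=[
        ToolCallChunk(
            name="run_search",                      # hypothetical tool name
            args=json.dumps({"query": "example"}),  # args stream as a JSON string
            id="call_0",
            index=0,
        )
    ],
)
print(tool_call_message.tool_calls)  # parsed into structured tool calls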