Move Answer (#3339)

Authored by Yuhong Sun on 2024-12-04 16:30:47 -08:00; committed by GitHub
parent ef9942b751
commit 2a55696545
42 changed files with 364 additions and 370 deletions

View File

@@ -6,27 +6,27 @@ from langchain.schema.messages import BaseMessage
from langchain_core.messages import AIMessageChunk
from langchain_core.messages import ToolCall
from danswer.chat.llm_response_handler import LLMResponseHandlerManager
from danswer.chat.models import AnswerQuestionPossibleReturn
from danswer.chat.models import AnswerStyleConfig
from danswer.chat.models import CitationInfo
from danswer.chat.models import DanswerAnswerPiece
from danswer.file_store.utils import InMemoryChatFile
from danswer.llm.answering.llm_response_handler import LLMCall
from danswer.llm.answering.llm_response_handler import LLMResponseHandlerManager
from danswer.llm.answering.models import AnswerStyleConfig
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.answering.models import PromptConfig
from danswer.llm.answering.prompts.build import AnswerPromptBuilder
from danswer.llm.answering.prompts.build import default_build_system_message
from danswer.llm.answering.prompts.build import default_build_user_message
from danswer.llm.answering.stream_processing.answer_response_handler import (
from danswer.chat.models import PromptConfig
from danswer.chat.prompt_builder.build import AnswerPromptBuilder
from danswer.chat.prompt_builder.build import default_build_system_message
from danswer.chat.prompt_builder.build import default_build_user_message
from danswer.chat.prompt_builder.build import LLMCall
from danswer.chat.stream_processing.answer_response_handler import (
CitationResponseHandler,
)
from danswer.llm.answering.stream_processing.answer_response_handler import (
from danswer.chat.stream_processing.answer_response_handler import (
DummyAnswerResponseHandler,
)
from danswer.llm.answering.stream_processing.utils import map_document_id_order
from danswer.llm.answering.tool.tool_response_handler import ToolResponseHandler
from danswer.chat.stream_processing.utils import map_document_id_order
from danswer.chat.tool_handling.tool_response_handler import ToolResponseHandler
from danswer.file_store.utils import InMemoryChatFile
from danswer.llm.interfaces import LLM
from danswer.llm.models import PreviousMessage
from danswer.natural_language_processing.utils import get_tokenizer
from danswer.tools.force import ForceUseTool
from danswer.tools.models import ToolResponse

View File

@@ -26,7 +26,7 @@ from danswer.db.models import Prompt
from danswer.db.models import Tool
from danswer.db.models import User
from danswer.db.persona import get_prompts_by_ids
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.models import PreviousMessage
from danswer.natural_language_processing.utils import BaseTokenizer
from danswer.server.query_and_chat.models import CreateChatMessageRequest
from danswer.tools.tool_implementations.custom.custom_tool import (

View File

@@ -1,58 +1,22 @@
from collections.abc import Callable
from collections.abc import Generator
from collections.abc import Iterator
from typing import TYPE_CHECKING
from langchain_core.messages import BaseMessage
from pydantic.v1 import BaseModel as BaseModel__v1
from danswer.chat.models import CitationInfo
from danswer.chat.models import DanswerAnswerPiece
from danswer.chat.models import ResponsePart
from danswer.chat.models import StreamStopInfo
from danswer.chat.models import StreamStopReason
from danswer.file_store.models import InMemoryChatFile
from danswer.llm.answering.prompts.build import AnswerPromptBuilder
from danswer.tools.force import ForceUseTool
from danswer.tools.models import ToolCallFinalResult
from danswer.tools.models import ToolCallKickoff
from danswer.tools.models import ToolResponse
from danswer.tools.tool import Tool
if TYPE_CHECKING:
from danswer.llm.answering.stream_processing.answer_response_handler import (
AnswerResponseHandler,
)
from danswer.llm.answering.tool.tool_response_handler import ToolResponseHandler
ResponsePart = (
DanswerAnswerPiece
| CitationInfo
| ToolCallKickoff
| ToolResponse
| ToolCallFinalResult
| StreamStopInfo
)
class LLMCall(BaseModel__v1):
prompt_builder: AnswerPromptBuilder
tools: list[Tool]
force_use_tool: ForceUseTool
files: list[InMemoryChatFile]
tool_call_info: list[ToolCallKickoff | ToolResponse | ToolCallFinalResult]
using_tool_calling_llm: bool
class Config:
arbitrary_types_allowed = True
from danswer.chat.prompt_builder.build import LLMCall
from danswer.chat.stream_processing.answer_response_handler import AnswerResponseHandler
from danswer.chat.tool_handling.tool_response_handler import ToolResponseHandler
class LLMResponseHandlerManager:
def __init__(
self,
tool_handler: "ToolResponseHandler",
answer_handler: "AnswerResponseHandler",
tool_handler: ToolResponseHandler,
answer_handler: AnswerResponseHandler,
is_cancelled: Callable[[], bool],
):
self.tool_handler = tool_handler

View File

@@ -1,10 +1,14 @@
from collections.abc import Callable
from collections.abc import Iterator
from datetime import datetime
from enum import Enum
from typing import Any
from typing import TYPE_CHECKING
from pydantic import BaseModel
from pydantic import ConfigDict
from pydantic import Field
from pydantic import model_validator
from danswer.configs.constants import DocumentSource
from danswer.configs.constants import MessageType
@@ -12,8 +16,15 @@ from danswer.context.search.enums import QueryFlow
from danswer.context.search.enums import RecencyBiasSetting
from danswer.context.search.enums import SearchType
from danswer.context.search.models import RetrievalDocs
from danswer.llm.override_models import PromptOverride
from danswer.tools.models import ToolCallFinalResult
from danswer.tools.models import ToolCallKickoff
from danswer.tools.models import ToolResponse
from danswer.tools.tool_implementations.custom.base_tool_types import ToolResultType
if TYPE_CHECKING:
from danswer.db.models import Prompt
class LlmDoc(BaseModel):
"""This contains the minimal set information for the LLM portion including citations"""
@@ -210,3 +221,109 @@ AnswerQuestionStreamReturn = Iterator[AnswerQuestionPossibleReturn]
class LLMMetricsContainer(BaseModel):
prompt_tokens: int
response_tokens: int
StreamProcessor = Callable[[Iterator[str]], AnswerQuestionStreamReturn]
class DocumentPruningConfig(BaseModel):
max_chunks: int | None = None
max_window_percentage: float | None = None
max_tokens: int | None = None
# different pruning behavior is expected when the
# user manually selects documents they want to chat with
# e.g. we don't want to truncate each document to be no more
# than one chunk long
is_manually_selected_docs: bool = False
# If user specifies to include additional context Chunks for each match, then different pruning
# is used. As many Sections as possible are included, and the last Section is truncated
# If this is false, all of the Sections are truncated if they are longer than the expected Chunk size.
# Sections are often expected to be longer than the maximum Chunk size but Chunks should not be.
use_sections: bool = True
# If using tools, then we need to consider the tool length
tool_num_tokens: int = 0
# If using a tool message to represent the docs, then we have to JSON serialize
# the document content, which adds to the token count.
using_tool_message: bool = False
class ContextualPruningConfig(DocumentPruningConfig):
num_chunk_multiple: int
@classmethod
def from_doc_pruning_config(
cls, num_chunk_multiple: int, doc_pruning_config: DocumentPruningConfig
) -> "ContextualPruningConfig":
return cls(num_chunk_multiple=num_chunk_multiple, **doc_pruning_config.dict())
class CitationConfig(BaseModel):
all_docs_useful: bool = False
class QuotesConfig(BaseModel):
pass
class AnswerStyleConfig(BaseModel):
citation_config: CitationConfig | None = None
quotes_config: QuotesConfig | None = None
document_pruning_config: DocumentPruningConfig = Field(
default_factory=DocumentPruningConfig
)
# forces the LLM to return a structured response, see
# https://platform.openai.com/docs/guides/structured-outputs/introduction
# right now, only used by the simple chat API
structured_response_format: dict | None = None
@model_validator(mode="after")
def check_quotes_and_citation(self) -> "AnswerStyleConfig":
if self.citation_config is None and self.quotes_config is None:
raise ValueError(
"One of `citation_config` or `quotes_config` must be provided"
)
if self.citation_config is not None and self.quotes_config is not None:
raise ValueError(
"Only one of `citation_config` or `quotes_config` must be provided"
)
return self
class PromptConfig(BaseModel):
"""Final representation of the Prompt configuration passed
into the `Answer` object."""
system_prompt: str
task_prompt: str
datetime_aware: bool
include_citations: bool
@classmethod
def from_model(
cls, model: "Prompt", prompt_override: PromptOverride | None = None
) -> "PromptConfig":
override_system_prompt = (
prompt_override.system_prompt if prompt_override else None
)
override_task_prompt = prompt_override.task_prompt if prompt_override else None
return cls(
system_prompt=override_system_prompt or model.system_prompt,
task_prompt=override_task_prompt or model.task_prompt,
datetime_aware=model.datetime_aware,
include_citations=model.include_citations,
)
model_config = ConfigDict(frozen=True)
ResponsePart = (
DanswerAnswerPiece
| CitationInfo
| ToolCallKickoff
| ToolResponse
| ToolCallFinalResult
| StreamStopInfo
)
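
For readers tracing the move: below is a minimal usage sketch (not part of this commit) of the relocated AnswerStyleConfig validator, assuming only the definitions shown above and the new danswer.chat.models import path.

from danswer.chat.models import AnswerStyleConfig, CitationConfig, QuotesConfig

# Exactly one of citation_config / quotes_config may be set.
ok = AnswerStyleConfig(citation_config=CitationConfig(all_docs_useful=True))

try:
    AnswerStyleConfig()  # neither config set: check_quotes_and_citation rejects it
except ValueError as err:  # pydantic's ValidationError subclasses ValueError
    print(err)  # message includes: One of `citation_config` or `quotes_config` must be provided

try:
    AnswerStyleConfig(citation_config=CitationConfig(), quotes_config=QuotesConfig())
except ValueError as err:
    print(err)  # message includes: Only one of `citation_config` or `quotes_config` must be provided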

View File

@@ -6,19 +6,24 @@ from typing import cast
from sqlalchemy.orm import Session
from danswer.chat.answer import Answer
from danswer.chat.chat_utils import create_chat_chain
from danswer.chat.chat_utils import create_temporary_persona
from danswer.chat.models import AllCitations
from danswer.chat.models import AnswerStyleConfig
from danswer.chat.models import ChatDanswerBotResponse
from danswer.chat.models import CitationConfig
from danswer.chat.models import CitationInfo
from danswer.chat.models import CustomToolResponse
from danswer.chat.models import DanswerAnswerPiece
from danswer.chat.models import DanswerContexts
from danswer.chat.models import DocumentPruningConfig
from danswer.chat.models import FileChatDisplay
from danswer.chat.models import FinalUsedContextDocsResponse
from danswer.chat.models import LLMRelevanceFilterResponse
from danswer.chat.models import MessageResponseIDInfo
from danswer.chat.models import MessageSpecificCitations
from danswer.chat.models import PromptConfig
from danswer.chat.models import QADocsResponse
from danswer.chat.models import StreamingError
from danswer.chat.models import StreamStopInfo
@@ -58,15 +63,10 @@ from danswer.file_store.models import ChatFileType
from danswer.file_store.models import FileDescriptor
from danswer.file_store.utils import load_all_chat_files
from danswer.file_store.utils import save_files_from_urls
from danswer.llm.answering.answer import Answer
from danswer.llm.answering.models import AnswerStyleConfig
from danswer.llm.answering.models import CitationConfig
from danswer.llm.answering.models import DocumentPruningConfig
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.answering.models import PromptConfig
from danswer.llm.exceptions import GenAIDisabledException
from danswer.llm.factory import get_llms_for_persona
from danswer.llm.factory import get_main_llm_from_tuple
from danswer.llm.models import PreviousMessage
from danswer.llm.utils import litellm_exception_to_error_msg
from danswer.natural_language_processing.utils import get_tokenizer
from danswer.server.query_and_chat.models import ChatMessageDetail

View File

@@ -4,20 +4,26 @@ from typing import cast
from langchain_core.messages import BaseMessage
from langchain_core.messages import HumanMessage
from langchain_core.messages import SystemMessage
from pydantic.v1 import BaseModel as BaseModel__v1
from danswer.chat.models import PromptConfig
from danswer.chat.prompt_builder.citations_prompt import compute_max_llm_input_tokens
from danswer.chat.prompt_builder.utils import translate_history_to_basemessages
from danswer.file_store.models import InMemoryChatFile
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.answering.models import PromptConfig
from danswer.llm.answering.prompts.citations_prompt import compute_max_llm_input_tokens
from danswer.llm.interfaces import LLMConfig
from danswer.llm.models import PreviousMessage
from danswer.llm.utils import build_content_with_imgs
from danswer.llm.utils import check_message_tokens
from danswer.llm.utils import message_to_prompt_and_imgs
from danswer.llm.utils import translate_history_to_basemessages
from danswer.natural_language_processing.utils import get_tokenizer
from danswer.prompts.chat_prompts import CHAT_USER_CONTEXT_FREE_PROMPT
from danswer.prompts.prompt_utils import add_date_time_to_prompt
from danswer.prompts.prompt_utils import drop_messages_history_overflow
from danswer.tools.force import ForceUseTool
from danswer.tools.models import ToolCallFinalResult
from danswer.tools.models import ToolCallKickoff
from danswer.tools.models import ToolResponse
from danswer.tools.tool import Tool
def default_build_system_message(
@@ -139,3 +145,15 @@ class AnswerPromptBuilder:
return drop_messages_history_overflow(
final_messages_with_tokens, self.max_tokens
)
class LLMCall(BaseModel__v1):
prompt_builder: AnswerPromptBuilder
tools: list[Tool]
force_use_tool: ForceUseTool
files: list[InMemoryChatFile]
tool_call_info: list[ToolCallKickoff | ToolResponse | ToolCallFinalResult]
using_tool_calling_llm: bool
class Config:
arbitrary_types_allowed = True

View File

@@ -2,12 +2,12 @@ from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from danswer.chat.models import LlmDoc
from danswer.chat.models import PromptConfig
from danswer.configs.model_configs import GEN_AI_SINGLE_USER_MESSAGE_EXPECTED_MAX_TOKENS
from danswer.context.search.models import InferenceChunk
from danswer.db.models import Persona
from danswer.db.persona import get_default_prompt__read_only
from danswer.db.search_settings import get_multilingual_expansion
from danswer.llm.answering.models import PromptConfig
from danswer.llm.factory import get_llms_for_persona
from danswer.llm.factory import get_main_llm_from_tuple
from danswer.llm.interfaces import LLMConfig

View File

@@ -1,10 +1,10 @@
from langchain.schema.messages import HumanMessage
from danswer.chat.models import LlmDoc
from danswer.chat.models import PromptConfig
from danswer.configs.chat_configs import LANGUAGE_HINT
from danswer.context.search.models import InferenceChunk
from danswer.db.search_settings import get_multilingual_expansion
from danswer.llm.answering.models import PromptConfig
from danswer.llm.utils import message_to_prompt_and_imgs
from danswer.prompts.direct_qa_prompts import CONTEXT_BLOCK
from danswer.prompts.direct_qa_prompts import HISTORY_BLOCK

View File

@@ -0,0 +1,62 @@
from langchain.schema.messages import AIMessage
from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from danswer.configs.constants import MessageType
from danswer.db.models import ChatMessage
from danswer.file_store.models import InMemoryChatFile
from danswer.llm.models import PreviousMessage
from danswer.llm.utils import build_content_with_imgs
from danswer.prompts.direct_qa_prompts import PARAMATERIZED_PROMPT
from danswer.prompts.direct_qa_prompts import PARAMATERIZED_PROMPT_WITHOUT_CONTEXT
def build_dummy_prompt(
system_prompt: str, task_prompt: str, retrieval_disabled: bool
) -> str:
if retrieval_disabled:
return PARAMATERIZED_PROMPT_WITHOUT_CONTEXT.format(
user_query="<USER_QUERY>",
system_prompt=system_prompt,
task_prompt=task_prompt,
).strip()
return PARAMATERIZED_PROMPT.format(
context_docs_str="<CONTEXT_DOCS>",
user_query="<USER_QUERY>",
system_prompt=system_prompt,
task_prompt=task_prompt,
).strip()
def translate_danswer_msg_to_langchain(
msg: ChatMessage | PreviousMessage,
) -> BaseMessage:
files: list[InMemoryChatFile] = []
# If the message is a `ChatMessage`, it doesn't have the downloaded files
# attached. Just ignore them for now.
if not isinstance(msg, ChatMessage):
files = msg.files
content = build_content_with_imgs(msg.message, files, message_type=msg.message_type)
if msg.message_type == MessageType.SYSTEM:
raise ValueError("System messages are not currently part of history")
if msg.message_type == MessageType.ASSISTANT:
return AIMessage(content=content)
if msg.message_type == MessageType.USER:
return HumanMessage(content=content)
raise ValueError(f"New message type {msg.message_type} not handled")
def translate_history_to_basemessages(
history: list[ChatMessage] | list["PreviousMessage"],
) -> tuple[list[BaseMessage], list[int]]:
history_basemessages = [
translate_danswer_msg_to_langchain(msg)
for msg in history
if msg.token_count != 0
]
history_token_counts = [msg.token_count for msg in history if msg.token_count != 0]
return history_basemessages, history_token_counts
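
A hedged usage sketch for the helpers above (not from the commit); the PreviousMessage fields follow the model this commit adds in danswer/llm/models.py.

from danswer.configs.constants import MessageType
from danswer.llm.models import PreviousMessage
from danswer.chat.prompt_builder.utils import translate_history_to_basemessages

history = [
    PreviousMessage(
        message="What moved in this commit?",
        token_count=6,
        message_type=MessageType.USER,
        files=[],
        tool_call=None,
    ),
    PreviousMessage(
        message="Answer logic now lives under danswer.chat.",
        token_count=9,
        message_type=MessageType.ASSISTANT,
        files=[],
        tool_call=None,
    ),
]

messages, token_counts = translate_history_to_basemessages(history)
# messages -> [HumanMessage(...), AIMessage(...)]
# token_counts -> [6, 9]; entries with token_count == 0 are dropped.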

View File

@@ -5,16 +5,16 @@ from typing import TypeVar
from pydantic import BaseModel
from danswer.chat.models import ContextualPruningConfig
from danswer.chat.models import (
LlmDoc,
)
from danswer.chat.models import PromptConfig
from danswer.chat.prompt_builder.citations_prompt import compute_max_document_tokens
from danswer.configs.constants import IGNORE_FOR_QA
from danswer.configs.model_configs import DOC_EMBEDDING_CONTEXT_SIZE
from danswer.context.search.models import InferenceChunk
from danswer.context.search.models import InferenceSection
from danswer.llm.answering.models import ContextualPruningConfig
from danswer.llm.answering.models import PromptConfig
from danswer.llm.answering.prompts.citations_prompt import compute_max_document_tokens
from danswer.llm.interfaces import LLMConfig
from danswer.natural_language_processing.utils import get_tokenizer
from danswer.natural_language_processing.utils import tokenizer_trim_content

View File

@@ -3,13 +3,11 @@ from collections.abc import Generator
from langchain_core.messages import BaseMessage
from danswer.chat.llm_response_handler import ResponsePart
from danswer.chat.models import CitationInfo
from danswer.chat.models import LlmDoc
from danswer.llm.answering.llm_response_handler import ResponsePart
from danswer.llm.answering.stream_processing.citation_processing import (
CitationProcessor,
)
from danswer.llm.answering.stream_processing.utils import DocumentIdOrderMapping
from danswer.chat.stream_processing.citation_processing import CitationProcessor
from danswer.chat.stream_processing.utils import DocumentIdOrderMapping
from danswer.utils.logger import setup_logger
logger = setup_logger()

View File

@@ -4,8 +4,8 @@ from collections.abc import Generator
from danswer.chat.models import CitationInfo
from danswer.chat.models import DanswerAnswerPiece
from danswer.chat.models import LlmDoc
from danswer.chat.stream_processing.utils import DocumentIdOrderMapping
from danswer.configs.chat_configs import STOP_STREAM_PAT
from danswer.llm.answering.stream_processing.utils import DocumentIdOrderMapping
from danswer.prompts.constants import TRIPLE_BACKTICK
from danswer.utils.logger import setup_logger

View File

@@ -4,8 +4,8 @@ from langchain_core.messages import AIMessageChunk
from langchain_core.messages import BaseMessage
from langchain_core.messages import ToolCall
from danswer.llm.answering.llm_response_handler import LLMCall
from danswer.llm.answering.llm_response_handler import ResponsePart
from danswer.chat.models import ResponsePart
from danswer.chat.prompt_builder.build import LLMCall
from danswer.llm.interfaces import LLM
from danswer.tools.force import ForceUseTool
from danswer.tools.message import build_tool_message

View File

@@ -5,7 +5,11 @@ from typing import cast
from sqlalchemy.orm import Session
from danswer.chat.models import PromptConfig
from danswer.chat.models import SectionRelevancePiece
from danswer.chat.prune_and_merge import _merge_sections
from danswer.chat.prune_and_merge import ChunkRange
from danswer.chat.prune_and_merge import merge_chunk_intervals
from danswer.configs.chat_configs import DISABLE_LLM_DOC_RELEVANCE
from danswer.context.search.enums import LLMEvaluationType
from danswer.context.search.enums import QueryFlow
@@ -27,10 +31,6 @@ from danswer.db.models import User
from danswer.db.search_settings import get_current_search_settings
from danswer.document_index.factory import get_default_document_index
from danswer.document_index.interfaces import VespaChunkRequest
from danswer.llm.answering.models import PromptConfig
from danswer.llm.answering.prune_and_merge import _merge_sections
from danswer.llm.answering.prune_and_merge import ChunkRange
from danswer.llm.answering.prune_and_merge import merge_chunk_intervals
from danswer.llm.interfaces import LLM
from danswer.secondary_llm_flows.agentic_evaluation import evaluate_inference_section
from danswer.utils.logger import setup_logger

View File

@@ -1,163 +0,0 @@
from collections.abc import Callable
from collections.abc import Iterator
from typing import TYPE_CHECKING
from langchain.schema.messages import AIMessage
from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from pydantic import BaseModel
from pydantic import ConfigDict
from pydantic import Field
from pydantic import model_validator
from danswer.chat.models import AnswerQuestionStreamReturn
from danswer.configs.constants import MessageType
from danswer.file_store.models import InMemoryChatFile
from danswer.llm.override_models import PromptOverride
from danswer.llm.utils import build_content_with_imgs
from danswer.tools.models import ToolCallFinalResult
if TYPE_CHECKING:
from danswer.db.models import ChatMessage
from danswer.db.models import Prompt
StreamProcessor = Callable[[Iterator[str]], AnswerQuestionStreamReturn]
class PreviousMessage(BaseModel):
"""Simplified version of `ChatMessage`"""
message: str
token_count: int
message_type: MessageType
files: list[InMemoryChatFile]
tool_call: ToolCallFinalResult | None
@classmethod
def from_chat_message(
cls, chat_message: "ChatMessage", available_files: list[InMemoryChatFile]
) -> "PreviousMessage":
message_file_ids = (
[file["id"] for file in chat_message.files] if chat_message.files else []
)
return cls(
message=chat_message.message,
token_count=chat_message.token_count,
message_type=chat_message.message_type,
files=[
file
for file in available_files
if str(file.file_id) in message_file_ids
],
tool_call=ToolCallFinalResult(
tool_name=chat_message.tool_call.tool_name,
tool_args=chat_message.tool_call.tool_arguments,
tool_result=chat_message.tool_call.tool_result,
)
if chat_message.tool_call
else None,
)
def to_langchain_msg(self) -> BaseMessage:
content = build_content_with_imgs(self.message, self.files)
if self.message_type == MessageType.USER:
return HumanMessage(content=content)
elif self.message_type == MessageType.ASSISTANT:
return AIMessage(content=content)
else:
return SystemMessage(content=content)
class DocumentPruningConfig(BaseModel):
max_chunks: int | None = None
max_window_percentage: float | None = None
max_tokens: int | None = None
# different pruning behavior is expected when the
# user manually selects documents they want to chat with
# e.g. we don't want to truncate each document to be no more
# than one chunk long
is_manually_selected_docs: bool = False
# If user specifies to include additional context Chunks for each match, then different pruning
# is used. As many Sections as possible are included, and the last Section is truncated
# If this is false, all of the Sections are truncated if they are longer than the expected Chunk size.
# Sections are often expected to be longer than the maximum Chunk size but Chunks should not be.
use_sections: bool = True
# If using tools, then we need to consider the tool length
tool_num_tokens: int = 0
# If using a tool message to represent the docs, then we have to JSON serialize
# the document content, which adds to the token count.
using_tool_message: bool = False
class ContextualPruningConfig(DocumentPruningConfig):
num_chunk_multiple: int
@classmethod
def from_doc_pruning_config(
cls, num_chunk_multiple: int, doc_pruning_config: DocumentPruningConfig
) -> "ContextualPruningConfig":
return cls(num_chunk_multiple=num_chunk_multiple, **doc_pruning_config.dict())
class CitationConfig(BaseModel):
all_docs_useful: bool = False
class QuotesConfig(BaseModel):
pass
class AnswerStyleConfig(BaseModel):
citation_config: CitationConfig | None = None
quotes_config: QuotesConfig | None = None
document_pruning_config: DocumentPruningConfig = Field(
default_factory=DocumentPruningConfig
)
# forces the LLM to return a structured response, see
# https://platform.openai.com/docs/guides/structured-outputs/introduction
# right now, only used by the simple chat API
structured_response_format: dict | None = None
@model_validator(mode="after")
def check_quotes_and_citation(self) -> "AnswerStyleConfig":
if self.citation_config is None and self.quotes_config is None:
raise ValueError(
"One of `citation_config` or `quotes_config` must be provided"
)
if self.citation_config is not None and self.quotes_config is not None:
raise ValueError(
"Only one of `citation_config` or `quotes_config` must be provided"
)
return self
class PromptConfig(BaseModel):
"""Final representation of the Prompt configuration passed
into the `Answer` object."""
system_prompt: str
task_prompt: str
datetime_aware: bool
include_citations: bool
@classmethod
def from_model(
cls, model: "Prompt", prompt_override: PromptOverride | None = None
) -> "PromptConfig":
override_system_prompt = (
prompt_override.system_prompt if prompt_override else None
)
override_task_prompt = prompt_override.task_prompt if prompt_override else None
return cls(
system_prompt=override_system_prompt or model.system_prompt,
task_prompt=override_task_prompt or model.task_prompt,
datetime_aware=model.datetime_aware,
include_citations=model.include_citations,
)
model_config = ConfigDict(frozen=True)

View File

@@ -1,20 +0,0 @@
from danswer.prompts.direct_qa_prompts import PARAMATERIZED_PROMPT
from danswer.prompts.direct_qa_prompts import PARAMATERIZED_PROMPT_WITHOUT_CONTEXT
def build_dummy_prompt(
system_prompt: str, task_prompt: str, retrieval_disabled: bool
) -> str:
if retrieval_disabled:
return PARAMATERIZED_PROMPT_WITHOUT_CONTEXT.format(
user_query="<USER_QUERY>",
system_prompt=system_prompt,
task_prompt=task_prompt,
).strip()
return PARAMATERIZED_PROMPT.format(
context_docs_str="<CONTEXT_DOCS>",
user_query="<USER_QUERY>",
system_prompt=system_prompt,
task_prompt=task_prompt,
).strip()

View File

@@ -0,0 +1,59 @@
from typing import TYPE_CHECKING
from langchain.schema.messages import AIMessage
from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from pydantic import BaseModel
from danswer.configs.constants import MessageType
from danswer.file_store.models import InMemoryChatFile
from danswer.llm.utils import build_content_with_imgs
from danswer.tools.models import ToolCallFinalResult
if TYPE_CHECKING:
from danswer.db.models import ChatMessage
class PreviousMessage(BaseModel):
"""Simplified version of `ChatMessage`"""
message: str
token_count: int
message_type: MessageType
files: list[InMemoryChatFile]
tool_call: ToolCallFinalResult | None
@classmethod
def from_chat_message(
cls, chat_message: "ChatMessage", available_files: list[InMemoryChatFile]
) -> "PreviousMessage":
message_file_ids = (
[file["id"] for file in chat_message.files] if chat_message.files else []
)
return cls(
message=chat_message.message,
token_count=chat_message.token_count,
message_type=chat_message.message_type,
files=[
file
for file in available_files
if str(file.file_id) in message_file_ids
],
tool_call=ToolCallFinalResult(
tool_name=chat_message.tool_call.tool_name,
tool_args=chat_message.tool_call.tool_arguments,
tool_result=chat_message.tool_call.tool_result,
)
if chat_message.tool_call
else None,
)
def to_langchain_msg(self) -> BaseMessage:
content = build_content_with_imgs(self.message, self.files)
if self.message_type == MessageType.USER:
return HumanMessage(content=content)
elif self.message_type == MessageType.ASSISTANT:
return AIMessage(content=content)
else:
return SystemMessage(content=content)
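
An illustrative sketch (not in the diff) of to_langchain_msg on the model above. Note the contrast with translate_danswer_msg_to_langchain in danswer/chat/prompt_builder/utils.py, which raises on system messages, while this method falls through to SystemMessage.

from danswer.configs.constants import MessageType
from danswer.llm.models import PreviousMessage

msg = PreviousMessage(
    message="Done.",
    token_count=2,
    message_type=MessageType.ASSISTANT,
    files=[],
    tool_call=None,
)
print(type(msg.to_langchain_msg()).__name__)  # AIMessage

# model_copy(update=...) skips re-validation; message_type becomes SYSTEM.
system_msg = msg.model_copy(update={"message_type": MessageType.SYSTEM})
print(type(system_msg.to_langchain_msg()).__name__)  # SystemMessage (fallback branch)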

View File

@@ -5,8 +5,6 @@ from collections.abc import Callable
from collections.abc import Iterator
from typing import Any
from typing import cast
from typing import TYPE_CHECKING
from typing import Union
import litellm # type: ignore
import pandas as pd
@@ -36,7 +34,6 @@ from danswer.configs.constants import MessageType
from danswer.configs.model_configs import GEN_AI_MAX_TOKENS
from danswer.configs.model_configs import GEN_AI_MODEL_FALLBACK_MAX_TOKENS
from danswer.configs.model_configs import GEN_AI_NUM_RESERVED_OUTPUT_TOKENS
from danswer.db.models import ChatMessage
from danswer.file_store.models import ChatFileType
from danswer.file_store.models import InMemoryChatFile
from danswer.llm.interfaces import LLM
@@ -44,9 +41,6 @@ from danswer.prompts.constants import CODE_BLOCK_PAT
from danswer.utils.logger import setup_logger
from shared_configs.configs import LOG_LEVEL
if TYPE_CHECKING:
from danswer.llm.answering.models import PreviousMessage
logger = setup_logger()
@@ -104,39 +98,6 @@ def litellm_exception_to_error_msg(
return error_msg
def translate_danswer_msg_to_langchain(
msg: Union[ChatMessage, "PreviousMessage"],
) -> BaseMessage:
files: list[InMemoryChatFile] = []
# If the message is a `ChatMessage`, it doesn't have the downloaded files
# attached. Just ignore them for now.
if not isinstance(msg, ChatMessage):
files = msg.files
content = build_content_with_imgs(msg.message, files, message_type=msg.message_type)
if msg.message_type == MessageType.SYSTEM:
raise ValueError("System messages are not currently part of history")
if msg.message_type == MessageType.ASSISTANT:
return AIMessage(content=content)
if msg.message_type == MessageType.USER:
return HumanMessage(content=content)
raise ValueError(f"New message type {msg.message_type} not handled")
def translate_history_to_basemessages(
history: list[ChatMessage] | list["PreviousMessage"],
) -> tuple[list[BaseMessage], list[int]]:
history_basemessages = [
translate_danswer_msg_to_langchain(msg)
for msg in history
if msg.token_count != 0
]
history_token_counts = [msg.token_count for msg in history if msg.token_count != 0]
return history_basemessages, history_token_counts
# Processes CSV files to show the first 5 rows and max_columns (default 40) columns
def _process_csv_file(file: InMemoryChatFile, max_columns: int = 40) -> str:
df = pd.read_csv(io.StringIO(file.content.decode("utf-8")))

View File

@@ -5,11 +5,11 @@ from typing import cast
from langchain_core.messages import BaseMessage
from danswer.chat.models import LlmDoc
from danswer.chat.models import PromptConfig
from danswer.configs.chat_configs import LANGUAGE_HINT
from danswer.configs.constants import DocumentSource
from danswer.context.search.models import InferenceChunk
from danswer.db.models import Prompt
from danswer.llm.answering.models import PromptConfig
from danswer.prompts.chat_prompts import ADDITIONAL_INFO
from danswer.prompts.chat_prompts import CITATION_REMINDER
from danswer.prompts.constants import CODE_BLOCK_PAT

View File

@@ -3,14 +3,14 @@ from langchain.schema import HumanMessage
from langchain.schema import SystemMessage
from danswer.chat.chat_utils import combine_message_chain
from danswer.chat.prompt_builder.utils import translate_danswer_msg_to_langchain
from danswer.configs.chat_configs import DISABLE_LLM_CHOOSE_SEARCH
from danswer.configs.model_configs import GEN_AI_HISTORY_CUTOFF
from danswer.db.models import ChatMessage
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.interfaces import LLM
from danswer.llm.models import PreviousMessage
from danswer.llm.utils import dict_based_prompt_to_langchain_prompt
from danswer.llm.utils import message_to_string
from danswer.llm.utils import translate_danswer_msg_to_langchain
from danswer.prompts.chat_prompts import AGGRESSIVE_SEARCH_TEMPLATE
from danswer.prompts.chat_prompts import NO_SEARCH
from danswer.prompts.chat_prompts import REQUIRE_SEARCH_HINT

View File

@@ -4,10 +4,10 @@ from danswer.chat.chat_utils import combine_message_chain
from danswer.configs.chat_configs import DISABLE_LLM_QUERY_REPHRASE
from danswer.configs.model_configs import GEN_AI_HISTORY_CUTOFF
from danswer.db.models import ChatMessage
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.exceptions import GenAIDisabledException
from danswer.llm.factory import get_default_llms
from danswer.llm.interfaces import LLM
from danswer.llm.models import PreviousMessage
from danswer.llm.utils import dict_based_prompt_to_langchain_prompt
from danswer.llm.utils import message_to_string
from danswer.prompts.chat_prompts import HISTORY_QUERY_REPHRASE

View File

@@ -13,6 +13,7 @@ from danswer.auth.users import current_admin_user
from danswer.auth.users import current_curator_or_admin_user
from danswer.auth.users import current_limited_user
from danswer.auth.users import current_user
from danswer.chat.prompt_builder.utils import build_dummy_prompt
from danswer.configs.constants import FileOrigin
from danswer.configs.constants import NotificationType
from danswer.db.engine import get_session
@@ -33,7 +34,6 @@ from danswer.db.persona import update_persona_shared_users
from danswer.db.persona import update_persona_visibility
from danswer.file_store.file_store import get_default_file_store
from danswer.file_store.models import ChatFileType
from danswer.llm.answering.prompts.utils import build_dummy_prompt
from danswer.server.features.persona.models import CreatePersonaRequest
from danswer.server.features.persona.models import ImageGenerationToolStatus
from danswer.server.features.persona.models import PersonaCategoryCreate

View File

@@ -23,6 +23,9 @@ from danswer.auth.users import current_user
from danswer.chat.chat_utils import create_chat_chain
from danswer.chat.chat_utils import extract_headers
from danswer.chat.process_message import stream_chat_message
from danswer.chat.prompt_builder.citations_prompt import (
compute_max_document_tokens_for_persona,
)
from danswer.configs.app_configs import WEB_DOMAIN
from danswer.configs.constants import FileOrigin
from danswer.configs.constants import MessageType
@@ -51,9 +54,6 @@ from danswer.file_processing.extract_file_text import extract_file_text
from danswer.file_store.file_store import get_default_file_store
from danswer.file_store.models import ChatFileType
from danswer.file_store.models import FileDescriptor
from danswer.llm.answering.prompts.citations_prompt import (
compute_max_document_tokens_for_persona,
)
from danswer.llm.exceptions import GenAIDisabledException
from danswer.llm.factory import get_default_llms
from danswer.llm.factory import get_llms_for_persona

View File

@@ -1,5 +1,6 @@
from datetime import datetime
from typing import Any
from typing import TYPE_CHECKING
from uuid import UUID
from pydantic import BaseModel
@@ -22,6 +23,9 @@ from danswer.llm.override_models import LLMOverride
from danswer.llm.override_models import PromptOverride
from danswer.tools.models import ToolCallFinalResult
if TYPE_CHECKING:
pass
class SourceTag(Tag):
source: DocumentSource

View File

@@ -7,7 +7,7 @@ from danswer.llm.utils import message_to_prompt_and_imgs
from danswer.tools.tool import Tool
if TYPE_CHECKING:
from danswer.llm.answering.prompts.build import AnswerPromptBuilder
from danswer.chat.prompt_builder.build import AnswerPromptBuilder
from danswer.tools.tool_implementations.custom.custom_tool import (
CustomToolCallSummary,
)

View File

@@ -3,13 +3,13 @@ from collections.abc import Generator
from typing import Any
from typing import TYPE_CHECKING
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.interfaces import LLM
from danswer.llm.models import PreviousMessage
from danswer.utils.special_types import JSON_ro
if TYPE_CHECKING:
from danswer.llm.answering.prompts.build import AnswerPromptBuilder
from danswer.chat.prompt_builder.build import AnswerPromptBuilder
from danswer.tools.message import ToolCallSummary
from danswer.tools.models import ToolResponse

View File

@@ -5,6 +5,10 @@ from pydantic import BaseModel
from pydantic import Field
from sqlalchemy.orm import Session
from danswer.chat.models import AnswerStyleConfig
from danswer.chat.models import CitationConfig
from danswer.chat.models import DocumentPruningConfig
from danswer.chat.models import PromptConfig
from danswer.configs.app_configs import AZURE_DALLE_API_BASE
from danswer.configs.app_configs import AZURE_DALLE_API_KEY
from danswer.configs.app_configs import AZURE_DALLE_API_VERSION
@@ -19,10 +23,6 @@ from danswer.db.llm import fetch_existing_llm_providers
from danswer.db.models import Persona
from danswer.db.models import User
from danswer.file_store.models import InMemoryChatFile
from danswer.llm.answering.models import AnswerStyleConfig
from danswer.llm.answering.models import CitationConfig
from danswer.llm.answering.models import DocumentPruningConfig
from danswer.llm.answering.models import PromptConfig
from danswer.llm.interfaces import LLM
from danswer.llm.interfaces import LLMConfig
from danswer.natural_language_processing.utils import get_tokenizer

View File

@@ -15,14 +15,14 @@ from langchain_core.messages import SystemMessage
from pydantic import BaseModel
from requests import JSONDecodeError
from danswer.chat.prompt_builder.build import AnswerPromptBuilder
from danswer.configs.constants import FileOrigin
from danswer.db.engine import get_session_with_default_tenant
from danswer.file_store.file_store import get_default_file_store
from danswer.file_store.models import ChatFileType
from danswer.file_store.models import InMemoryChatFile
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.answering.prompts.build import AnswerPromptBuilder
from danswer.llm.interfaces import LLM
from danswer.llm.models import PreviousMessage
from danswer.tools.base_tool import BaseTool
from danswer.tools.message import ToolCallSummary
from danswer.tools.models import CHAT_SESSION_ID_PLACEHOLDER

View File

@@ -8,10 +8,10 @@ from litellm import image_generation # type: ignore
from pydantic import BaseModel
from danswer.chat.chat_utils import combine_message_chain
from danswer.chat.prompt_builder.build import AnswerPromptBuilder
from danswer.configs.model_configs import GEN_AI_HISTORY_CUTOFF
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.answering.prompts.build import AnswerPromptBuilder
from danswer.llm.interfaces import LLM
from danswer.llm.models import PreviousMessage
from danswer.llm.utils import build_content_with_imgs
from danswer.llm.utils import message_to_string
from danswer.prompts.constants import GENERAL_SEP_PAT

View File

@@ -7,15 +7,15 @@ from typing import cast
import httpx
from danswer.chat.chat_utils import combine_message_chain
from danswer.chat.models import AnswerStyleConfig
from danswer.chat.models import LlmDoc
from danswer.chat.models import PromptConfig
from danswer.chat.prompt_builder.build import AnswerPromptBuilder
from danswer.configs.constants import DocumentSource
from danswer.configs.model_configs import GEN_AI_HISTORY_CUTOFF
from danswer.context.search.models import SearchDoc
from danswer.llm.answering.models import AnswerStyleConfig
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.answering.models import PromptConfig
from danswer.llm.answering.prompts.build import AnswerPromptBuilder
from danswer.llm.interfaces import LLM
from danswer.llm.models import PreviousMessage
from danswer.llm.utils import message_to_string
from danswer.prompts.chat_prompts import INTERNET_SEARCH_QUERY_REPHRASE
from danswer.prompts.constants import GENERAL_SEP_PAT

View File

@@ -7,10 +7,19 @@ from pydantic import BaseModel
from sqlalchemy.orm import Session
from danswer.chat.chat_utils import llm_doc_from_inference_section
from danswer.chat.llm_response_handler import LLMCall
from danswer.chat.models import AnswerStyleConfig
from danswer.chat.models import ContextualPruningConfig
from danswer.chat.models import DanswerContext
from danswer.chat.models import DanswerContexts
from danswer.chat.models import DocumentPruningConfig
from danswer.chat.models import LlmDoc
from danswer.chat.models import PromptConfig
from danswer.chat.models import SectionRelevancePiece
from danswer.chat.prompt_builder.build import AnswerPromptBuilder
from danswer.chat.prompt_builder.citations_prompt import compute_max_llm_input_tokens
from danswer.chat.prune_and_merge import prune_and_merge_sections
from danswer.chat.prune_and_merge import prune_sections
from danswer.configs.chat_configs import CONTEXT_CHUNKS_ABOVE
from danswer.configs.chat_configs import CONTEXT_CHUNKS_BELOW
from danswer.configs.model_configs import GEN_AI_MODEL_FALLBACK_MAX_TOKENS
@@ -25,17 +34,8 @@ from danswer.context.search.models import SearchRequest
from danswer.context.search.pipeline import SearchPipeline
from danswer.db.models import Persona
from danswer.db.models import User
from danswer.llm.answering.llm_response_handler import LLMCall
from danswer.llm.answering.models import AnswerStyleConfig
from danswer.llm.answering.models import ContextualPruningConfig
from danswer.llm.answering.models import DocumentPruningConfig
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.answering.models import PromptConfig
from danswer.llm.answering.prompts.build import AnswerPromptBuilder
from danswer.llm.answering.prompts.citations_prompt import compute_max_llm_input_tokens
from danswer.llm.answering.prune_and_merge import prune_and_merge_sections
from danswer.llm.answering.prune_and_merge import prune_sections
from danswer.llm.interfaces import LLM
from danswer.llm.models import PreviousMessage
from danswer.secondary_llm_flows.choose_search import check_if_need_search
from danswer.secondary_llm_flows.query_expansion import history_based_query_rephrase
from danswer.tools.message import ToolCallSummary

View File

@@ -2,15 +2,15 @@ from typing import cast
from langchain_core.messages import HumanMessage
from danswer.chat.models import AnswerStyleConfig
from danswer.chat.models import LlmDoc
from danswer.llm.answering.models import AnswerStyleConfig
from danswer.llm.answering.models import PromptConfig
from danswer.llm.answering.prompts.build import AnswerPromptBuilder
from danswer.llm.answering.prompts.citations_prompt import (
from danswer.chat.models import PromptConfig
from danswer.chat.prompt_builder.build import AnswerPromptBuilder
from danswer.chat.prompt_builder.citations_prompt import (
build_citations_system_message,
)
from danswer.llm.answering.prompts.citations_prompt import build_citations_user_message
from danswer.llm.answering.prompts.quotes_prompt import build_quotes_user_message
from danswer.chat.prompt_builder.citations_prompt import build_citations_user_message
from danswer.chat.prompt_builder.quotes_prompt import build_quotes_user_message
from danswer.tools.message import ToolCallSummary
from danswer.tools.models import ToolResponse

View File

@@ -2,8 +2,8 @@ from collections.abc import Callable
from collections.abc import Generator
from typing import Any
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.interfaces import LLM
from danswer.llm.models import PreviousMessage
from danswer.tools.models import ToolCallFinalResult
from danswer.tools.models import ToolCallKickoff
from danswer.tools.models import ToolResponse

View File

@@ -3,8 +3,8 @@ from typing import Any
from danswer.chat.chat_utils import combine_message_chain
from danswer.configs.model_configs import GEN_AI_HISTORY_CUTOFF
from danswer.llm.answering.models import PreviousMessage
from danswer.llm.interfaces import LLM
from danswer.llm.models import PreviousMessage
from danswer.llm.utils import message_to_string
from danswer.prompts.constants import GENERAL_SEP_PAT
from danswer.tools.tool import Tool

View File

@@ -5,12 +5,12 @@ from unittest.mock import MagicMock
import pytest
from langchain_core.messages import SystemMessage
from danswer.chat.models import AnswerStyleConfig
from danswer.chat.models import CitationConfig
from danswer.chat.models import LlmDoc
from danswer.chat.models import PromptConfig
from danswer.chat.prompt_builder.build import AnswerPromptBuilder
from danswer.configs.constants import DocumentSource
from danswer.llm.answering.models import AnswerStyleConfig
from danswer.llm.answering.models import CitationConfig
from danswer.llm.answering.models import PromptConfig
from danswer.llm.answering.prompts.build import AnswerPromptBuilder
from danswer.llm.interfaces import LLMConfig
from danswer.tools.models import ToolResponse
from danswer.tools.tool_implementations.search.search_tool import SearchTool

View File

@@ -5,11 +5,9 @@ import pytest
from danswer.chat.models import CitationInfo
from danswer.chat.models import DanswerAnswerPiece
from danswer.chat.models import LlmDoc
from danswer.chat.stream_processing.citation_processing import CitationProcessor
from danswer.chat.stream_processing.utils import DocumentIdOrderMapping
from danswer.configs.constants import DocumentSource
from danswer.llm.answering.stream_processing.citation_processing import (
CitationProcessor,
)
from danswer.llm.answering.stream_processing.utils import DocumentIdOrderMapping
"""

View File

@@ -2,14 +2,10 @@ import textwrap
import pytest
from danswer.chat.stream_processing.quotes_processing import match_quotes_to_docs
from danswer.chat.stream_processing.quotes_processing import separate_answer_quotes
from danswer.configs.constants import DocumentSource
from danswer.context.search.models import InferenceChunk
from danswer.llm.answering.stream_processing.quotes_processing import (
match_quotes_to_docs,
)
from danswer.llm.answering.stream_processing.quotes_processing import (
separate_answer_quotes,
)
def test_passed_in_quotes() -> None:

View File

@@ -11,21 +11,21 @@ from langchain_core.messages import SystemMessage
from langchain_core.messages import ToolCall
from langchain_core.messages import ToolCallChunk
from danswer.chat.answer import Answer
from danswer.chat.models import AnswerStyleConfig
from danswer.chat.models import CitationInfo
from danswer.chat.models import DanswerAnswerPiece
from danswer.chat.models import LlmDoc
from danswer.chat.models import PromptConfig
from danswer.chat.models import StreamStopInfo
from danswer.chat.models import StreamStopReason
from danswer.llm.answering.answer import Answer
from danswer.llm.answering.models import AnswerStyleConfig
from danswer.llm.answering.models import PromptConfig
from danswer.llm.interfaces import LLM
from danswer.tools.force import ForceUseTool
from danswer.tools.models import ToolCallFinalResult
from danswer.tools.models import ToolCallKickoff
from danswer.tools.models import ToolResponse
from tests.unit.danswer.llm.answering.conftest import DEFAULT_SEARCH_ARGS
from tests.unit.danswer.llm.answering.conftest import QUERY
from tests.unit.danswer.chat.conftest import DEFAULT_SEARCH_ARGS
from tests.unit.danswer.chat.conftest import QUERY
@pytest.fixture

View File

@@ -1,9 +1,9 @@
import pytest
from danswer.chat.prune_and_merge import _merge_sections
from danswer.configs.constants import DocumentSource
from danswer.context.search.models import InferenceChunk
from danswer.context.search.models import InferenceSection
from danswer.llm.answering.prune_and_merge import _merge_sections
# This large test accounts for all of the following:

View File

@@ -5,10 +5,10 @@ from unittest.mock import Mock
import pytest
from pytest_mock import MockerFixture
from danswer.llm.answering.answer import Answer
from danswer.llm.answering.answer import AnswerStream
from danswer.llm.answering.models import AnswerStyleConfig
from danswer.llm.answering.models import PromptConfig
from danswer.chat.answer import Answer
from danswer.chat.answer import AnswerStream
from danswer.chat.models import AnswerStyleConfig
from danswer.chat.models import PromptConfig
from danswer.tools.force import ForceUseTool
from danswer.tools.tool_implementations.search.search_tool import SearchTool
from tests.regression.answer_quality.run_qa import _process_and_write_query_results