diff --git a/backend/danswer/chat/models.py b/backend/danswer/chat/models.py index 967648011..2902efe89 100644 --- a/backend/danswer/chat/models.py +++ b/backend/danswer/chat/models.py @@ -78,6 +78,7 @@ class CitationInfo(BaseModel): class StreamingError(BaseModel): error: str + stack_trace: str | None = None class DanswerQuote(BaseModel): diff --git a/backend/danswer/chat/process_message.py b/backend/danswer/chat/process_message.py index 2c3a0a332..1471d8fc3 100644 --- a/backend/danswer/chat/process_message.py +++ b/backend/danswer/chat/process_message.py @@ -51,6 +51,7 @@ from danswer.llm.exceptions import GenAIDisabledException from danswer.llm.factory import get_llms_for_persona from danswer.llm.factory import get_main_llm_from_tuple from danswer.llm.interfaces import LLMConfig +from danswer.llm.utils import litellm_exception_to_error_msg from danswer.natural_language_processing.utils import get_tokenizer from danswer.search.enums import LLMEvaluationType from danswer.search.enums import OptionalSearchSetting @@ -691,31 +692,14 @@ def stream_chat_message_objects( if isinstance(packet, ToolCallFinalResult): tool_result = packet yield cast(ChatPacket, packet) - except Exception as e: error_msg = str(e) - logger.exception(f"Failed to process chat message: {error_msg}") - if "Illegal header value b'Bearer '" in error_msg: - error_msg = ( - f"Authentication error: Invalid or empty API key provided for '{llm.config.model_provider}'. " - "Please check your API key configuration." - ) - elif ( - "Invalid leading whitespace, reserved character(s), or return character(s) in header value" - in error_msg - ): - error_msg = ( - f"Authentication error: Invalid API key format for '{llm.config.model_provider}'. " - "Please ensure your API key does not contain leading/trailing whitespace or invalid characters." - ) - elif llm.config.api_key and llm.config.api_key.lower() in error_msg.lower(): - error_msg = f"LLM failed to respond. 
Invalid API key error from '{llm.config.model_provider}'." - else: - error_msg = "An unexpected error occurred while processing your request. Please try again later." - - yield StreamingError(error=error_msg) + client_error_msg = litellm_exception_to_error_msg(e, llm) + if llm.config.api_key and len(llm.config.api_key) > 2: + error_msg = error_msg.replace(llm.config.api_key, "[REDACTED_API_KEY]") + yield StreamingError(error=client_error_msg, stack_trace=error_msg) db_session.rollback() return diff --git a/backend/danswer/llm/utils.py b/backend/danswer/llm/utils.py index 2a464d61c..73782d391 100644 --- a/backend/danswer/llm/utils.py +++ b/backend/danswer/llm/utils.py @@ -16,6 +16,18 @@ from langchain.schema.messages import AIMessage from langchain.schema.messages import BaseMessage from langchain.schema.messages import HumanMessage from langchain.schema.messages import SystemMessage +from litellm.exceptions import APIConnectionError # type: ignore +from litellm.exceptions import APIError # type: ignore +from litellm.exceptions import AuthenticationError # type: ignore +from litellm.exceptions import BadRequestError # type: ignore +from litellm.exceptions import BudgetExceededError # type: ignore +from litellm.exceptions import ContentPolicyViolationError # type: ignore +from litellm.exceptions import ContextWindowExceededError # type: ignore +from litellm.exceptions import NotFoundError # type: ignore +from litellm.exceptions import PermissionDeniedError # type: ignore +from litellm.exceptions import RateLimitError # type: ignore +from litellm.exceptions import Timeout # type: ignore +from litellm.exceptions import UnprocessableEntityError # type: ignore from danswer.configs.constants import MessageType from danswer.configs.model_configs import GEN_AI_MAX_OUTPUT_TOKENS @@ -29,13 +41,64 @@ from danswer.prompts.constants import CODE_BLOCK_PAT from danswer.utils.logger import setup_logger from shared_configs.configs import LOG_LEVEL - if TYPE_CHECKING: from 
danswer.llm.answering.models import PreviousMessage logger = setup_logger() +def litellm_exception_to_error_msg(e: Exception, llm: LLM) -> str: + error_msg = str(e) + + if isinstance(e, BadRequestError): + error_msg = "Bad request: The server couldn't process your request. Please check your input." + elif isinstance(e, AuthenticationError): + error_msg = "Authentication failed: Please check your API key and credentials." + elif isinstance(e, PermissionDeniedError): + error_msg = ( + "Permission denied: You don't have the necessary permissions for this operation. " + "Ensure you have access to this model." + ) + elif isinstance(e, NotFoundError): + error_msg = "Resource not found: The requested resource doesn't exist." + elif isinstance(e, UnprocessableEntityError): + error_msg = "Unprocessable entity: The server couldn't process your request due to semantic errors." + elif isinstance(e, RateLimitError): + error_msg = ( + "Rate limit exceeded: Please slow down your requests and try again later." + ) + elif isinstance(e, ContextWindowExceededError): + error_msg = ( + "Context window exceeded: Your input is too long for the model to process." + ) + if llm is not None: + try: + max_context = get_max_input_tokens( + model_name=llm.config.model_name, + model_provider=llm.config.model_provider, + ) + error_msg += f" Your invoked model ({llm.config.model_name}) has a maximum context size of {max_context}." + except Exception: + logger.warning( + "Unable to get maximum input token for LiteLLM exception handling" + ) + elif isinstance(e, ContentPolicyViolationError): + error_msg = "Content policy violation: Your request violates the content policy. Please revise your input." + elif isinstance(e, APIConnectionError): + error_msg = "API connection error: Failed to connect to the API. Please check your internet connection." + elif isinstance(e, BudgetExceededError): + error_msg = ( + "Budget exceeded: You've exceeded your allocated budget for API usage." 
+ ) + elif isinstance(e, Timeout): + error_msg = "Request timed out: The operation took too long to complete. Please try again." + elif isinstance(e, APIError): + error_msg = f"API error: An error occurred while communicating with the API. Details: {str(e)}" + else: + error_msg = "An unexpected error occurred while processing your request. Please try again later." + return error_msg + + def translate_danswer_msg_to_langchain( msg: Union[ChatMessage, "PreviousMessage"], ) -> BaseMessage: diff --git a/web/src/app/admin/connector/[ccPairId]/IndexingAttemptsTable.tsx b/web/src/app/admin/connector/[ccPairId]/IndexingAttemptsTable.tsx index 56a6c0e4f..baa833645 100644 --- a/web/src/app/admin/connector/[ccPairId]/IndexingAttemptsTable.tsx +++ b/web/src/app/admin/connector/[ccPairId]/IndexingAttemptsTable.tsx @@ -19,6 +19,7 @@ import { localizeAndPrettify } from "@/lib/time"; import { getDocsProcessedPerMinute } from "@/lib/indexAttempt"; import { Modal } from "@/components/Modal"; import { CheckmarkIcon, CopyIcon } from "@/components/icons/icons"; +import ExceptionTraceModal from "@/components/modals/ExceptionTraceModal"; const NUM_IN_PAGE = 8; @@ -36,43 +37,10 @@ export function IndexingAttemptsTable({ ccPair }: { ccPair: CCPairFullInfo }) { <> {indexAttemptToDisplayTraceFor && indexAttemptToDisplayTraceFor.full_exception_trace && ( - setIndexAttemptTracePopupId(null)} - > -
-
- {!copyClicked ? ( -
{ - navigator.clipboard.writeText( - indexAttemptToDisplayTraceFor.full_exception_trace! - ); - setCopyClicked(true); - setTimeout(() => setCopyClicked(false), 2000); - }} - className="flex w-fit cursor-pointer hover:bg-hover-light p-2 border-border border rounded" - > - Copy full trace - -
- ) : ( -
- Copied to clipboard - -
- )} -
-
- {indexAttemptToDisplayTraceFor.full_exception_trace} -
-
-
+ exceptionTrace={indexAttemptToDisplayTraceFor.full_exception_trace!} + /> )} diff --git a/web/src/app/chat/ChatPage.tsx b/web/src/app/chat/ChatPage.tsx index 90f56a9d7..0d2bb5e32 100644 --- a/web/src/app/chat/ChatPage.tsx +++ b/web/src/app/chat/ChatPage.tsx @@ -85,6 +85,7 @@ import { SetDefaultModelModal } from "./modal/SetDefaultModelModal"; import { DeleteChatModal } from "./modal/DeleteChatModal"; import remarkGfm from "remark-gfm"; import { MinimalMarkdown } from "@/components/chat_search/MinimalMarkdown"; +import ExceptionTraceModal from "@/components/modals/ExceptionTraceModal"; const TEMP_USER_MESSAGE_ID = -1; const TEMP_ASSISTANT_MESSAGE_ID = -2; @@ -118,6 +119,8 @@ export function ChatPage({ // chat session const existingChatIdRaw = searchParams.get("chatId"); + const currentPersonaId = searchParams.get(SEARCH_PARAM_NAMES.PERSONA_ID); + const existingChatSessionId = existingChatIdRaw ? parseInt(existingChatIdRaw) : null; @@ -181,10 +184,19 @@ export function ChatPage({ defaultTemperature ); - const liveAssistant = - selectedAssistant || filteredAssistants[0] || availableAssistants[0]; + const [alternativeAssistant, setAlternativeAssistant] = + useState(null); + const liveAssistant = + alternativeAssistant || + selectedAssistant || + filteredAssistants[0] || + availableAssistants[0]; useEffect(() => { + if (!loadedIdSessionRef.current && !currentPersonaId) { + return; + } + const personaDefault = getLLMProviderOverrideForPersona( liveAssistant, llmProviders @@ -200,8 +212,6 @@ export function ChatPage({ }, [liveAssistant]); // this is for "@"ing assistants - const [alternativeAssistant, setAlternativeAssistant] = - useState(null); // this is used to track which assistant is being used to generate the current message // for example, this would come into play when: @@ -816,6 +826,8 @@ export function ChatPage({ let documents: DanswerDocument[] = selectedDocuments; let aiMessageImages: FileDescriptor[] | null = null; let error: string | null = null; + let 
stackTrace: string | null = null; + let finalMessage: BackendMessage | null = null; let toolCalls: ToolCallMetadata[] = []; @@ -887,6 +899,7 @@ export function ChatPage({ if (!stack.isEmpty()) { const packet = stack.nextPacket(); + console.log(packet); if (packet) { if (Object.hasOwn(packet, "answer_piece")) { answer += (packet as AnswerPiecePacket).answer_piece; @@ -918,6 +931,7 @@ export function ChatPage({ ); } else if (Object.hasOwn(packet, "error")) { error = (packet as StreamingError).error; + stackTrace = (packet as StreamingError).stack_trace; } else if (Object.hasOwn(packet, "message_id")) { finalMessage = packet as BackendMessage; } @@ -950,6 +964,7 @@ export function ChatPage({ toolCalls: finalMessage?.tool_calls || toolCalls, parentMessageId: newUserMessageId, alternateAssistantID: alternativeAssistant?.id, + stackTrace: stackTrace, }, ]); } @@ -1172,6 +1187,9 @@ export function ChatPage({ liveAssistant ); }); + const [stackTraceModalContent, setStackTraceModalContent] = useState< + string | null + >(null); const innerSidebarElementRef = useRef(null); const [settingsToggled, setSettingsToggled] = useState(false); @@ -1264,6 +1282,13 @@ export function ChatPage({ /> )} + {stackTraceModalContent && ( + setStackTraceModalContent(null)} + exceptionTrace={stackTraceModalContent} + /> + )} + {sharedChatSession && ( {message.message} + {message.stackTrace && ( + + setStackTraceModalContent( + message.stackTrace! + ) + } + className="ml-2 cursor-pointer underline" + > + Show stack trace. + + )}

} /> diff --git a/web/src/app/chat/interfaces.ts b/web/src/app/chat/interfaces.ts index ee918dc8d..366bc1ec7 100644 --- a/web/src/app/chat/interfaces.ts +++ b/web/src/app/chat/interfaces.ts @@ -87,6 +87,7 @@ export interface Message { childrenMessageIds?: number[]; latestChildMessageId?: number | null; alternateAssistantID?: number | null; + stackTrace?: string | null; } export interface BackendChatSession { @@ -128,4 +129,5 @@ export interface ImageGenerationDisplay { export interface StreamingError { error: string; + stack_trace: string; } diff --git a/web/src/app/chat/message/Messages.tsx b/web/src/app/chat/message/Messages.tsx index 8dcd4e27b..945bfaf0f 100644 --- a/web/src/app/chat/message/Messages.tsx +++ b/web/src/app/chat/message/Messages.tsx @@ -8,7 +8,14 @@ import { FiGlobe, } from "react-icons/fi"; import { FeedbackType } from "../types"; -import { useContext, useEffect, useRef, useState } from "react"; +import { + Dispatch, + SetStateAction, + useContext, + useEffect, + useRef, + useState, +} from "react"; import ReactMarkdown from "react-markdown"; import { DanswerDocument, @@ -52,6 +59,7 @@ import { useMouseTracking } from "./hooks"; import { InternetSearchIcon } from "@/components/InternetSearchIcon"; import { SettingsContext } from "@/components/settings/SettingsProvider"; import GeneratingImageDisplay from "../tools/GeneratingImageDisplay"; +import ExceptionTraceModal from "@/components/modals/ExceptionTraceModal"; const TOOLS_WITH_CUSTOM_HANDLING = [ SEARCH_TOOL_NAME, @@ -432,7 +440,6 @@ export const AIMessage = ({ ) : isComplete ? null : ( <> )} - {isComplete && docs && docs.length > 0 && (
diff --git a/web/src/components/modals/ExceptionTraceModal.tsx b/web/src/components/modals/ExceptionTraceModal.tsx new file mode 100644 index 000000000..f7a940a40 --- /dev/null +++ b/web/src/components/modals/ExceptionTraceModal.tsx @@ -0,0 +1,49 @@ +import { useState } from "react"; +import { Modal } from "../Modal"; +import { CheckmarkIcon, CopyIcon } from "../icons/icons"; + +export default function ExceptionTraceModal({ + onOutsideClick, + exceptionTrace, +}: { + onOutsideClick: () => void; + exceptionTrace: string; +}) { + const [copyClicked, setCopyClicked] = useState(false); + + return ( + +
+
+ {!copyClicked ? ( +
{ + navigator.clipboard.writeText(exceptionTrace!); + setCopyClicked(true); + setTimeout(() => setCopyClicked(false), 2000); + }} + className="flex w-fit cursor-pointer hover:bg-hover-light p-2 border-border border rounded" + > + Copy full trace + +
+ ) : ( +
+ Copied to clipboard + +
+ )} +
+
{exceptionTrace}
+
+
+ ); +}