diff --git a/backend/danswer/chat/models.py b/backend/danswer/chat/models.py
index 967648011..2902efe89 100644
--- a/backend/danswer/chat/models.py
+++ b/backend/danswer/chat/models.py
@@ -78,6 +78,7 @@ class CitationInfo(BaseModel):
class StreamingError(BaseModel):
error: str
+ stack_trace: str | None = None
class DanswerQuote(BaseModel):
diff --git a/backend/danswer/chat/process_message.py b/backend/danswer/chat/process_message.py
index 2c3a0a332..1471d8fc3 100644
--- a/backend/danswer/chat/process_message.py
+++ b/backend/danswer/chat/process_message.py
@@ -51,6 +51,7 @@ from danswer.llm.exceptions import GenAIDisabledException
from danswer.llm.factory import get_llms_for_persona
from danswer.llm.factory import get_main_llm_from_tuple
from danswer.llm.interfaces import LLMConfig
+from danswer.llm.utils import litellm_exception_to_error_msg
from danswer.natural_language_processing.utils import get_tokenizer
from danswer.search.enums import LLMEvaluationType
from danswer.search.enums import OptionalSearchSetting
@@ -691,31 +692,14 @@ def stream_chat_message_objects(
if isinstance(packet, ToolCallFinalResult):
tool_result = packet
yield cast(ChatPacket, packet)
-
except Exception as e:
error_msg = str(e)
-
logger.exception(f"Failed to process chat message: {error_msg}")
- if "Illegal header value b'Bearer '" in error_msg:
- error_msg = (
- f"Authentication error: Invalid or empty API key provided for '{llm.config.model_provider}'. "
- "Please check your API key configuration."
- )
- elif (
- "Invalid leading whitespace, reserved character(s), or return character(s) in header value"
- in error_msg
- ):
- error_msg = (
- f"Authentication error: Invalid API key format for '{llm.config.model_provider}'. "
- "Please ensure your API key does not contain leading/trailing whitespace or invalid characters."
- )
- elif llm.config.api_key and llm.config.api_key.lower() in error_msg.lower():
- error_msg = f"LLM failed to respond. Invalid API key error from '{llm.config.model_provider}'."
- else:
- error_msg = "An unexpected error occurred while processing your request. Please try again later."
-
- yield StreamingError(error=error_msg)
+ client_error_msg = litellm_exception_to_error_msg(e, llm)
+ if llm.config.api_key and len(llm.config.api_key) > 2:
+ error_msg = error_msg.replace(llm.config.api_key, "[REDACTED_API_KEY]")
+ yield StreamingError(error=client_error_msg, stack_trace=error_msg)
db_session.rollback()
return
diff --git a/backend/danswer/llm/utils.py b/backend/danswer/llm/utils.py
index 2a464d61c..73782d391 100644
--- a/backend/danswer/llm/utils.py
+++ b/backend/danswer/llm/utils.py
@@ -16,6 +16,18 @@ from langchain.schema.messages import AIMessage
from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
+from litellm.exceptions import APIConnectionError # type: ignore
+from litellm.exceptions import APIError # type: ignore
+from litellm.exceptions import AuthenticationError # type: ignore
+from litellm.exceptions import BadRequestError # type: ignore
+from litellm.exceptions import BudgetExceededError # type: ignore
+from litellm.exceptions import ContentPolicyViolationError # type: ignore
+from litellm.exceptions import ContextWindowExceededError # type: ignore
+from litellm.exceptions import NotFoundError # type: ignore
+from litellm.exceptions import PermissionDeniedError # type: ignore
+from litellm.exceptions import RateLimitError # type: ignore
+from litellm.exceptions import Timeout # type: ignore
+from litellm.exceptions import UnprocessableEntityError # type: ignore
from danswer.configs.constants import MessageType
from danswer.configs.model_configs import GEN_AI_MAX_OUTPUT_TOKENS
@@ -29,13 +41,64 @@ from danswer.prompts.constants import CODE_BLOCK_PAT
from danswer.utils.logger import setup_logger
from shared_configs.configs import LOG_LEVEL
-
if TYPE_CHECKING:
from danswer.llm.answering.models import PreviousMessage
logger = setup_logger()
+def litellm_exception_to_error_msg(e: Exception, llm: LLM) -> str:
+ error_msg = str(e)
+
+ if isinstance(e, BadRequestError):
+ error_msg = "Bad request: The server couldn't process your request. Please check your input."
+ elif isinstance(e, AuthenticationError):
+ error_msg = "Authentication failed: Please check your API key and credentials."
+ elif isinstance(e, PermissionDeniedError):
+ error_msg = (
+            "Permission denied: You don't have the necessary permissions for this operation. "
+ "Ensure you have access to this model."
+ )
+ elif isinstance(e, NotFoundError):
+ error_msg = "Resource not found: The requested resource doesn't exist."
+ elif isinstance(e, UnprocessableEntityError):
+ error_msg = "Unprocessable entity: The server couldn't process your request due to semantic errors."
+ elif isinstance(e, RateLimitError):
+ error_msg = (
+ "Rate limit exceeded: Please slow down your requests and try again later."
+ )
+ elif isinstance(e, ContextWindowExceededError):
+ error_msg = (
+ "Context window exceeded: Your input is too long for the model to process."
+ )
+ if llm is not None:
+ try:
+ max_context = get_max_input_tokens(
+ model_name=llm.config.model_name,
+ model_provider=llm.config.model_provider,
+ )
+                error_msg += f" Your invoked model ({llm.config.model_name}) has a maximum context size of {max_context}."
+ except Exception:
+ logger.warning(
+                    "Unable to get maximum input token for LiteLLM exception handling"
+ )
+ elif isinstance(e, ContentPolicyViolationError):
+ error_msg = "Content policy violation: Your request violates the content policy. Please revise your input."
+ elif isinstance(e, APIConnectionError):
+ error_msg = "API connection error: Failed to connect to the API. Please check your internet connection."
+ elif isinstance(e, BudgetExceededError):
+ error_msg = (
+ "Budget exceeded: You've exceeded your allocated budget for API usage."
+ )
+ elif isinstance(e, Timeout):
+ error_msg = "Request timed out: The operation took too long to complete. Please try again."
+ elif isinstance(e, APIError):
+ error_msg = f"API error: An error occurred while communicating with the API. Details: {str(e)}"
+ else:
+ error_msg = "An unexpected error occurred while processing your request. Please try again later."
+ return error_msg
+
+
def translate_danswer_msg_to_langchain(
msg: Union[ChatMessage, "PreviousMessage"],
) -> BaseMessage:
diff --git a/web/src/app/admin/connector/[ccPairId]/IndexingAttemptsTable.tsx b/web/src/app/admin/connector/[ccPairId]/IndexingAttemptsTable.tsx
index 56a6c0e4f..baa833645 100644
--- a/web/src/app/admin/connector/[ccPairId]/IndexingAttemptsTable.tsx
+++ b/web/src/app/admin/connector/[ccPairId]/IndexingAttemptsTable.tsx
@@ -19,6 +19,7 @@ import { localizeAndPrettify } from "@/lib/time";
import { getDocsProcessedPerMinute } from "@/lib/indexAttempt";
import { Modal } from "@/components/Modal";
import { CheckmarkIcon, CopyIcon } from "@/components/icons/icons";
+import ExceptionTraceModal from "@/components/modals/ExceptionTraceModal";
const NUM_IN_PAGE = 8;
@@ -36,43 +37,10 @@ export function IndexingAttemptsTable({ ccPair }: { ccPair: CCPairFullInfo }) {
<>
{indexAttemptToDisplayTraceFor &&
indexAttemptToDisplayTraceFor.full_exception_trace && (
-