Add verbose error messages + robustify assistant switching (#2144)

* add verbose error messages + robustify assistant switching and chat sessions

* fix typing

* cleaner errors + add stack trace
This commit is contained in:
pablodanswer 2024-08-15 14:05:04 -07:00 committed by GitHub
parent 9fa4280f96
commit 33fed955d9
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 175 additions and 64 deletions

View File

@ -78,6 +78,7 @@ class CitationInfo(BaseModel):
class StreamingError(BaseModel):
    """Error packet yielded into the chat response stream when processing fails."""

    # User-facing error message (already sanitized for display)
    error: str
    # Optional server-side stack trace / raw error detail for debugging UIs;
    # None when no trace is available
    stack_trace: str | None = None
class DanswerQuote(BaseModel):

View File

@ -51,6 +51,7 @@ from danswer.llm.exceptions import GenAIDisabledException
from danswer.llm.factory import get_llms_for_persona
from danswer.llm.factory import get_main_llm_from_tuple
from danswer.llm.interfaces import LLMConfig
from danswer.llm.utils import litellm_exception_to_error_msg
from danswer.natural_language_processing.utils import get_tokenizer
from danswer.search.enums import LLMEvaluationType
from danswer.search.enums import OptionalSearchSetting
@ -691,31 +692,14 @@ def stream_chat_message_objects(
if isinstance(packet, ToolCallFinalResult):
tool_result = packet
yield cast(ChatPacket, packet)
except Exception as e:
error_msg = str(e)
logger.exception(f"Failed to process chat message: {error_msg}")
if "Illegal header value b'Bearer '" in error_msg:
error_msg = (
f"Authentication error: Invalid or empty API key provided for '{llm.config.model_provider}'. "
"Please check your API key configuration."
)
elif (
"Invalid leading whitespace, reserved character(s), or return character(s) in header value"
in error_msg
):
error_msg = (
f"Authentication error: Invalid API key format for '{llm.config.model_provider}'. "
"Please ensure your API key does not contain leading/trailing whitespace or invalid characters."
)
elif llm.config.api_key and llm.config.api_key.lower() in error_msg.lower():
error_msg = f"LLM failed to respond. Invalid API key error from '{llm.config.model_provider}'."
else:
error_msg = "An unexpected error occurred while processing your request. Please try again later."
yield StreamingError(error=error_msg)
client_error_msg = litellm_exception_to_error_msg(e, llm)
if llm.config.api_key and len(llm.config.api_key) > 2:
error_msg = error_msg.replace(llm.config.api_key, "[REDACTED_API_KEY]")
yield StreamingError(error=client_error_msg, stack_trace=error_msg)
db_session.rollback()
return

View File

@ -16,6 +16,18 @@ from langchain.schema.messages import AIMessage
from langchain.schema.messages import BaseMessage
from langchain.schema.messages import HumanMessage
from langchain.schema.messages import SystemMessage
from litellm.exceptions import APIConnectionError # type: ignore
from litellm.exceptions import APIError # type: ignore
from litellm.exceptions import AuthenticationError # type: ignore
from litellm.exceptions import BadRequestError # type: ignore
from litellm.exceptions import BudgetExceededError # type: ignore
from litellm.exceptions import ContentPolicyViolationError # type: ignore
from litellm.exceptions import ContextWindowExceededError # type: ignore
from litellm.exceptions import NotFoundError # type: ignore
from litellm.exceptions import PermissionDeniedError # type: ignore
from litellm.exceptions import RateLimitError # type: ignore
from litellm.exceptions import Timeout # type: ignore
from litellm.exceptions import UnprocessableEntityError # type: ignore
from danswer.configs.constants import MessageType
from danswer.configs.model_configs import GEN_AI_MAX_OUTPUT_TOKENS
@ -29,13 +41,64 @@ from danswer.prompts.constants import CODE_BLOCK_PAT
from danswer.utils.logger import setup_logger
from shared_configs.configs import LOG_LEVEL
if TYPE_CHECKING:
from danswer.llm.answering.models import PreviousMessage
logger = setup_logger()
def litellm_exception_to_error_msg(e: Exception, llm: LLM) -> str:
    """Translate a LiteLLM exception into a user-friendly error message.

    Args:
        e: The exception raised while invoking the LLM (typically one of the
            litellm exception classes, but any Exception is accepted).
        llm: The LLM whose config (model name/provider) is used to enrich
            context-window errors.

    Returns:
        A human-readable error string safe to surface to end users.
    """
    error_msg = str(e)
    # NOTE: ContextWindowExceededError and ContentPolicyViolationError are
    # subclasses of BadRequestError in litellm, so they must be checked
    # BEFORE the generic BadRequestError branch or they can never match.
    if isinstance(e, ContextWindowExceededError):
        error_msg = (
            "Context window exceeded: Your input is too long for the model to process."
        )
        if llm is not None:
            try:
                max_context = get_max_input_tokens(
                    model_name=llm.config.model_name,
                    model_provider=llm.config.model_provider,
                )
                error_msg += f" Your invoked model ({llm.config.model_name}) has a maximum context size of {max_context}."
            except Exception:
                logger.warning(
                    "Unable to get maximum input token for LiteLLM exception handling"
                )
    elif isinstance(e, ContentPolicyViolationError):
        error_msg = "Content policy violation: Your request violates the content policy. Please revise your input."
    elif isinstance(e, BadRequestError):
        error_msg = "Bad request: The server couldn't process your request. Please check your input."
    elif isinstance(e, AuthenticationError):
        error_msg = "Authentication failed: Please check your API key and credentials."
    elif isinstance(e, PermissionDeniedError):
        error_msg = (
            "Permission denied: You don't have the necessary permissions for this operation."
            "Ensure you have access to this model."
        )
    elif isinstance(e, NotFoundError):
        error_msg = "Resource not found: The requested resource doesn't exist."
    elif isinstance(e, UnprocessableEntityError):
        error_msg = "Unprocessable entity: The server couldn't process your request due to semantic errors."
    elif isinstance(e, RateLimitError):
        error_msg = (
            "Rate limit exceeded: Please slow down your requests and try again later."
        )
    elif isinstance(e, Timeout):
        error_msg = "Request timed out: The operation took too long to complete. Please try again."
    elif isinstance(e, APIConnectionError):
        error_msg = "API connection error: Failed to connect to the API. Please check your internet connection."
    elif isinstance(e, BudgetExceededError):
        error_msg = (
            "Budget exceeded: You've exceeded your allocated budget for API usage."
        )
    elif isinstance(e, APIError):
        error_msg = f"API error: An error occurred while communicating with the API. Details: {str(e)}"
    else:
        error_msg = "An unexpected error occurred while processing your request. Please try again later."
    return error_msg
def translate_danswer_msg_to_langchain(
msg: Union[ChatMessage, "PreviousMessage"],
) -> BaseMessage:

View File

@ -19,6 +19,7 @@ import { localizeAndPrettify } from "@/lib/time";
import { getDocsProcessedPerMinute } from "@/lib/indexAttempt";
import { Modal } from "@/components/Modal";
import { CheckmarkIcon, CopyIcon } from "@/components/icons/icons";
import ExceptionTraceModal from "@/components/modals/ExceptionTraceModal";
const NUM_IN_PAGE = 8;
@ -36,43 +37,10 @@ export function IndexingAttemptsTable({ ccPair }: { ccPair: CCPairFullInfo }) {
<>
{indexAttemptToDisplayTraceFor &&
indexAttemptToDisplayTraceFor.full_exception_trace && (
<Modal
width="w-4/6"
className="h-5/6 overflow-y-hidden flex flex-col"
title="Full Exception Trace"
<ExceptionTraceModal
onOutsideClick={() => setIndexAttemptTracePopupId(null)}
>
<div className="overflow-y-auto mb-6">
<div className="mb-6">
{!copyClicked ? (
<div
onClick={() => {
navigator.clipboard.writeText(
indexAttemptToDisplayTraceFor.full_exception_trace!
);
setCopyClicked(true);
setTimeout(() => setCopyClicked(false), 2000);
}}
className="flex w-fit cursor-pointer hover:bg-hover-light p-2 border-border border rounded"
>
Copy full trace
<CopyIcon className="ml-2 my-auto" />
</div>
) : (
<div className="flex w-fit hover:bg-hover-light p-2 border-border border rounded cursor-default">
Copied to clipboard
<CheckmarkIcon
className="my-auto ml-2 flex flex-shrink-0 text-success"
size={16}
/>
</div>
)}
</div>
<div className="whitespace-pre-wrap">
{indexAttemptToDisplayTraceFor.full_exception_trace}
</div>
</div>
</Modal>
exceptionTrace={indexAttemptToDisplayTraceFor.full_exception_trace!}
/>
)}
<Table>

View File

@ -85,6 +85,7 @@ import { SetDefaultModelModal } from "./modal/SetDefaultModelModal";
import { DeleteChatModal } from "./modal/DeleteChatModal";
import remarkGfm from "remark-gfm";
import { MinimalMarkdown } from "@/components/chat_search/MinimalMarkdown";
import ExceptionTraceModal from "@/components/modals/ExceptionTraceModal";
const TEMP_USER_MESSAGE_ID = -1;
const TEMP_ASSISTANT_MESSAGE_ID = -2;
@ -118,6 +119,8 @@ export function ChatPage({
// chat session
const existingChatIdRaw = searchParams.get("chatId");
const currentPersonaId = searchParams.get(SEARCH_PARAM_NAMES.PERSONA_ID);
const existingChatSessionId = existingChatIdRaw
? parseInt(existingChatIdRaw)
: null;
@ -181,10 +184,19 @@ export function ChatPage({
defaultTemperature
);
const liveAssistant =
selectedAssistant || filteredAssistants[0] || availableAssistants[0];
const [alternativeAssistant, setAlternativeAssistant] =
useState<Persona | null>(null);
const liveAssistant =
alternativeAssistant ||
selectedAssistant ||
filteredAssistants[0] ||
availableAssistants[0];
useEffect(() => {
if (!loadedIdSessionRef.current && !currentPersonaId) {
return;
}
const personaDefault = getLLMProviderOverrideForPersona(
liveAssistant,
llmProviders
@ -200,8 +212,6 @@ export function ChatPage({
}, [liveAssistant]);
// this is for "@"ing assistants
const [alternativeAssistant, setAlternativeAssistant] =
useState<Persona | null>(null);
// this is used to track which assistant is being used to generate the current message
// for example, this would come into play when:
@ -816,6 +826,8 @@ export function ChatPage({
let documents: DanswerDocument[] = selectedDocuments;
let aiMessageImages: FileDescriptor[] | null = null;
let error: string | null = null;
let stackTrace: string | null = null;
let finalMessage: BackendMessage | null = null;
let toolCalls: ToolCallMetadata[] = [];
@ -887,6 +899,7 @@ export function ChatPage({
if (!stack.isEmpty()) {
const packet = stack.nextPacket();
console.log(packet);
if (packet) {
if (Object.hasOwn(packet, "answer_piece")) {
answer += (packet as AnswerPiecePacket).answer_piece;
@ -918,6 +931,7 @@ export function ChatPage({
);
} else if (Object.hasOwn(packet, "error")) {
error = (packet as StreamingError).error;
stackTrace = (packet as StreamingError).stack_trace;
} else if (Object.hasOwn(packet, "message_id")) {
finalMessage = packet as BackendMessage;
}
@ -950,6 +964,7 @@ export function ChatPage({
toolCalls: finalMessage?.tool_calls || toolCalls,
parentMessageId: newUserMessageId,
alternateAssistantID: alternativeAssistant?.id,
stackTrace: stackTrace,
},
]);
}
@ -1172,6 +1187,9 @@ export function ChatPage({
liveAssistant
);
});
const [stackTraceModalContent, setStackTraceModalContent] = useState<
string | null
>(null);
const innerSidebarElementRef = useRef<HTMLDivElement>(null);
const [settingsToggled, setSettingsToggled] = useState(false);
@ -1264,6 +1282,13 @@ export function ChatPage({
/>
)}
{stackTraceModalContent && (
<ExceptionTraceModal
onOutsideClick={() => setStackTraceModalContent(null)}
exceptionTrace={stackTraceModalContent}
/>
)}
{sharedChatSession && (
<ShareChatSessionModal
chatSessionId={sharedChatSession.id}
@ -1615,6 +1640,18 @@ export function ChatPage({
content={
<p className="text-red-700 text-sm my-auto">
{message.message}
{message.stackTrace && (
<span
onClick={() =>
setStackTraceModalContent(
message.stackTrace!
)
}
className="ml-2 cursor-pointer underline"
>
Show stack trace.
</span>
)}
</p>
}
/>

View File

@ -87,6 +87,7 @@ export interface Message {
childrenMessageIds?: number[];
latestChildMessageId?: number | null;
alternateAssistantID?: number | null;
stackTrace?: string | null;
}
export interface BackendChatSession {
@ -128,4 +129,5 @@ export interface ImageGenerationDisplay {
export interface StreamingError {
  error: string;
  // Mirrors the backend StreamingError model, where stack_trace is
  // `str | None = None` — it may therefore arrive as null.
  stack_trace: string | null;
}

View File

@ -8,7 +8,14 @@ import {
FiGlobe,
} from "react-icons/fi";
import { FeedbackType } from "../types";
import { useContext, useEffect, useRef, useState } from "react";
import {
Dispatch,
SetStateAction,
useContext,
useEffect,
useRef,
useState,
} from "react";
import ReactMarkdown from "react-markdown";
import {
DanswerDocument,
@ -52,6 +59,7 @@ import { useMouseTracking } from "./hooks";
import { InternetSearchIcon } from "@/components/InternetSearchIcon";
import { SettingsContext } from "@/components/settings/SettingsProvider";
import GeneratingImageDisplay from "../tools/GeneratingImageDisplay";
import ExceptionTraceModal from "@/components/modals/ExceptionTraceModal";
const TOOLS_WITH_CUSTOM_HANDLING = [
SEARCH_TOOL_NAME,
@ -432,7 +440,6 @@ export const AIMessage = ({
) : isComplete ? null : (
<></>
)}
{isComplete && docs && docs.length > 0 && (
<div className="mt-2 -mx-8 w-full mb-4 flex relative">
<div className="w-full">

View File

@ -0,0 +1,49 @@
import { useState } from "react";
import { Modal } from "../Modal";
import { CheckmarkIcon, CopyIcon } from "../icons/icons";
// Modal that displays a full exception trace with a copy-to-clipboard control.
// `exceptionTrace` is the trace text; `onOutsideClick` closes the modal.
export default function ExceptionTraceModal({
  onOutsideClick,
  exceptionTrace,
}: {
  onOutsideClick: () => void;
  exceptionTrace: string;
}) {
  // Tracks transient "Copied to clipboard" feedback state
  const [copyClicked, setCopyClicked] = useState(false);
  return (
    <Modal
      width="w-4/6"
      className="h-5/6 overflow-y-hidden flex flex-col"
      title="Full Exception Trace"
      onOutsideClick={onOutsideClick}
    >
      <div className="overflow-y-auto mb-6">
        <div className="mb-6">
          {!copyClicked ? (
            <div
              onClick={() => {
                // Prop is a non-nullable string, so no non-null assertion is needed
                navigator.clipboard.writeText(exceptionTrace);
                setCopyClicked(true);
                // Revert the "Copied" indicator after 2 seconds
                setTimeout(() => setCopyClicked(false), 2000);
              }}
              className="flex w-fit cursor-pointer hover:bg-hover-light p-2 border-border border rounded"
            >
              Copy full trace
              <CopyIcon className="ml-2 my-auto" />
            </div>
          ) : (
            <div className="flex w-fit hover:bg-hover-light p-2 border-border border rounded cursor-default">
              Copied to clipboard
              <CheckmarkIcon
                className="my-auto ml-2 flex flex-shrink-0 text-success"
                size={16}
              />
            </div>
          )}
        </div>
        <div className="whitespace-pre-wrap">{exceptionTrace}</div>
      </div>
    </Modal>
  );
}