Add support for o1 (#2538)

* add o1 support + bump litellm/openai

* ports

* update exception message for testing
This commit is contained in:
pablodanswer 2024-09-22 16:16:28 -07:00 committed by GitHub
parent 014ba9e220
commit 45f67368a2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 17 additions and 7 deletions

View File

@@ -24,6 +24,8 @@ class WellKnownLLMProviderDescriptor(BaseModel):
OPENAI_PROVIDER_NAME = "openai"
OPEN_AI_MODEL_NAMES = [
"o1-mini",
"o1-preview",
"gpt-4",
"gpt-4o",
"gpt-4o-mini",

View File

@@ -47,7 +47,9 @@ if TYPE_CHECKING:
logger = setup_logger()
def litellm_exception_to_error_msg(e: Exception, llm: LLM) -> str:
def litellm_exception_to_error_msg(
e: Exception, llm: LLM, fallback_to_error_msg: bool = False
) -> str:
error_msg = str(e)
if isinstance(e, BadRequestError):
@@ -94,7 +96,7 @@ def litellm_exception_to_error_msg(e: Exception, llm: LLM) -> str:
error_msg = "Request timed out: The operation took too long to complete. Please try again."
elif isinstance(e, APIError):
error_msg = f"API error: An error occurred while communicating with the API. Details: {str(e)}"
else:
elif not fallback_to_error_msg:
error_msg = "An unexpected error occurred while processing your request. Please try again later."
return error_msg

View File

@@ -18,6 +18,7 @@ from danswer.llm.factory import get_default_llms
from danswer.llm.factory import get_llm
from danswer.llm.llm_provider_options import fetch_available_well_known_llms
from danswer.llm.llm_provider_options import WellKnownLLMProviderDescriptor
from danswer.llm.utils import litellm_exception_to_error_msg
from danswer.llm.utils import test_llm
from danswer.server.manage.llm.models import FullLLMProvider
from danswer.server.manage.llm.models import LLMProviderDescriptor
@@ -78,7 +79,10 @@ def test_llm_configuration(
)
if error:
raise HTTPException(status_code=400, detail=error)
client_error_msg = litellm_exception_to_error_msg(
error, llm, fallback_to_error_msg=True
)
raise HTTPException(status_code=400, detail=client_error_msg)
@admin_router.post("/test/default")

View File

@@ -28,14 +28,14 @@ jsonref==1.1.0
langchain==0.1.17
langchain-core==0.1.50
langchain-text-splitters==0.0.1
litellm==1.43.18
litellm==1.47.1
llama-index==0.9.45
Mako==1.2.4
msal==1.28.0
nltk==3.8.1
Office365-REST-Python-Client==2.5.9
oauthlib==3.2.2
openai==1.41.1
openai==1.47.0
openpyxl==3.1.2
playwright==1.41.2
psutil==5.9.5

View File

@@ -3,7 +3,7 @@ einops==0.8.0
fastapi==0.109.2
google-cloud-aiplatform==1.58.0
numpy==1.26.4
openai==1.41.1
openai==1.47.0
pydantic==2.8.2
retry==0.9.2
safetensors==0.4.2

View File

@@ -230,6 +230,8 @@ export const useUserGroups = (): {
const MODEL_DISPLAY_NAMES: { [key: string]: string } = {
// OpenAI models
"o1-mini": "O1 Mini",
"o1-preview": "O1 Preview",
"gpt-4": "GPT 4",
"gpt-4o": "GPT 4o",
"gpt-4o-2024-08-06": "GPT 4o (Structured Outputs)",
@@ -292,7 +294,7 @@ export function getDisplayNameForModel(modelName: string): string {
}
export const defaultModelsByProvider: { [name: string]: string[] } = {
openai: ["gpt-4", "gpt-4o", "gpt-4o-mini"],
openai: ["gpt-4", "gpt-4o", "gpt-4o-mini", "o1-mini", "o1-preview"],
bedrock: [
"meta.llama3-1-70b-instruct-v1:0",
"meta.llama3-1-8b-instruct-v1:0",