Set GPT 4o as default and add O3 mini (#3899)

* quick update to models

* add reqs

* update version
pablonyx 2025-02-04 19:06:05 -08:00 committed by GitHub
parent 8eb4320f76
commit 0ec065f1fb
5 changed files with 22 additions and 11 deletions


@@ -409,7 +409,11 @@ class DefaultMultiLLM(LLM):
 # For now, we don't support parallel tool calls
 # NOTE: we can't pass this in if tools are not specified
 # or else OpenAI throws an error
-**({"parallel_tool_calls": False} if tools else {}),
+**(
+    {"parallel_tool_calls": False}
+    if tools and self.config.model_name != "o3-mini"
+    else {}
+), # TODO: remove once LITELLM has patched
 **(
     {"response_format": structured_response_format}
     if structured_response_format
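For reference, a minimal sketch (not part of this commit) of how the conditional kwargs above behave; the helper name and structure are hypothetical, only the "o3-mini" special case mirrors the diff:

```python
# Illustrative sketch only; build_tool_kwargs is a hypothetical helper.
def build_tool_kwargs(model_name: str, tools: list | None) -> dict:
    kwargs: dict = {}
    if tools:
        kwargs["tools"] = tools
        # o3-mini currently rejects parallel_tool_calls, so the flag is only
        # sent for other tool-capable models (dropped once litellm patches it).
        if model_name != "o3-mini":
            kwargs["parallel_tool_calls"] = False
    return kwargs

# build_tool_kwargs("o3-mini", tools=[{"type": "function"}]) -> only "tools" is set
# build_tool_kwargs("gpt-4o", tools=[{"type": "function"}])  -> also sets parallel_tool_calls=False
```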
@@ -469,9 +473,7 @@
 if LOG_DANSWER_MODEL_INTERACTIONS:
     self.log_model_configs()

-if (
-    DISABLE_LITELLM_STREAMING or self.config.model_name == "o1-2024-12-17"
-): # TODO: remove once litellm supports streaming
+if DISABLE_LITELLM_STREAMING:
     yield self.invoke(prompt, tools, tool_choice, structured_response_format)
     return
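A hedged sketch of the resulting control flow, with names simplified: when streaming is disabled globally, the full response from invoke() is yielded once instead of iterating token deltas, and the o1-2024-12-17 special case is no longer needed.

```python
# Simplified sketch, not the actual class; invoke() stands in for DefaultMultiLLM.invoke.
from collections.abc import Iterator

DISABLE_LITELLM_STREAMING = False  # assumption: driven by an env var in the real code

def stream(invoke, prompt: str) -> Iterator[str]:
    if DISABLE_LITELLM_STREAMING:
        # fall back to a single non-streamed completion
        yield invoke(prompt)
        return
    # ...otherwise yield streamed deltas from litellm (omitted here)
```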


@@ -27,6 +27,7 @@ class WellKnownLLMProviderDescriptor(BaseModel):
 OPENAI_PROVIDER_NAME = "openai"
 OPEN_AI_MODEL_NAMES = [
+    "o3-mini",
     "o1-mini",
     "o1-preview",
     "o1-2024-12-17",
@@ -91,7 +92,7 @@ def fetch_available_well_known_llms() -> list[WellKnownLLMProviderDescriptor]:
     api_version_required=False,
     custom_config_keys=[],
     llm_names=fetch_models_for_provider(OPENAI_PROVIDER_NAME),
-    default_model="gpt-4",
+    default_model="gpt-4o",
     default_fast_model="gpt-4o-mini",
 ),
 WellKnownLLMProviderDescriptor(
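A rough sketch of what the OpenAI provider entry looks like after this change; only the fields shown in the diff are taken from the commit, everything else is a placeholder rather than the real WellKnownLLMProviderDescriptor model:

```python
# Placeholder dict standing in for the pydantic descriptor.
OPEN_AI_MODEL_NAMES = [
    "o3-mini",  # newly listed
    "o1-mini",
    "o1-preview",
    "o1-2024-12-17",
    "gpt-4o",
    "gpt-4o-mini",
]

openai_provider = {
    "name": "openai",
    "llm_names": OPEN_AI_MODEL_NAMES,
    "default_model": "gpt-4o",  # was "gpt-4"
    "default_fast_model": "gpt-4o-mini",
}
```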


@@ -37,7 +37,7 @@ langchainhub==0.1.21
 langgraph==0.2.59
 langgraph-checkpoint==2.0.5
 langgraph-sdk==0.1.44
-litellm==1.55.4
+litellm==1.60.2
 lxml==5.3.0
 lxml_html_clean==0.2.2
 llama-index==0.9.45
@@ -46,7 +46,7 @@ msal==1.28.0
 nltk==3.8.1
 Office365-REST-Python-Client==2.5.9
 oauthlib==3.2.2
-openai==1.55.3
+openai==1.61.0
 openpyxl==3.1.2
 playwright==1.41.2
 psutil==5.9.5


@@ -3,7 +3,7 @@ cohere==5.6.1
 fastapi==0.109.2
 google-cloud-aiplatform==1.58.0
 numpy==1.26.4
-openai==1.55.3
+openai==1.61.0
 pydantic==2.8.2
 retry==0.9.2
 safetensors==0.4.2
@@ -12,5 +12,5 @@ torch==2.2.0
 transformers==4.39.2
 uvicorn==0.21.1
 voyageai==0.2.3
-litellm==1.55.4
+litellm==1.60.2
 sentry-sdk[fastapi,celery,starlette]==2.14.0
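A quick way to confirm an environment picked up the new pins; the versions come from the diff, the check itself is just an illustration:

```python
# Prints installed vs. expected versions for the bumped packages
# (raises PackageNotFoundError if a package is missing).
import importlib.metadata as metadata

EXPECTED = {"litellm": "1.60.2", "openai": "1.61.0"}

for package, expected in EXPECTED.items():
    installed = metadata.version(package)
    status = "ok" if installed == expected else "MISMATCH"
    print(f"{package}: installed {installed}, expected {expected} [{status}]")
```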


@@ -647,9 +647,10 @@ export const useUserGroups = (): {
 const MODEL_DISPLAY_NAMES: { [key: string]: string } = {
   // OpenAI models
+  "o3-mini": "O3 Mini",
   "o1-mini": "O1 Mini",
   "o1-preview": "O1 Preview",
   "o1-2024-12-17": "O1",
   o1: "O1",
   "gpt-4": "GPT 4",
   "gpt-4o": "GPT 4o",
   "gpt-4o-2024-08-06": "GPT 4o (Structured Outputs)",
@@ -751,7 +752,14 @@ export function getDisplayNameForModel(modelName: string): string {
 }

 export const defaultModelsByProvider: { [name: string]: string[] } = {
-  openai: ["gpt-4", "gpt-4o", "gpt-4o-mini", "o1-mini", "o1-preview"],
+  openai: [
+    "gpt-4",
+    "gpt-4o",
+    "gpt-4o-mini",
+    "o3-mini",
+    "o1-mini",
+    "o1-preview",
+  ],
   bedrock: [
     "meta.llama3-1-70b-instruct-v1:0",
     "meta.llama3-1-8b-instruct-v1:0",