Set GPT 4o as default and add O3 mini (#3899)

* quick update to models
* add reqs
* update version

parent 8eb4320f76
commit 0ec065f1fb
@@ -409,7 +409,11 @@ class DefaultMultiLLM(LLM):
                 # For now, we don't support parallel tool calls
                 # NOTE: we can't pass this in if tools are not specified
                 # or else OpenAI throws an error
-                **({"parallel_tool_calls": False} if tools else {}),
+                **(
+                    {"parallel_tool_calls": False}
+                    if tools and self.config.model_name != "o3-mini"
+                    else {}
+                ),  # TODO: remove once LITELLM has patched
                 **(
                     {"response_format": structured_response_format}
                     if structured_response_format
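The hunk above only passes parallel_tool_calls=False when tools are supplied and the model is not o3-mini. The dict-unpacking pattern it relies on can be sketched in isolation as follows; this is a minimal, standalone sketch, and build_completion_kwargs is a hypothetical helper, not the repository's actual call site.

def build_completion_kwargs(
    model_name: str,
    tools: list[dict] | None,
    structured_response_format: dict | None,
) -> dict:
    # Unpacking an empty dict adds no key, so the flag is simply omitted
    # when there are no tools or the model does not accept it.
    return {
        "model": model_name,
        **(
            {"parallel_tool_calls": False}
            if tools and model_name != "o3-mini"
            else {}
        ),
        **(
            {"response_format": structured_response_format}
            if structured_response_format
            else {}
        ),
    }

# Example: no tools, so parallel_tool_calls never appears in the kwargs.
print(build_completion_kwargs("gpt-4o", tools=None, structured_response_format=None))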
@@ -469,9 +473,7 @@ class DefaultMultiLLM(LLM):
         if LOG_DANSWER_MODEL_INTERACTIONS:
             self.log_model_configs()
 
-        if (
-            DISABLE_LITELLM_STREAMING or self.config.model_name == "o1-2024-12-17"
-        ):  # TODO: remove once litellm supports streaming
+        if DISABLE_LITELLM_STREAMING:
             yield self.invoke(prompt, tools, tool_choice, structured_response_format)
             return
 
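This hunk drops the per-model carve-out for o1-2024-12-17 and keys the fallback on DISABLE_LITELLM_STREAMING alone: when streaming is off, the full response is yielded as a single chunk and the generator returns. A rough sketch of that control flow is below; stream_or_invoke and invoke_once are hypothetical stand-ins, not the repository's methods.

from collections.abc import Iterator

DISABLE_LITELLM_STREAMING = False  # assumption: mirrors the env-driven flag


def invoke_once(prompt: str) -> str:
    # Placeholder for a blocking, non-streaming completion call.
    return f"full answer to: {prompt}"


def stream_or_invoke(prompt: str) -> Iterator[str]:
    if DISABLE_LITELLM_STREAMING:
        # Yield the whole response as one chunk and stop.
        yield invoke_once(prompt)
        return
    # Otherwise emit token-sized chunks (trivially simulated here).
    for token in invoke_once(prompt).split():
        yield token


print(list(stream_or_invoke("hello")))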
@@ -27,6 +27,7 @@ class WellKnownLLMProviderDescriptor(BaseModel):
 
 OPENAI_PROVIDER_NAME = "openai"
 OPEN_AI_MODEL_NAMES = [
+    "o3-mini",
     "o1-mini",
     "o1-preview",
     "o1-2024-12-17",
@@ -91,7 +92,7 @@ def fetch_available_well_known_llms() -> list[WellKnownLLMProviderDescriptor]:
             api_version_required=False,
             custom_config_keys=[],
             llm_names=fetch_models_for_provider(OPENAI_PROVIDER_NAME),
-            default_model="gpt-4",
+            default_model="gpt-4o",
             default_fast_model="gpt-4o-mini",
         ),
         WellKnownLLMProviderDescriptor(
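The default for the OpenAI provider descriptor moves from gpt-4 to gpt-4o while the fast model stays gpt-4o-mini. A simplified recreation of the fields visible in this hunk is sketched below; ProviderDescriptorSketch is an illustrative stand-in and not the repository's WellKnownLLMProviderDescriptor class.

from pydantic import BaseModel


class ProviderDescriptorSketch(BaseModel):
    # Only the fields touched or shown in the hunk are modelled.
    llm_names: list[str]
    default_model: str
    default_fast_model: str


openai_descriptor = ProviderDescriptorSketch(
    llm_names=["o3-mini", "o1-mini", "o1-preview", "gpt-4o", "gpt-4o-mini", "gpt-4"],
    default_model="gpt-4o",  # was "gpt-4" before this commit
    default_fast_model="gpt-4o-mini",
)
print(openai_descriptor.default_model)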
@@ -37,7 +37,7 @@ langchainhub==0.1.21
 langgraph==0.2.59
 langgraph-checkpoint==2.0.5
 langgraph-sdk==0.1.44
-litellm==1.55.4
+litellm==1.60.2
 lxml==5.3.0
 lxml_html_clean==0.2.2
 llama-index==0.9.45
@@ -46,7 +46,7 @@ msal==1.28.0
 nltk==3.8.1
 Office365-REST-Python-Client==2.5.9
 oauthlib==3.2.2
-openai==1.55.3
+openai==1.61.0
 openpyxl==3.1.2
 playwright==1.41.2
 psutil==5.9.5
@@ -3,7 +3,7 @@ cohere==5.6.1
 fastapi==0.109.2
 google-cloud-aiplatform==1.58.0
 numpy==1.26.4
-openai==1.55.3
+openai==1.61.0
 pydantic==2.8.2
 retry==0.9.2
 safetensors==0.4.2
@@ -12,5 +12,5 @@ torch==2.2.0
 transformers==4.39.2
 uvicorn==0.21.1
 voyageai==0.2.3
-litellm==1.55.4
+litellm==1.60.2
 sentry-sdk[fastapi,celery,starlette]==2.14.0
@@ -647,9 +647,10 @@ export const useUserGroups = (): {
 
 const MODEL_DISPLAY_NAMES: { [key: string]: string } = {
   // OpenAI models
+  "o3-mini": "O3 Mini",
   "o1-mini": "O1 Mini",
   "o1-preview": "O1 Preview",
-  "o1-2024-12-17": "O1",
+  o1: "O1",
   "gpt-4": "GPT 4",
   "gpt-4o": "GPT 4o",
   "gpt-4o-2024-08-06": "GPT 4o (Structured Outputs)",
@@ -751,7 +752,14 @@ export function getDisplayNameForModel(modelName: string): string {
 }
 
 export const defaultModelsByProvider: { [name: string]: string[] } = {
-  openai: ["gpt-4", "gpt-4o", "gpt-4o-mini", "o1-mini", "o1-preview"],
+  openai: [
+    "gpt-4",
+    "gpt-4o",
+    "gpt-4o-mini",
+    "o3-mini",
+    "o1-mini",
+    "o1-preview",
+  ],
   bedrock: [
     "meta.llama3-1-70b-instruct-v1:0",
     "meta.llama3-1-8b-instruct-v1:0",