ChatPage now checks for an LLM override when validating image uploads

This commit is contained in:
hagen-danswer 2024-06-09 17:05:41 -07:00
parent 64a042b94d
commit 4d0794f4f5
3 changed files with 21 additions and 6 deletions

View File

@ -30,9 +30,9 @@ OPEN_AI_MODEL_NAMES = [
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4-32k",
# "gpt-4-32k", # not EOL but still doesn't work
"gpt-4-0613",
"gpt-4-32k-0613",
# "gpt-4-32k-0613", # not EOL but still doesn't work
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-3.5-turbo",
@ -51,8 +51,16 @@ BEDROCK_MODEL_NAMES = [model for model in litellm.bedrock_models if "/" not in m
::-1
]
IGNORABLE_ANTHROPIC_MODELS = [
"claude-2",
"claude-instant-1",
]
ANTHROPIC_PROVIDER_NAME = "anthropic"
ANTHROPIC_MODEL_NAMES = [model for model in litellm.anthropic_models][::-1]
ANTHROPIC_MODEL_NAMES = [
model
for model in litellm.anthropic_models
if model not in IGNORABLE_ANTHROPIC_MODELS
][::-1]
AZURE_PROVIDER_NAME = "azure"
@ -73,7 +81,7 @@ def fetch_available_well_known_llms() -> list[WellKnownLLMProviderDescriptor]:
api_base_required=False,
api_version_required=False,
custom_config_keys=[],
llm_names=fetch_models_for_provider("openai"),
llm_names=fetch_models_for_provider(OPENAI_PROVIDER_NAME),
default_model="gpt-4",
default_fast_model="gpt-3.5-turbo",
),

View File

@ -773,7 +773,7 @@ export function ChatPage({
const handleImageUpload = (acceptedFiles: File[]) => {
const llmAcceptsImages = checkLLMSupportsImageInput(
...getFinalLLM(llmProviders, livePersona)
...getFinalLLM(llmProviders, livePersona, llmOverrideManager.llmOverride)
);
const imageFiles = acceptedFiles.filter((file) =>
file.type.startsWith("image/")

View File

@ -1,9 +1,11 @@
import { Persona } from "@/app/admin/assistants/interfaces";
import { LLMProviderDescriptor } from "@/app/admin/models/llm/interfaces";
import { LlmOverride } from "@/lib/hooks";
export function getFinalLLM(
llmProviders: LLMProviderDescriptor[],
persona: Persona | null
persona: Persona | null,
llmOverride: LlmOverride | null
): [string, string] {
const defaultProvider = llmProviders.find(
(llmProvider) => llmProvider.is_default_provider
@ -17,6 +19,11 @@ export function getFinalLLM(
model = persona.llm_model_version_override || model;
}
if (llmOverride) {
provider = llmOverride.provider || provider;
model = llmOverride.modelName || model;
}
return [provider, model];
}