Merge pull request #1601 from danswer-ai/prune-model-list

ChatPage now checks for an LLM override when validating image uploads
This commit is contained in:
hagen-danswer 2024-06-09 20:17:13 -04:00 committed by GitHub
commit f18aa2368e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 24 additions and 9 deletions

View File

@ -30,9 +30,9 @@ OPEN_AI_MODEL_NAMES = [
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4-32k",
# "gpt-4-32k", # not EOL but still doesn't work
"gpt-4-0613",
"gpt-4-32k-0613",
# "gpt-4-32k-0613", # not EOL but still doesn't work
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-3.5-turbo",
@ -51,8 +51,16 @@ BEDROCK_MODEL_NAMES = [model for model in litellm.bedrock_models if "/" not in m
::-1
]
IGNORABLE_ANTHROPIC_MODELS = [
"claude-2",
"claude-instant-1",
]
ANTHROPIC_PROVIDER_NAME = "anthropic"
ANTHROPIC_MODEL_NAMES = [model for model in litellm.anthropic_models][::-1]
ANTHROPIC_MODEL_NAMES = [
model
for model in litellm.anthropic_models
if model not in IGNORABLE_ANTHROPIC_MODELS
][::-1]
AZURE_PROVIDER_NAME = "azure"
@ -73,7 +81,7 @@ def fetch_available_well_known_llms() -> list[WellKnownLLMProviderDescriptor]:
api_base_required=False,
api_version_required=False,
custom_config_keys=[],
llm_names=fetch_models_for_provider("openai"),
llm_names=fetch_models_for_provider(OPENAI_PROVIDER_NAME),
default_model="gpt-4",
default_fast_model="gpt-3.5-turbo",
),

View File

@ -773,7 +773,7 @@ export function ChatPage({
const handleImageUpload = (acceptedFiles: File[]) => {
const llmAcceptsImages = checkLLMSupportsImageInput(
...getFinalLLM(llmProviders, livePersona)
...getFinalLLM(llmProviders, livePersona, llmOverrideManager.llmOverride)
);
const imageFiles = acceptedFiles.filter((file) =>
file.type.startsWith("image/")

View File

@ -56,7 +56,7 @@ export function ChatInputBar({
}, [message]);
const { llmProviders } = useChatContext();
const [_, llmName] = getFinalLLM(llmProviders, selectedAssistant);
const [_, llmName] = getFinalLLM(llmProviders, selectedAssistant, null);
return (
<div>

View File

@ -19,7 +19,7 @@ export function AssistantsTab({
llmProviders,
onSelect,
}: AssistantsTabProps) {
const [_, llmName] = getFinalLLM(llmProviders, null);
const [_, llmName] = getFinalLLM(llmProviders, null, null);
return (
<>

View File

@ -34,7 +34,7 @@ export function LlmTab({
debouncedSetTemperature(value);
};
const [_, defaultLlmName] = getFinalLLM(llmProviders, currentAssistant);
const [_, defaultLlmName] = getFinalLLM(llmProviders, currentAssistant, null);
const llmOptions: { name: string; value: string }[] = [];
const structureValue = (

View File

@ -1,9 +1,11 @@
import { Persona } from "@/app/admin/assistants/interfaces";
import { LLMProviderDescriptor } from "@/app/admin/models/llm/interfaces";
import { LlmOverride } from "@/lib/hooks";
export function getFinalLLM(
llmProviders: LLMProviderDescriptor[],
persona: Persona | null
persona: Persona | null,
llmOverride: LlmOverride | null
): [string, string] {
const defaultProvider = llmProviders.find(
(llmProvider) => llmProvider.is_default_provider
@ -17,6 +19,11 @@ export function getFinalLLM(
model = persona.llm_model_version_override || model;
}
if (llmOverride) {
provider = llmOverride.provider || provider;
model = llmOverride.modelName || model;
}
return [provider, model];
}