llm: auto detect models that require Ollama Engine (#1)

Daniel Hiltgen, 2025-03-11 04:25:16 -07:00; committed by Michael Yang
parent 11bfa62796
commit ab39e08eb9
2 changed files with 5 additions and 1 deletion

fs/ggml/ggml.go

@@ -133,6 +133,10 @@ func (kv KV) Floats(key string, defaultValue ...[]float32) []float32 {
 	return s
 }
 
+func (kv KV) OllamaEngineRequired() bool {
+	return kv.Architecture() == "gemma3"
+}
+
 func keyValue[T string | uint32 | uint64 | float32 | *array | bool](kv KV, key string, defaultValue ...T) T {
 	if !strings.HasPrefix(key, "tokenizer.") && !strings.HasPrefix(key, "general.") {
 		key = kv.Architecture() + "." + key
llm/server.go

@@ -271,7 +271,7 @@ func NewLlamaServer(gpus discover.GpuInfoList, modelPath string, f *ggml.GGML, a
 	var llamaModel *llama.Model
 	var textProcessor model.TextProcessor
-	if envconfig.NewEngine() {
+	if envconfig.NewEngine() || f.KV().OllamaEngineRequired() {
 		textProcessor, err = model.NewTextProcessor(modelPath)
 		if err != nil {
 			// To prepare for opt-out mode, instead of treating this as an error, we fallback to the old runner
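
The server-side effect: the new engine path is taken either when the user opts in or when the model demands it, with the existing fallback to the old runner preserved if text-processor construction fails. A sketch of that selection logic follows; newEngineOptIn and chooseRunner are hypothetical stand-ins, and OLLAMA_NEW_ENGINE is assumed to be the variable envconfig.NewEngine() reads:

package main

import (
	"fmt"
	"os"
)

// newEngineOptIn is a hypothetical stand-in for envconfig.NewEngine(),
// assumed to be driven by the OLLAMA_NEW_ENGINE environment variable.
func newEngineOptIn() bool {
	return os.Getenv("OLLAMA_NEW_ENGINE") != ""
}

// chooseRunner mirrors the updated condition in NewLlamaServer: prefer the
// new Ollama engine when opted in or when the model requires it; otherwise
// keep using the old llama runner.
func chooseRunner(engineRequired bool) string {
	if newEngineOptIn() || engineRequired {
		return "ollama engine (model.TextProcessor)"
	}
	return "llama runner (llama.Model)"
}

func main() {
	fmt.Println(chooseRunner(true))  // e.g. gemma3: always the new engine
	fmt.Println(chooseRunner(false)) // other models: old runner unless opted in
}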