Mirror of https://github.com/ollama/ollama.git, synced 2025-06-04 22:19:16 +02:00.
llm: auto detect models that require Ollama Engine (#1)
This commit is contained in:
parent
11bfa62796
commit
ab39e08eb9
@ -133,6 +133,10 @@ func (kv KV) Floats(key string, defaultValue ...[]float32) []float32 {
|
||||
return s
|
||||
}
|
||||
|
||||
func (kv KV) OllamaEngineRequired() bool {
|
||||
return kv.Architecture() == "gemma3"
|
||||
}
|
||||
|
||||
func keyValue[T string | uint32 | uint64 | float32 | *array | bool](kv KV, key string, defaultValue ...T) T {
|
||||
if !strings.HasPrefix(key, "tokenizer.") && !strings.HasPrefix(key, "general.") {
|
||||
key = kv.Architecture() + "." + key
|
||||
|
@ -271,7 +271,7 @@ func NewLlamaServer(gpus discover.GpuInfoList, modelPath string, f *ggml.GGML, a
|
||||
|
||||
var llamaModel *llama.Model
|
||||
var textProcessor model.TextProcessor
|
||||
if envconfig.NewEngine() {
|
||||
if envconfig.NewEngine() || f.KV().OllamaEngineRequired() {
|
||||
textProcessor, err = model.NewTextProcessor(modelPath)
|
||||
if err != nil {
|
||||
// To prepare for opt-out mode, instead of treating this as an error, we fallback to the old runner
|
||||
|
Loading…
x
Reference in New Issue
Block a user