From 5296f487a840b2b9ffc28ed9b45d223a32359973 Mon Sep 17 00:00:00 2001 From: Jeffrey Morgan Date: Thu, 13 Feb 2025 22:37:59 -0800 Subject: [PATCH] llm: attempt to evaluate symlinks, but do not fail (#9089) provides a better approach to #9088 that will attempt to evaluate symlinks (important for macOS where 'ollama' is often a symlink), but use the result of os.Executable() as a fallback in scenarios where filepath.EvalSymlinks fails due to permission errors or other issues --- discover/path.go | 4 ++++ llm/server.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/discover/path.go b/discover/path.go index 23aa8110d..8a20d8c21 100644 --- a/discover/path.go +++ b/discover/path.go @@ -19,6 +19,10 @@ var LibOllamaPath string = func() string { return "" } + if eval, err := filepath.EvalSymlinks(exe); err == nil { + exe = eval + } + var libPath string switch runtime.GOOS { case "windows": diff --git a/llm/server.go b/llm/server.go index f88963060..fd027a535 100644 --- a/llm/server.go +++ b/llm/server.go @@ -320,6 +320,10 @@ func NewLlamaServer(gpus discover.GpuInfoList, model string, f *ggml.GGML, adapt return nil, fmt.Errorf("unable to lookup executable path: %w", err) } + if eval, err := filepath.EvalSymlinks(exe); err == nil { + exe = eval + } + // TODO - once fully switched to the Go runner, load the model here for tokenize/detokenize cgo access s := &llmServer{ port: port,