ollamarunner: Provide mechanism for backends to report loading progress

This enables the runner to report progress back to the Ollama server,
both to show status to the user and to prevent the server
from killing the runner if it believes loading has stalled.

Most of the infrastructure was already there; this change extends it
so that it is available to the backends.
This commit is contained in:
Jesse Gross 2025-03-20 10:35:19 -07:00 committed by Jesse Gross
parent d3e9ca3eda
commit 0ff28758b3
2 changed files with 7 additions and 0 deletions

View File

@ -60,6 +60,10 @@ type CacheConfig struct {
// BackendParams controls how the backend loads and executes models
type BackendParams struct {
// Progress is a callback function that allows reporting percentage completion
// of model loading
Progress func(float32)
// NumThreads sets the number of threads to use if running on the CPU
NumThreads int

View File

@ -783,6 +783,9 @@ func Execute(args []string) error {
}
params := ml.BackendParams{
Progress: func(progress float32) {
server.progress = progress
},
NumThreads: *threads,
NumGPULayers: *numGPULayers,
MainGPU: *mainGPU,