mirror of
https://github.com/ollama/ollama.git
synced 2025-08-27 20:49:09 +02:00
llm: Check for nil memory data before printing
We dump out our best memory estimate after we complete processing for any reason, including errors. This is helpful for finding what stopped us in error conditions but in some cases we might not have gotten even the first result yet. Fixes #11957
This commit is contained in:
@@ -651,7 +651,9 @@ func (s *ollamaServer) Load(ctx context.Context, gpus discover.GpuInfoList, requ
|
|||||||
if !success {
|
if !success {
|
||||||
s.initModel(ctx, LoadRequest{}, LoadOperationClose)
|
s.initModel(ctx, LoadRequest{}, LoadOperationClose)
|
||||||
}
|
}
|
||||||
s.mem.Log(slog.LevelInfo)
|
if s.mem != nil {
|
||||||
|
s.mem.Log(slog.LevelInfo)
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
slog.Info("loading model", "model layers", s.totalLayers, "requested", s.options.NumGPU)
|
slog.Info("loading model", "model layers", s.totalLayers, "requested", s.options.NumGPU)
|
||||||
|
Reference in New Issue
Block a user