mirror of
https://github.com/ollama/ollama.git
synced 2025-04-08 11:58:07 +02:00
fix gemma, command-r layer weights
This commit is contained in:
parent
7fea1ecdf6
commit
f81f308118
@@ -102,10 +102,14 @@ func EstimateGPULayers(gpus []gpu.GpuInfo, ggml *GGML, projectors []string, opts
 	layers := ggml.Tensors().Layers()

 	var memoryLayerOutput uint64
-	for k, v := range layers {
-		if k == "output" || k == "output_norm" {
-			memoryLayerOutput += v.size()
-		}
+	if layer, ok := layers["output_norm"]; ok {
+		memoryLayerOutput += layer.size()
+	}
+
+	if layer, ok := layers["output"]; ok {
+		memoryLayerOutput += layer.size()
+	} else if layer, ok := layers["token_embd"]; ok {
+		memoryLayerOutput += layer.size()
 	}

 	if gpus[0].Library == "metal" && opts.UseMMap {
Loading…
x
Reference in New Issue
Block a user