mirror of
https://github.com/ollama/ollama.git
synced 2025-11-11 13:57:18 +01:00
ggml: Separate tensor load from backend creation
Currently, when the backend is created, the tensors are loaded at the same time, which is a slow operation. This change separates loading into two steps: (1) create the backend, including enumerating tensors and allocating memory; (2) load the tensor data. This allows more flexibility in managing model loading.
This commit is contained in:
@@ -271,7 +271,7 @@ func TestQuantizeModel(t *testing.T) {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
defer fp.Close()
|
||||
meta, _, err := fsggml.Decode(fp, -1)
|
||||
meta, err := fsggml.Decode(fp, -1)
|
||||
if err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
@@ -303,7 +303,7 @@ func TestQuantizeModel(t *testing.T) {
|
||||
t.Fatalf("failed to load the quantized model %s: %s", tmp.Name(), err)
|
||||
}
|
||||
defer fpNew.Close()
|
||||
newMeta, _, err := fsggml.Decode(fpNew, -1)
|
||||
newMeta, err := fsggml.Decode(fpNew, -1)
|
||||
if err != nil {
|
||||
t.Fatalf("failed to load the quantized model %s: %s", tmp.Name(), err)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user