mirror of
https://github.com/ollama/ollama.git
synced 2025-11-11 05:07:57 +01:00
For each memory allocation we report the size of the (attempted) allocation and whether it succeeded or failed. The latter status reporting proved to be not that useful in practice, as systems such as Windows can automatically overflow from VRAM into RAM, resulting in successful allocations even when there isn't enough memory where we wanted it. As a result, this information is only used for debug logging, which isn't worthwhile enough for the amount of code. It also isn't fully accurate, as multiple allocations may result in partial failures.
142 lines
6.3 KiB
Diff
142 lines
6.3 KiB
Diff
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
|
|
From: Jesse Gross <jesse@ollama.com>
|
|
Date: Fri, 18 Apr 2025 15:58:19 -0700
|
|
Subject: [PATCH] graph memory reporting on failure
|
|
|
|
---
|
|
ggml/include/ggml-alloc.h | 1 +
|
|
ggml/include/ggml-backend.h | 1 +
|
|
ggml/src/ggml-alloc.c | 36 ++++++++++++++++++++++++++++++++----
|
|
ggml/src/ggml-backend.cpp | 7 +++++++
|
|
4 files changed, 41 insertions(+), 4 deletions(-)
|
|
|
|
diff --git a/ggml/include/ggml-alloc.h b/ggml/include/ggml-alloc.h
|
|
index 2cb150fd2..7ab3f0192 100644
|
|
--- a/ggml/include/ggml-alloc.h
|
|
+++ b/ggml/include/ggml-alloc.h
|
|
@@ -65,6 +65,7 @@ GGML_API bool ggml_gallocr_reserve_n(
|
|
GGML_API bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph);
|
|
|
|
GGML_API size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id);
|
|
+GGML_API size_t ggml_gallocr_get_attempted_buffer_size(ggml_gallocr_t galloc, int buffer_id);
|
|
|
|
// Utils
|
|
// Create a buffer and allocate all the tensors in a ggml_context
|
|
diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h
|
|
index a2977ea2e..e8cf30841 100644
|
|
--- a/ggml/include/ggml-backend.h
|
|
+++ b/ggml/include/ggml-backend.h
|
|
@@ -303,6 +303,7 @@ extern "C" {
|
|
GGML_API int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched);
|
|
|
|
GGML_API size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend);
|
|
+ GGML_API size_t ggml_backend_sched_get_attempted_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend);
|
|
|
|
GGML_API void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend);
|
|
GGML_API ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node);
|
|
diff --git a/ggml/src/ggml-alloc.c b/ggml/src/ggml-alloc.c
|
|
index 8b6e60283..b58bd671d 100644
|
|
--- a/ggml/src/ggml-alloc.c
|
|
+++ b/ggml/src/ggml-alloc.c
|
|
@@ -350,6 +350,7 @@ struct node_alloc {
|
|
struct ggml_gallocr {
|
|
ggml_backend_buffer_type_t * bufts; // [n_buffers]
|
|
ggml_backend_buffer_t * buffers; // [n_buffers]
|
|
+ size_t *buffer_sizes; // [n_buffers]
|
|
struct ggml_dyn_tallocr ** buf_tallocs; // [n_buffers]
|
|
int n_buffers;
|
|
|
|
@@ -373,6 +374,9 @@ ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs
|
|
galloc->buffers = calloc(n_bufs, sizeof(ggml_backend_buffer_t));
|
|
GGML_ASSERT(galloc->buffers != NULL);
|
|
|
|
+ galloc->buffer_sizes = calloc(n_bufs, sizeof(size_t));
|
|
+ GGML_ASSERT(galloc->buffer_sizes != NULL);
|
|
+
|
|
galloc->buf_tallocs = calloc(n_bufs, sizeof(struct ggml_dyn_tallocr *));
|
|
GGML_ASSERT(galloc->buf_tallocs != NULL);
|
|
|
|
@@ -439,6 +443,7 @@ void ggml_gallocr_free(ggml_gallocr_t galloc) {
|
|
ggml_hash_set_free(&galloc->hash_set);
|
|
free(galloc->hash_values);
|
|
free(galloc->bufts);
|
|
+ free(galloc->buffer_sizes);
|
|
free(galloc->buffers);
|
|
free(galloc->buf_tallocs);
|
|
free(galloc->node_allocs);
|
|
@@ -734,6 +739,8 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
|
|
}
|
|
}
|
|
|
|
+ bool success = true;
|
|
+
|
|
// reallocate buffers if needed
|
|
for (int i = 0; i < galloc->n_buffers; i++) {
|
|
// if the buffer type is used multiple times, we reuse the same buffer
|
|
@@ -755,15 +762,20 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
|
|
|
|
ggml_backend_buffer_free(galloc->buffers[i]);
|
|
galloc->buffers[i] = ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size);
|
|
- if (galloc->buffers[i] == NULL) {
|
|
+ if (galloc->buffers[i]) {
|
|
+ galloc->buffer_sizes[i] = ggml_backend_buffer_get_size(galloc->buffers[i]);
|
|
+ ggml_backend_buffer_set_usage(galloc->buffers[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE);
|
|
+ } else {
|
|
GGML_LOG_ERROR("%s: failed to allocate %s buffer of size %zu\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), new_size);
|
|
- return false;
|
|
+ galloc->buffer_sizes[i] = new_size;
|
|
+ success = false;
|
|
}
|
|
- ggml_backend_buffer_set_usage(galloc->buffers[i], GGML_BACKEND_BUFFER_USAGE_COMPUTE);
|
|
+ } else {
|
|
+ galloc->buffer_sizes[i] = ggml_backend_buffer_get_size(galloc->buffers[i]);
|
|
}
|
|
}
|
|
|
|
- return true;
|
|
+ return success;
|
|
}
|
|
|
|
bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph *graph) {
|
|
@@ -920,6 +932,22 @@ size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) {
|
|
return ggml_backend_buffer_get_size(galloc->buffers[buffer_id]);
|
|
}
|
|
|
|
+size_t ggml_gallocr_get_attempted_buffer_size(ggml_gallocr_t galloc, int buffer_id) {
|
|
+ GGML_ASSERT(buffer_id >= 0 && buffer_id < galloc->n_buffers);
|
|
+
|
|
+ for (int i = 0; i < buffer_id; i++) {
|
|
+ if (galloc->buf_tallocs[i] == galloc->buf_tallocs[buffer_id]) {
|
|
+ // This buffer is the same as a previous one due to the same buffer type being used multiple times
|
|
+ // (See above.) However, we need a different check because multiple buffers might be NULL in our
|
|
+ // case and we still want to know the attempted size.
|
|
+
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return galloc->buffer_sizes[buffer_id];
|
|
+}
|
|
+
|
|
// utils
|
|
|
|
static void free_buffers(ggml_backend_buffer_t ** buffers, const size_t * n_buffers) {
|
|
diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp
|
|
index 97f47abd2..d02a40e60 100644
|
|
--- a/ggml/src/ggml-backend.cpp
|
|
+++ b/ggml/src/ggml-backend.cpp
|
|
@@ -1631,6 +1631,13 @@ size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backe
|
|
return ggml_gallocr_get_buffer_size(sched->galloc, backend_index);
|
|
}
|
|
|
|
+size_t ggml_backend_sched_get_attempted_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend) {
|
|
+ int backend_index = ggml_backend_sched_backend_id(sched, backend);
|
|
+ GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
|
|
+
|
|
+ return ggml_gallocr_get_attempted_buffer_size(sched->galloc, backend_index);
|
|
+}
|
|
+
|
|
void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) {
|
|
int backend_index = ggml_backend_sched_backend_id(sched, backend);
|
|
GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
|