mirror of
https://github.com/ollama/ollama.git
synced 2025-03-26 01:32:06 +01:00
* add build to .dockerignore * test: only build one arch * add build to .gitignore * fix ccache path * filter amdgpu targets * only filter if autodetecting * Don't clobber gpu list for default runner This ensures the GPU specific environment variables are set properly * explicitly set CXX compiler for HIP * Update build_windows.ps1 This isn't complete, but is close. Dependencies are missing, and it only builds the "default" preset. * build: add ollama subdir * add .git to .dockerignore * docs: update development.md * update build_darwin.sh * remove unused scripts * llm: add cwd and build/lib/ollama to library paths * default DYLD_LIBRARY_PATH to LD_LIBRARY_PATH in runner on macOS * add additional cmake output vars for msvc * interim edits to make server detection logic work with dll directories like lib/ollama/cuda_v12 * remove unncessary filepath.Dir, cleanup * add hardware-specific directory to path * use absolute server path * build: linux arm * cmake install targets * remove unused files * ml: visit each library path once * build: skip cpu variants on arm * build: install cpu targets * build: fix workflow * shorter names * fix rocblas install * docs: clean up development.md * consistent build dir removal in development.md * silence -Wimplicit-function-declaration build warnings in ggml-cpu * update readme * update development readme * llm: update library lookup logic now that there is one runner (#8587) * tweak development.md * update docs * add windows cuda/rocm tests --------- Co-authored-by: jmorganca <jmorganca@gmail.com> Co-authored-by: Daniel Hiltgen <daniel@ollama.com>
48 lines
1.7 KiB
Diff
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: jmorganca <jmorganca@gmail.com>
Date: Thu, 6 Jun 2024 23:55:47 -0700
Subject: [PATCH] cuda

---
 ggml/src/ggml-backend.cpp        | 1 -
 ggml/src/ggml-cuda/ggml-cuda.cu  | 1 +
 ggml/src/ggml-metal/ggml-metal.m | 1 +
 3 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/ggml/src/ggml-backend.cpp b/ggml/src/ggml-backend.cpp
index e2d6c405..a12172dc 100644
--- a/ggml/src/ggml-backend.cpp
+++ b/ggml/src/ggml-backend.cpp
@@ -106,7 +106,6 @@ void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
     if (buffer->iface.free_buffer != NULL) {
         buffer->iface.free_buffer(buffer);
     }
-    delete buffer;
 }
 
 size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) {
diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 0b06be72..be29e979 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -424,6 +424,7 @@ struct ggml_backend_cuda_buffer_context {
 static void ggml_backend_cuda_buffer_free_buffer(ggml_backend_buffer_t buffer) {
     ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context;
     delete ctx;
+    delete buffer;
 }
 
 static bool ggml_backend_buffer_is_cuda(ggml_backend_buffer_t buffer) {
diff --git a/ggml/src/ggml-metal/ggml-metal.m b/ggml/src/ggml-metal/ggml-metal.m
index a85502ee..cd8ef741 100644
--- a/ggml/src/ggml-metal/ggml-metal.m
+++ b/ggml/src/ggml-metal/ggml-metal.m
@@ -4187,6 +4187,7 @@ static void ggml_backend_metal_buffer_free_buffer(ggml_backend_buffer_t buffer)
     }
 
     free(ctx);
+    free(buffer);
 }
 
 static void * ggml_backend_metal_buffer_get_base(ggml_backend_buffer_t buffer) {