From f8ef4439e9673c7df2314fafb5975aeab856c51f Mon Sep 17 00:00:00 2001
From: 65a <65a.invalid>
Date: Mon, 16 Oct 2023 17:41:40 -0700
Subject: [PATCH] Use build tags to generate accelerated binaries for CUDA and
 ROCm on Linux.

The build tags rocm or cuda must be specified to both go generate and go
build. ROCm builds require both ROCM_PATH set (with the ROCm SDK present)
and CLBlast installed (for GGML), with CLBlast_DIR set in the environment
to the CLBlast cmake directory (likely /usr/lib/cmake/CLBlast). Build tags
are also used to switch VRAM detection between the cuda and rocm
implementations, via added "accelerator_foo.go" files which contain
architecture-specific functions and variables. accelerator_none is used
when no tags are set, and a helper function addRunner ignores its runner
when no accelerator is selected. Fix go generate commands; thanks to
@deadmeu for testing.
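
For reference, tag selection uses Go's standard build-constraint
mechanism. A minimal standalone sketch of the same pattern (file and
identifier names here are hypothetical, not part of this patch):

    // accel_cuda.go -- compiled only when -tags cuda is passed
    //go:build cuda

    package main

    const accelerator = "cuda"

    // accel_rocm.go -- compiled only when -tags rocm is passed
    //go:build rocm

    package main

    const accelerator = "rocm"

    // accel_none.go -- compiled when neither tag is passed
    //go:build !rocm && !cuda

    package main

    const accelerator = "none"

    // main.go -- reports which variant was compiled in
    package main

    import "fmt"

    func main() { fmt.Println("accelerator:", accelerator) }

Running go build -tags cuda . (or -tags rocm) compiles the matching file,
which is exactly how the accelerator_*.go and generate_linux_*.go files in
this patch are selected.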
---
 Dockerfile                           |  4 +-
 Dockerfile.build                     |  4 +-
 README.md                            | 35 +++++++++++-
 llm/accelerator_cuda.go              | 67 ++++++++++++++++++++++
 llm/accelerator_none.go              | 21 +++++++
 llm/accelerator_rocm.go              | 85 ++++++++++++++++++++++++++++
 llm/llama.cpp/generate_linux_cuda.go | 25 +++++
 llm/llama.cpp/generate_linux_rocm.go | 25 ++++++++
 8 files changed, 260 insertions(+), 6 deletions(-)
 create mode 100644 llm/accelerator_cuda.go
 create mode 100644 llm/accelerator_none.go
 create mode 100644 llm/accelerator_rocm.go
 create mode 100644 llm/llama.cpp/generate_linux_cuda.go
 create mode 100644 llm/llama.cpp/generate_linux_rocm.go
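
For reviewers: the new CheckVRAM implementations shell out to the vendor
tools below (commands quoted verbatim from accelerator_cuda.go and
accelerator_rocm.go). For CUDA, free VRAM is summed across all GPUs; for
ROCm, the single card with the most free VRAM is selected:

    nvidia-smi --query-gpu=memory.free --format=csv,noheader,nounits
    $ROCM_PATH/bin/rocm-smi --showmeminfo VRAM --csv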

diff --git a/Dockerfile b/Dockerfile
index c50665b6b..7c8828523 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -11,8 +11,8 @@ RUN mkdir -p /usr/local && tar xz -C /usr/local </tmp/go1.21.3.tar.gz
 COPY . .
 ENV GOARCH=$TARGETARCH
 ENV GOFLAGS=$GOFLAGS
-RUN /usr/local/go/bin/go generate ./... \
-    && /usr/local/go/bin/go build .
+RUN /usr/local/go/bin/go generate -tags cuda ./... \
+    && /usr/local/go/bin/go build -tags cuda .
 
 FROM ubuntu:22.04
 RUN apt-get update && apt-get install -y ca-certificates
diff --git a/Dockerfile.build b/Dockerfile.build
index 1da3b8be0..6e1b605c6 100644
--- a/Dockerfile.build
+++ b/Dockerfile.build
@@ -27,5 +27,5 @@ ENV GOOS=linux
 ENV GOARCH=$TARGETARCH
 ENV GOFLAGS=$GOFLAGS
 
-RUN /usr/local/go/bin/go generate ./... && \
-    /usr/local/go/bin/go build .
+RUN /usr/local/go/bin/go generate -tags cuda ./... && \
+    /usr/local/go/bin/go build -tags cuda .
diff --git a/README.md b/README.md
index 674ed0d46..923290d57 100644
--- a/README.md
+++ b/README.md
@@ -185,19 +185,50 @@ ollama list
 
 ## Building
 
+### Generic (CPU)
+
 Install `cmake` and `go`:
 
 ```
 brew install cmake go
 ```
 
-Then generate dependencies and build:
-
+Then generate dependencies:
 ```
 go generate ./...
+```
+Then build the binary:
+```
 go build .
 ```
 
+### CUDA (NVIDIA)
+*Your operating system distribution may already have packages for NVIDIA CUDA. Distro packages are often preferable, but installation instructions vary by distro; consult your distro's documentation for dependencies if available!*
+
+Install `cmake` and `golang` as well as [NVIDIA CUDA](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html) development and runtime packages.
+Then generate dependencies:
+```
+go generate -tags cuda ./...
+```
+Then build the binary:
+```
+go build -tags cuda .
+```
+
+### ROCm (AMD)
+*Your operating system distribution may already have packages for AMD ROCm and CLBlast. Distro packages are often preferable, but installation instructions vary by distro; consult your distro's documentation for dependencies if available!*
+
+Install [CLBlast](https://github.com/CNugteren/CLBlast/blob/master/doc/installation.md) and [ROCm](https://rocm.docs.amd.com/en/latest/deploy/linux/quick_start.html) development packages first, as well as `cmake` and `golang`.
+Adjust the paths below (correct for Arch Linux) to match your distribution's install locations, then generate the dependencies:
+```
+CLBlast_DIR=/usr/lib/cmake/CLBlast ROCM_PATH=/opt/rocm go generate -tags rocm ./...
+```
+Then build the binary:
+```
+go build -tags rocm .
+```
+
+### Running local builds
 Next, start the server:
 
 ```
diff --git a/llm/accelerator_cuda.go b/llm/accelerator_cuda.go
new file mode 100644
index 000000000..f21d6d623
--- /dev/null
+++ b/llm/accelerator_cuda.go
@@ -0,0 +1,67 @@
+//go:build cuda
+
+package llm
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"log"
+	"os/exec"
+	"path"
+	"strconv"
+	"strings"
+
+	"github.com/jmorganca/ollama/format"
+)
+
+var (
+	errNvidiaSMI     = errors.New("warning: gpu support may not be enabled, check that you have installed GPU drivers: nvidia-smi command failed")
+	errAvailableVRAM = errors.New("not enough VRAM available, falling back to CPU only")
+)
+
+// acceleratedRunner returns the runner for this accelerator given the provided buildPath string.
+func acceleratedRunner(buildPath string) []ModelRunner {
+	return []ModelRunner{
+		{
+			Path:        path.Join(buildPath, "cuda", "bin", "ollama-runner"),
+			Accelerated: true,
+		},
+	}
+}
+
+// CheckVRAM returns the free VRAM in bytes on Linux machines with NVIDIA GPUs
+func CheckVRAM() (int64, error) {
+	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits")
+	var stdout bytes.Buffer
+	cmd.Stdout = &stdout
+	err := cmd.Run()
+	if err != nil {
+		return 0, errNvidiaSMI
+	}
+
+	var freeMiB int64
+	scanner := bufio.NewScanner(&stdout)
+	for scanner.Scan() {
+		line := scanner.Text()
+		if strings.Contains(line, "[Insufficient Permissions]") {
+			return 0, fmt.Errorf("GPU support may not be enabled, check that you have installed GPU drivers and have the necessary permissions to run nvidia-smi")
+		}
+
+		vram, err := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
+		if err != nil {
+			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
+		}
+
+		freeMiB += vram
+	}
+
+	freeBytes := freeMiB * 1024 * 1024
+	if freeBytes < 2*format.GigaByte {
+		log.Printf("less than 2 GB VRAM available")
+		return 0, errAvailableVRAM
+	}
+
+	return freeBytes, nil
+}
diff --git a/llm/accelerator_none.go b/llm/accelerator_none.go
new file mode 100644
index 000000000..442d884a7
--- /dev/null
+++ b/llm/accelerator_none.go
@@ -0,0 +1,21 @@
+//go:build !rocm && !cuda
+
+package llm
+
+import (
+	"errors"
+)
+
+var (
+	errNoAccel = errors.New("no accelerator support in this binary")
+)
+
+// acceleratedRunner returns no runners, since this binary was built without accelerator support.
+func acceleratedRunner(buildPath string) []ModelRunner {
+	return make([]ModelRunner, 0, 1)
+}
+
+// CheckVRAM is a stub that always fails, since no accelerator is available.
+func CheckVRAM() (int64, error) {
+	return 0, errNoAccel
+}
diff --git a/llm/accelerator_rocm.go b/llm/accelerator_rocm.go
new file mode 100644
index 000000000..e71b4ea6c
--- /dev/null
+++ b/llm/accelerator_rocm.go
@@ -0,0 +1,85 @@
+//go:build rocm
+
+package llm
+
+import (
+	"bytes"
+	"encoding/csv"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+var errNoAccel = errors.New("rocm-smi command failed")
+
+// acceleratedRunner returns the runner for this accelerator given the provided buildPath string.
+func acceleratedRunner(buildPath string) []ModelRunner {
+	return []ModelRunner{
+		{
+			Path:        path.Join(buildPath, "rocm", "bin", "ollama-runner"),
+			Accelerated: true,
+		},
+	}
+}
+
+// CheckVRAM returns the available VRAM in bytes on Linux machines with AMD GPUs
+func CheckVRAM() (int64, error) {
+	rocmHome := os.Getenv("ROCM_PATH")
+	if rocmHome == "" {
+		rocmHome = os.Getenv("ROCM_HOME")
+	}
+	if rocmHome == "" {
+		log.Println("warning: ROCM_PATH is not set. Trying a likely fallback path, but it is recommended to set this variable in the environment.")
+		rocmHome = "/opt/rocm"
+	}
+	cmd := exec.Command(filepath.Join(rocmHome, "bin/rocm-smi"), "--showmeminfo", "VRAM", "--csv")
+	var stdout bytes.Buffer
+	cmd.Stdout = &stdout
+	err := cmd.Run()
+	if err != nil {
+		return 0, errNoAccel
+	}
+	csvData := csv.NewReader(&stdout)
+	// FIXME: neither llama.cpp nor ROCm handles splitting VRAM allocations across multiple cards well, so pick the single biggest card instead.
+	totalBiggestCard := int64(0)
+	bigCardName := ""
+	for {
+		record, err := csvData.Read()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
+		}
+		if !strings.HasPrefix(record[0], "card") {
+			continue
+		}
+		cardTotal, err := strconv.ParseInt(record[1], 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		cardUsed, err := strconv.ParseInt(record[2], 10, 64)
+		if err != nil {
+			return 0, err
+		}
+		possible := (cardTotal - cardUsed)
+		log.Printf("ROCm found %d MiB of available VRAM on device %q", possible/1024/1024, record[0])
+		if possible > totalBiggestCard {
+			totalBiggestCard = possible
+			bigCardName = record[0]
+		}
+	}
+	if totalBiggestCard == 0 {
+		log.Printf("found ROCm GPU but failed to parse free VRAM!")
+		return 0, errNoAccel
+	}
+	log.Printf("ROCm selecting device %q", bigCardName)
+	return totalBiggestCard, nil
+}
diff --git a/llm/llama.cpp/generate_linux_cuda.go b/llm/llama.cpp/generate_linux_cuda.go
new file mode 100644
index 000000000..86a959774
--- /dev/null
+++ b/llm/llama.cpp/generate_linux_cuda.go
@@ -0,0 +1,25 @@
+//go:build cuda
+
+package llm
+
+//go:generate git submodule init
+
+//go:generate git submodule update --force ggml
+//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
+//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
+//go:generate git -C ggml apply ../patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
+//go:generate git -C ggml apply ../patches/0001-copy-cuda-runtime-libraries.patch
+
+//go:generate rm -rf ggml/build/cuda
+//go:generate cmake -S ggml -B ggml/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
+//go:generate cmake --build ggml/build/cuda --target server --config Release
+//go:generate mv ggml/build/cuda/bin/server ggml/build/cuda/bin/ollama-runner
+
+//go:generate git submodule update --force gguf
+//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
+//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
+
+//go:generate rm -rf gguf/build/cuda
+//go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off
+//go:generate cmake --build gguf/build/cuda --target server --config Release
+//go:generate mv gguf/build/cuda/bin/server gguf/build/cuda/bin/ollama-runner
diff --git a/llm/llama.cpp/generate_linux_rocm.go b/llm/llama.cpp/generate_linux_rocm.go
new file mode 100644
index 000000000..1766be845
--- /dev/null
+++ b/llm/llama.cpp/generate_linux_rocm.go
@@ -0,0 +1,25 @@
+//go:build rocm
+
+package llm
+
+//go:generate git submodule init
+
+//go:generate git submodule update --force ggml
+//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
+//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
+//go:generate git -C ggml apply ../patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
+//go:generate git -C ggml apply ../patches/0001-copy-cuda-runtime-libraries.patch
+
+//go:generate git submodule update --force gguf
+//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
+//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch
+
+//go:generate rm -rf ggml/build/rocm
+//go:generate cmake -S ggml -B ggml/build/rocm -DLLAMA_CLBLAST=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
+//go:generate cmake --build ggml/build/rocm --target server --config Release
+//go:generate mv ggml/build/rocm/bin/server ggml/build/rocm/bin/ollama-runner
+
+//go:generate rm -rf gguf/build/rocm
+//go:generate cmake -S gguf -B gguf/build/rocm -DLLAMA_HIPBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS='gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102' -DGPU_TARGETS='gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102'
+//go:generate cmake --build gguf/build/rocm --target server --config Release
+//go:generate mv gguf/build/rocm/bin/server gguf/build/rocm/bin/ollama-runner