mirror of https://github.com/ollama/ollama.git

commit 8b51db204f (parent 760e8fa656)

    tmp
llama/patches/0016-add-ollama-debug.patch (new file, 33 lines)

@@ -0,0 +1,33 @@
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Michael Yang <mxyng@pm.me>
Date: Fri, 7 Feb 2025 16:51:55 -0800
Subject: [PATCH] add ollama debug

---
 ggml/src/ggml-cpu/ggml-cpu.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/ggml/src/ggml-cpu/ggml-cpu.c b/ggml/src/ggml-cpu/ggml-cpu.c
index b307d554..a23bbe98 100644
--- a/ggml/src/ggml-cpu/ggml-cpu.c
+++ b/ggml/src/ggml-cpu/ggml-cpu.c
@@ -13,6 +13,8 @@
 #include "amx/amx.h"
 #include "ggml.h"
 
+#include "ollama-debug.h"
+
 #if defined(_MSC_VER) || defined(__MINGW32__)
 #include <malloc.h> // using malloc.h with MSC/MINGW
 #elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
@@ -13645,6 +13647,10 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
 
         ggml_compute_forward(&params, node);
 
+#ifdef OLLAMA_DEBUG
+        ollama_debug(node, false);
+#endif
+
         if (state->ith == 0 && cplan->abort_callback &&
             cplan->abort_callback(cplan->abort_callback_data)) {
             tp->abort = true;

@@ -99,15 +99,17 @@ func New(r *os.File) (ml.Backend, error) {
             }),
             backend: C.ggml_backend_dev_init(d.d, nil),
         })
-    case C.GGML_BACKEND_DEVICE_TYPE_GPU:
-        slog.Info("gpu", "device", d)
-        gpus = append(gpus, Context{
-            ctx: C.ggml_init(C.struct_ggml_init_params{
-                mem_size: C.size_t(int(C.ggml_tensor_overhead()) * (len(meta.Tensors().Items()) + 1 + int(meta.KV().BlockCount())*2)),
-                no_alloc: true,
-            }),
-            backend: C.ggml_backend_dev_init(d.d, nil),
-        })
+
+        C.ggml_backend_cpu_set_n_threads(cpus[len(cpus)-1].backend, C.int(1))
+        // case C.GGML_BACKEND_DEVICE_TYPE_GPU:
+        //     slog.Info("gpu", "device", d)
+        //     gpus = append(gpus, Context{
+        //         ctx: C.ggml_init(C.struct_ggml_init_params{
+        //             mem_size: C.size_t(int(C.ggml_tensor_overhead()) * (len(meta.Tensors().Items()) + 1 + int(meta.KV().BlockCount())*2)),
+        //             no_alloc: true,
+        //         }),
+        //         backend: C.ggml_backend_dev_init(d.d, nil),
+        //     })
     }
 }
 

@@ -1,5 +1,6 @@
 protect *.go
 protect *-embed.*
+protect ollama-debug.*
 include include/
 include src/
 include src/CMakeLists.txt
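
The added protect ollama-debug.* rule extends what looks like an rsync filter list used when re-vendoring the upstream ggml sources: protect rules keep matching (non-upstream) files from being deleted by a sync, while the include rules select what gets copied, so the new ollama-debug.h and ollama-debug.c below survive the next vendor update.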

ml/backend/ggml/ggml/include/ollama-debug.h (new vendored file, 11 lines)

@@ -0,0 +1,11 @@
#include "ggml.h"

#ifdef __cplusplus
extern "C" {
#endif

void ollama_debug(const struct ggml_tensor *tensor, bool verbose);

#ifdef __cplusplus
}
#endif
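
The extern "C" guard matters only when this header is included from a C++ translation unit (parts of the vendored ggml are C++); it keeps ollama_debug callable through an unmangled C symbol from both languages.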

ml/backend/ggml/ggml/src/debug.go (new file, 6 lines)

@@ -0,0 +1,6 @@
//go:build debug

package ggml

// #cgo CPPFLAGS: -DOLLAMA_DEBUG
import "C"
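
With the //go:build debug constraint, this file is compiled only when the debug build tag is set (for example, go build -tags debug), and its #cgo CPPFLAGS line then defines OLLAMA_DEBUG for the package's C sources; that appears to be the switch that makes the #ifdef OLLAMA_DEBUG block in the patched ggml-cpu.c compile away by default and become a real call under the debug tag.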

ml/backend/ggml/ggml/src/ggml-cpu/ggml-cpu.c (vendored, 4 changes)

@@ -13,6 +13,8 @@
 #include "amx/amx.h"
 #include "ggml.h"
 
+#include "ollama-debug.h"
+
 #if defined(_MSC_VER) || defined(__MINGW32__)
 #include <malloc.h> // using malloc.h with MSC/MINGW
 #elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
@@ -13645,6 +13647,8 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
 
         ggml_compute_forward(&params, node);
 
+        ollama_debug(node, true);
+
         if (state->ith == 0 && cplan->abort_callback &&
             cplan->abort_callback(cplan->abort_callback_data)) {
             tp->abort = true;

ml/backend/ggml/ggml/src/ollama-debug.c (new vendored file, 110 lines)

@@ -0,0 +1,110 @@
#include <stdio.h>
#include <string.h>

#include "ollama-debug.h"

// Product of the first ndims entries of dims, i.e. the number of elements
// in a subtensor of that rank.
static int mul(int64_t *dims, int ndims) {
    int result = 1;
    for (int i = 0; i < ndims; i++) {
        result *= dims[i];
    }

    return result;
}

static void repeat(char c, int n) {
    for (int i = 0; i < n; i++) {
        fprintf(stderr, "%c", c);
    }
}

// Recursively prints a tensor in nested-bracket form, showing at most
// nitems leading and trailing items per dimension and eliding the middle.
static void print_tensor(const void *tensor, void (*cb)(const void *, int),
                         int shape,
                         int64_t *dims, int ndims, int stride,
                         int nitems, int pad) {
    fprintf(stderr, "[");
    for (int i = 0; i < dims[0]; i++) {
        if (i >= nitems && i < dims[0] - nitems) {
            fprintf(stderr, "... (%lld more), ", dims[0] - 2 * nitems);
            int skip = dims[0] - 2 * nitems;
            if (ndims > 1) {
                stride += mul(dims + 1, ndims - 1) * skip;
                repeat('\n', ndims - 1);
                repeat(' ', shape - ndims + 1 + pad);
            }
            i += skip - 1;
        } else if (ndims > 1) {
            print_tensor(tensor, cb, shape, dims + 1, ndims - 1, stride,
                         nitems, pad);
            stride += mul(dims + 1, ndims - 1);
            if (i < dims[0] - 1) {
                fprintf(stderr, ", ");
                repeat('\n', ndims - 1);
                repeat(' ', shape - ndims + 1 + pad);
            }
        } else {
            cb(tensor, stride + i);
            if (i < dims[0] - 1) {
                fprintf(stderr, ", ");
            }
        }
    }
    fprintf(stderr, "]");
}

static void print_tensor_f16(const void *tensor, int i) {
    fprintf(stderr, "%f", ggml_fp16_to_fp32(((const ggml_fp16_t *)tensor)[i]));
}

static void print_tensor_f32(const void *tensor, int i) {
    fprintf(stderr, "%f", ((const float *)tensor)[i]);
}

static void print_tensor_i32(const void *tensor, int i) {
    fprintf(stderr, "%d", ((const int32_t *)tensor)[i]);
}

// Prints one tensor's name, op, type, and shape; with verbose it also dumps
// up to the first/last 3 items per dimension of the tensor's data.
static void ollama_debug_tensor(const struct ggml_tensor *tensor, bool verbose, const char *prefix, int indent) {
    fprintf(stderr, "%s%s %s (%s): [%lld %lld %lld %lld]\n", prefix, tensor->name,
            ggml_op_name(tensor->op), ggml_type_name(tensor->type), tensor->ne[0],
            tensor->ne[1], tensor->ne[2], tensor->ne[3]);

    if (!verbose) {
        return;
    }

    for (int i = 0; i < indent; i++) {
        fprintf(stderr, " ");
    }

    switch (tensor->type) {
    case GGML_TYPE_F16:
        print_tensor(ggml_get_data(tensor), print_tensor_f16, ggml_n_dims(tensor),
                     (int64_t *)tensor->ne, ggml_n_dims(tensor), 0, 3, indent);
        break;
    case GGML_TYPE_F32:
        print_tensor(ggml_get_data(tensor), print_tensor_f32, ggml_n_dims(tensor),
                     (int64_t *)tensor->ne, ggml_n_dims(tensor), 0, 3, indent);
        break;
    case GGML_TYPE_I32:
        print_tensor(ggml_get_data(tensor), print_tensor_i32, ggml_n_dims(tensor),
                     (int64_t *)tensor->ne, ggml_n_dims(tensor), 0, 3, indent);
        break;
    default:
        fprintf(stderr, "<unsupported type>\n");
        return;
    }

    fprintf(stderr, "\n");
}

// Entry point called from ggml-cpu.c: prints the node itself plus its first
// two source tensors, if present.
void ollama_debug(const struct ggml_tensor *tensor, bool verbose) {
    ollama_debug_tensor(tensor, verbose, ">>> ", 4);

    if (tensor->src[0] != NULL) {
        ollama_debug_tensor(tensor->src[0], verbose, " ?? ", 4);
    }

    if (tensor->src[1] != NULL) {
        ollama_debug_tensor(tensor->src[1], verbose, " ?? ", 4);
    }
}
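
For reference, a minimal standalone harness for the debug printer above; a sketch, not part of the commit. It assumes the vendored ggml headers and ollama-debug.c can be compiled and linked directly, and the file and binary names are made up for illustration.

// debug_demo.c (hypothetical): exercise ollama_debug on a small tensor.
// Build sketch, paths are assumptions:
//   cc debug_demo.c ollama-debug.c -I../include -lggml -o debug_demo
#include "ggml.h"
#include "ollama-debug.h"

int main(void) {
    // Small CPU context with no_alloc=false so tensor data is allocated.
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16 * 1024 * 1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context *ctx = ggml_init(params);

    // An 8-element f32 tensor: with verbose=true and nitems=3, ollama_debug
    // prints the first and last 3 items with "... (2 more), " in between.
    struct ggml_tensor *t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    for (int i = 0; i < 8; i++) {
        ((float *)t->data)[i] = (float)i;
    }
    ggml_set_name(t, "example");

    ollama_debug(t, true);

    ggml_free(ctx);
    return 0;
}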