Mirror of https://github.com/ollama/ollama.git
* feat: Bump llama.cpp to df1b612

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix(mtmd): Correctly encode text chunks during mtmd tokenization

  There can be text chunks that appear interspersed with the image embeddings
  that contain template delimiter tokens for some models. These need to be
  correctly translated to text tokens.

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* tests: Use MtmdChunk in image_test

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* style: Fix unnecessary conversion linting

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix(ggml): Revert changes to ggml_hip.cpp

  These changes were done largely by our code assistant and are likely wrong.

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix: Revert changes in mem_nvml.cpp

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Update sync point to 1deee0

  This brings in several more optimization commits and model support for
  EmbeddingGemma.

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Update patches for 1deee0

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: sync for bump to 1deee0

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix: Bad patch updates with errant `+`

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Bump llama.cpp/ggml to 7049736

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix: format-patches after latest bump

  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

---------

Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
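For context on the mtmd fix above, here is a minimal sketch of the idea in terms of llama.cpp's public mtmd API rather than Ollama's own runner code; the helper name and the batching details are hypothetical:

// Sketch: after mtmd_tokenize() splits a prompt into chunks, text chunks can
// appear between image chunks (e.g. chat-template delimiter tokens). They
// must be fed to the model as ordinary text tokens, not dropped.
#include "mtmd.h"

static void consume_chunks(const mtmd_input_chunks * chunks) {  // hypothetical helper
    for (size_t i = 0; i < mtmd_input_chunks_size(chunks); i++) {
        const mtmd_input_chunk * chunk = mtmd_input_chunks_get(chunks, i);
        if (mtmd_input_chunk_get_type(chunk) == MTMD_INPUT_CHUNK_TYPE_TEXT) {
            size_t n_tokens = 0;
            const llama_token * tokens = mtmd_input_chunk_get_tokens_text(chunk, &n_tokens);
            // feed tokens[0..n_tokens) into the decode batch like any text prompt
        } else {
            // image (or audio) chunk: run through the encoder (mtmd_encode)
            // and inject the resulting embeddings instead
        }
    }
}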
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: jmorganca <jmorganca@gmail.com>
Date: Tue, 8 Apr 2025 20:35:53 -0700
Subject: [PATCH] fix string arr kv loading

certain models would error when loading
kv metadata fields that contain an array of strings
such as vocab fields
---
 ggml/include/gguf.h | 1 +
 ggml/src/gguf.cpp   | 7 +++++--
 src/llama-vocab.cpp | 4 +---
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/ggml/include/gguf.h b/ggml/include/gguf.h
index 79ee2020..3efb22f0 100644
--- a/ggml/include/gguf.h
+++ b/ggml/include/gguf.h
@@ -114,6 +114,7 @@ extern "C" {
     // get raw pointer to the first element of the array with the given key_id
     // for bool arrays, note that they are always stored as int8 on all platforms (usually this makes no difference)
     GGML_API const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id);
+    GGML_API size_t gguf_get_arr_data_n(const struct gguf_context * ctx, int64_t key_id);

     // get ith C string from array with given key_id
     GGML_API const char * gguf_get_arr_str (const struct gguf_context * ctx, int64_t key_id, size_t i);
diff --git a/ggml/src/gguf.cpp b/ggml/src/gguf.cpp
index 8cc4ef1c..d950dbdf 100644
--- a/ggml/src/gguf.cpp
+++ b/ggml/src/gguf.cpp
@@ -805,10 +805,14 @@ enum gguf_type gguf_get_arr_type(const struct gguf_context * ctx, int64_t key_id

 const void * gguf_get_arr_data(const struct gguf_context * ctx, int64_t key_id) {
     GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
-    GGML_ASSERT(ctx->kv[key_id].get_type() != GGUF_TYPE_STRING);
     return ctx->kv[key_id].data.data();
 }

+size_t gguf_get_arr_data_n(const struct gguf_context * ctx, int64_t key_id) {
+    GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
+    return ctx->kv[key_id].data.size();
+}
+
 const char * gguf_get_arr_str(const struct gguf_context * ctx, int64_t key_id, size_t i) {
     GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
     GGML_ASSERT(ctx->kv[key_id].get_type() == GGUF_TYPE_STRING);
@@ -902,7 +906,6 @@ const char * gguf_get_val_str(const struct gguf_context * ctx, int64_t key_id) {
 const void * gguf_get_val_data(const struct gguf_context * ctx, int64_t key_id) {
     GGML_ASSERT(key_id >= 0 && key_id < gguf_get_n_kv(ctx));
     GGML_ASSERT(ctx->kv[key_id].get_ne() == 1);
-    GGML_ASSERT(ctx->kv[key_id].get_type() != GGUF_TYPE_STRING);
     return ctx->kv[key_id].data.data();
 }

diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 3de95c67..217ede47 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -1768,9 +1768,7 @@ void llama_vocab::impl::load(llama_model_loader & ml, const LLM_KV & kv) {
         const int precompiled_charsmap_keyidx = gguf_find_key(ctx, kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP).c_str());
         if (precompiled_charsmap_keyidx != -1) {
             const gguf_type pc_type = gguf_get_arr_type(ctx, precompiled_charsmap_keyidx);
-            GGML_ASSERT(pc_type == GGUF_TYPE_INT8 || pc_type == GGUF_TYPE_UINT8);
-
-            const size_t n_precompiled_charsmap = gguf_get_arr_n(ctx, precompiled_charsmap_keyidx);
+            const size_t n_precompiled_charsmap = gguf_get_arr_data_n(ctx, precompiled_charsmap_keyidx);
             const char * pc = (const char *) gguf_get_arr_data(ctx, precompiled_charsmap_keyidx);
             precompiled_charsmap.assign(pc, pc + n_precompiled_charsmap);
 #if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
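The practical effect of this patch is that array values, including string-typed ones, can be read through gguf_get_arr_data, with the new gguf_get_arr_data_n reporting the byte size of the stored buffer. A minimal caller sketch; the helper below is illustrative and not part of the patch:

// Sketch: read the raw bytes behind an array-valued KV entry, e.g. the
// tokenizer's precompiled charsmap, using the getter added by this patch.
#include <string>
#include "gguf.h"

static std::string read_arr_bytes(const struct gguf_context * ctx, const char * key) {
    const int64_t key_id = gguf_find_key(ctx, key);
    if (key_id < 0) {
        return {};  // key not present in this GGUF file
    }
    // gguf_get_arr_data_n() returns kv[key_id].data.size(): the byte length
    // of the raw buffer, which is exactly what llama-vocab.cpp needs above.
    const size_t n = gguf_get_arr_data_n(ctx, key_id);
    const char * p = (const char *) gguf_get_arr_data(ctx, key_id);
    return std::string(p, p + n);
}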