Mirror of https://github.com/ollama/ollama.git (synced 2025-11-13 09:07:18 +01:00)
* feat: Bump llama.cpp to df1b612
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix(mtmd): Correctly encode text chunks during mtmd tokenization
  There can be text chunks that appear interspersed with the image embeddings
  that contain template delimiter tokens for some models. These need to be
  correctly translated to text tokens (see the sketch after this message).
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* tests: Use MtmdChunk in image_test
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* style: Fix unnecessary conversion linting
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix(ggml): Revert changes to ggml_hip.cpp
  These changes were done largely by our code assistant and are likely wrong.
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix: Revert changes in mem_nvml.cpp
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Update sync point to 1deee0
  This brings in several more optimization commits and model support for
  EmbeddingGemma.
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Update patches for 1deee0
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: sync for bump to 1deee0
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix: Bad patch updates with errant `+`
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* feat: Bump llama.cpp/ggml to 7049736
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

* fix: format-patches after latest bump
  Branch: LlamaCPPBump-GraniteDocling
  Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>

---------

Signed-off-by: Gabe Goodhart <ghart@us.ibm.com>
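The mtmd fix above is easier to picture with a toy example. The sketch below is self-contained C++ using hypothetical TextChunk/ImageChunk types rather than the real mtmd or ollama APIs; it only illustrates the intent of the fix: text chunks that sit between image embeddings, and may carry template delimiter tokens, must be passed through as text tokens instead of being dropped.

    // Illustrative only: hypothetical chunk types standing in for the output of
    // mtmd tokenization; the real llama.cpp/ollama types and APIs differ.
    #include <cstdint>
    #include <iostream>
    #include <variant>
    #include <vector>

    using Token = std::int32_t;

    struct TextChunk  { std::vector<Token> tokens; };  // may hold template delimiter tokens
    struct ImageChunk { int embedding_id; };           // stands in for an image embedding

    using Chunk = std::variant<TextChunk, ImageChunk>;

    // Flatten an interleaved chunk sequence into one token stream. The bug class
    // described in the commit: text chunks sitting between image embeddings were
    // not translated to text tokens, silently losing the delimiters.
    std::vector<Token> flatten(const std::vector<Chunk> & chunks, Token image_placeholder) {
        std::vector<Token> out;
        for (const Chunk & c : chunks) {
            if (const TextChunk * t = std::get_if<TextChunk>(&c)) {
                out.insert(out.end(), t->tokens.begin(), t->tokens.end());  // keep the text tokens
            } else {
                out.push_back(image_placeholder);  // later replaced by the image embedding
            }
        }
        return out;
    }

    int main() {
        std::vector<Chunk> chunks = {
            TextChunk{{1, 2}}, ImageChunk{0}, TextChunk{{3}}, ImageChunk{1}, TextChunk{{4, 5}},
        };
        for (Token t : flatten(chunks, /*image_placeholder=*/-1)) std::cout << t << " ";
        std::cout << "\n";  // prints: 1 2 -1 3 -1 4 5
    }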
59 lines · 3.1 KiB
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Oliver Simons <osimons@nvidia.com>
Date: Tue, 22 Jul 2025 11:02:28 +0200
Subject: [PATCH] Enable CUDA Graphs for gemma3n.

Similar to https://github.com/ggml-org/llama.cpp/pull/14741, though ollama
has a slightly different model graph than llama.cpp which requires
different workaround checks.
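The check added below is name-based, so its shape can be shown with a small
standalone sketch. The NodeInfo struct and is_known_single_token_add helper
here are hypothetical, not real ggml types; only the string constants mirror
the ones the diff introduces.

    // Standalone sketch, not the real ggml-cuda code: hypothetical NodeInfo
    // struct and helper, with string constants mirroring the ones in the diff.
    #include <cstdint>
    #include <iostream>
    #include <string>

    struct NodeInfo {
        std::string src0_name;  // e.g. "node_42" in ollama's gemma3n graph
        std::string src1_name;  // e.g. " (reshaped)" or a "..._exps.bias" tensor
        std::int64_t ne1;       // second dimension of the ADD result
    };

    // True for ADD nodes that look batched (ne[1] > 1) but are known false
    // positives in ollama's gemma3n / MoE graphs, i.e. nodes that should not
    // switch CUDA graph capture off.
    bool is_known_single_token_add(const NodeInfo & n) {
        const std::string gemma3n_node_name_ollama               = "node_";
        const std::string gemma3n_per_layer_proj_src1_name_ollama = " (reshaped)";
        const std::string ffn_moe_bias_suffix                     = "_exps.bias";
        return n.ne1 > 1 &&
               ((n.src0_name.find(gemma3n_node_name_ollama) != std::string::npos &&
                 n.src1_name == gemma3n_per_layer_proj_src1_name_ollama) ||
                n.src1_name.find(ffn_moe_bias_suffix) != std::string::npos);
    }

    int main() {
        std::cout << is_known_single_token_add({"node_17", " (reshaped)", 256}) << "\n";        // 1
        std::cout << is_known_single_token_add({"ffn_out", "blk.0.attn_q.bias", 512}) << "\n";  // 0
    }

The real condition in the hunk additionally checks tensor shapes (ne[0] == 256,
ne[2] == 1, ne[3] == 1) before trusting the name match.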
---
 ggml/src/ggml-cuda/ggml-cuda.cu | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

diff --git a/ggml/src/ggml-cuda/ggml-cuda.cu b/ggml/src/ggml-cuda/ggml-cuda.cu
index 5b852f69..827e3205 100644
--- a/ggml/src/ggml-cuda/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda/ggml-cuda.cu
@@ -2689,14 +2689,26 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud
     // Loop over nodes in GGML graph to obtain info needed for CUDA graph
     cuda_ctx->cuda_graph->cpy_dest_ptrs.clear();
 
+    // This fix was added in llama.cpp and Ollama in parallel, but with
+    // different tensor names.
+    // llama.cpp: https://github.com/ggml-org/llama.cpp/pull/14741
+    // ollama: https://github.com/ollama/ollama/pull/11525
+
+    const std::string gemma3n_per_layer_proj_src1_name_ollama = " (reshaped)";
+    const std::string gemma3n_node_name_ollama = "node_";
+
     const std::string gemma3n_per_layer_proj_src0_name = "inp_per_layer_selected";
     const std::string gemma3n_per_layer_proj_src1_name = "per_layer_proj";
+
+    const std::string ffn_moe_bias_suffix = "_exps.bias";
+
     const std::string ffn_moe_gate_bias_prefix = "ffn_moe_gate_biased";
     const std::string ffn_moe_up_bias_prefix = "ffn_moe_up_biased";
     const std::string ffn_moe_down_bias_prefix = "ffn_moe_down_biased";
     const std::string nemotron_h_block_out_prefix = "nemotron_h_block_out";
     const std::string mamba2_y_add_d_prefix = "mamba2_y_add_d";
 
+
     for (int i = 0; i < cgraph->n_nodes; i++) {
         ggml_tensor * node = cgraph->nodes[i];
 
@@ -2720,6 +2732,12 @@ static bool check_node_graph_compatibility_and_refresh_copy_ops(ggml_backend_cud
 
         if (node->op == GGML_OP_ADD &&
             node->src[1] && node->src[1]->ne[1] > 1 &&
+            // ollama
+            // workarounds to exclude Gemma3n's `project_per_layer_input` operation from the batch-size heuristic, specific to ollama's implementation of gemma3n
+            // number of layers is different for per_layer_proj between gemma3n:2b and gemma3n:4b, which is why we don't check that value here
+            !(node->ne[0] == 256 && node->ne[2] == 1 && node->ne[3] == 1 && node->src[0] ? std::string(node->src[0]->name).find(gemma3n_node_name_ollama) != std::string::npos : false && node->src[1] ? node->src[1]->name == gemma3n_per_layer_proj_src1_name_ollama : false) &&
+            node->src[1] ? std::string(node->src[1]->name).find(ffn_moe_bias_suffix) == std::string::npos : false &&
+            // upstream
             (node->src[0] ? node->src[0]->name != gemma3n_per_layer_proj_src0_name : true) &&
             (node->src[1] ? node->src[1]->name != gemma3n_per_layer_proj_src1_name : true) &&
             strncmp(node->name, ffn_moe_gate_bias_prefix.c_str(), ffn_moe_gate_bias_prefix.size()) != 0 &&