Mirror of https://github.com/ollama/ollama.git, synced 2025-08-03 07:23:35 +02:00.
Commit: llama: update llama.cpp vendor code to commit d7cfe1ff (#9356)
This commit changes 1 file: llama/llama.cpp/src/llama-kv-cache.h (vendored), 2 lines changed (+1, -1).
@@ -37,7 +37,7 @@ struct llama_kv_cache {
     bool can_shift = false;

     // Note: The value of head isn't only used to optimize searching
-    // for a free KV slot. llama_decode_internal also uses it, so it
+    // for a free KV slot. llama_decode_impl also uses it, so it
     // cannot be freely changed after a slot has been allocated.
     uint32_t head = 0;
     uint32_t size = 0;
Reference in New Issue
Block a user