chore: remove redundant words in comment (#12028)
Signed-off-by: zoupingshi <hangfachang@outlook.com>
@@ -46,7 +46,7 @@ func NewInputCache(lc *llama.Context, kvSize int, numSlots int, multiUserCache b
 }
 
 // Locking: Operations on InputCacheSlot (including finding one
-// through LoadCacheSlot) require a lock to be be held that serializes
+// through LoadCacheSlot) require a lock to be held that serializes
 // these operations with each other and llama.Decode
 
 type InputCacheSlot struct {
@@ -78,7 +78,7 @@ func (c *InputCache) Close() {
 }
 
 // Locking: Operations on InputCacheSlot (including finding one
-// through LoadCacheSlot) require a lock to be be held that serializes
+// through LoadCacheSlot) require a lock to be held that serializes
 // these operations with each other and processBatch
 
 type InputCacheSlot struct {
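The comment being fixed documents a locking convention: any operation on an InputCacheSlot, including finding one via LoadCacheSlot, must happen while holding a lock that also serializes it with llama.Decode (in the llama runner) or processBatch (in the ollama runner). A minimal sketch of that pattern follows, with hypothetical names and simplified signatures; it illustrates the rule only and is not the actual runner code.

// Hypothetical sketch of the locking rule described in the comment:
// slot lookup and decoding are serialized under one mutex.
package main

import (
	"fmt"
	"sync"
)

type InputCacheSlot struct {
	Id     int
	Inputs []int // simplified; the real slot tracks richer input state
}

type server struct {
	mu    sync.Mutex // serializes slot operations with decode/processBatch
	slots []InputCacheSlot
}

// loadCacheSlot picks a slot for the prompt; the caller must hold s.mu.
func (s *server) loadCacheSlot(prompt []int) *InputCacheSlot {
	return &s.slots[0] // placeholder selection logic
}

// processBatch holds the same lock across slot selection and the decode
// step, so the two can never interleave across goroutines.
func (s *server) processBatch(prompt []int) {
	s.mu.Lock()
	defer s.mu.Unlock()

	slot := s.loadCacheSlot(prompt)
	slot.Inputs = append(slot.Inputs, prompt...)
	// ... the equivalent of llama.Decode would run here ...
	fmt.Printf("decoded %d inputs in slot %d\n", len(slot.Inputs), slot.Id)
}

func main() {
	s := &server{slots: make([]InputCacheSlot, 1)}
	s.processBatch([]int{1, 2, 3})
}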