disable output_all (#11959)

This commit is contained in:
Michael Yang
2025-08-18 17:45:40 -07:00
committed by GitHub
parent 9cfbffafc5
commit f804e8a460
3 changed files with 25 additions and 3 deletions

View File

@@ -962,8 +962,7 @@ int llama_context::decode(const llama_batch & batch_inp) {
const int64_t n_vocab = vocab.n_tokens();
const int64_t n_embd = hparams.n_embd;
-    // when computing embeddings, all tokens are output
-    const bool output_all = cparams.embeddings;
+    const bool output_all = false;
if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, output_all)) {
LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);