Mirror of https://github.com/ollama/ollama.git (synced 2025-03-26 09:42:10 +01:00)
omit prompt and generate settings from final response
This commit is contained in:
parent 52663284cf
commit 44869c59d6
llm/ext_server/server.cpp (vendored): 2 changed lines
@@ -1186,8 +1186,6 @@ struct llama_server_context
             {"model", params.model_alias},
             {"tokens_predicted", slot.n_decoded},
             {"tokens_evaluated", slot.n_prompt_tokens},
-            {"generation_settings", get_formated_generation(slot)},
-            {"prompt", slot.prompt},
             {"truncated", slot.truncated},
             {"stopped_eos", slot.stopped_eos},
             {"stopped_word", slot.stopped_word},
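For context, the fields shown above are entries of a JSON object (nlohmann::json, as vendored by llama.cpp) that the server sends as the final response for a request. The sketch below is a minimal, illustrative approximation of what that construction looks like after this commit; the helper name build_final_response and the stand-in types slot_state and server_params_stub are hypothetical (the real code in llm/ext_server/server.cpp builds the object inline with its own slot and params types), but the field names come directly from the diff.

#include <cstdint>
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

// Minimal stand-in types so the sketch compiles on its own; the real
// definitions in server.cpp contain many more fields.
struct slot_state {
    int32_t n_decoded = 0;        // tokens generated so far
    int32_t n_prompt_tokens = 0;  // tokens in the evaluated prompt
    bool truncated = false;
    bool stopped_eos = false;
    bool stopped_word = false;
};

struct server_params_stub {
    std::string model_alias;
};

// Hypothetical helper illustrating the final-response payload after this
// commit: the "generation_settings" and "prompt" entries are no longer
// included, per the commit message.
json build_final_response(const slot_state &slot, const server_params_stub &params) {
    return json{
        {"model",            params.model_alias},
        {"tokens_predicted", slot.n_decoded},
        {"tokens_evaluated", slot.n_prompt_tokens},
        {"truncated",        slot.truncated},
        {"stopped_eos",      slot.stopped_eos},
        {"stopped_word",     slot.stopped_word},
    };
}

The practical effect of the change is that clients reading the final response no longer get the full prompt or the generation settings echoed back; the token counts and stop flags remain.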