Mirror of https://github.com/danswer-ai/danswer.git (synced 2025-08-04 14:12:48 +02:00)
Do not stream Quote when using freeform prompt (#277)
@@ -202,6 +202,7 @@ def process_answer(
 def stream_json_answer_end(answer_so_far: str, next_token: str) -> bool:
     next_token = next_token.replace('\\"', "")
     # If the previous character is an escape token, don't consider the first character of next_token
     # This does not work if it's an escaped escape sign before the " but this is rare, not worth handling
     if answer_so_far and answer_so_far[-1] == "\\":
         next_token = next_token[1:]
     if '"' in next_token:
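
For readers following the streaming logic: the check in stream_json_answer_end ends the streamed answer as soon as an unescaped double quote arrives, since the answer in the JSON prompt is a quoted string value. Below is a minimal, hypothetical driver (not from the repository) that exercises the same check over a token stream:

def answer_end(answer_so_far: str, next_token: str) -> bool:
    # Same idea as stream_json_answer_end above: drop escaped quotes, ignore a
    # leading character escaped by the previous token, then look for a bare '"'.
    next_token = next_token.replace('\\"', "")
    if answer_so_far and answer_so_far[-1] == "\\":
        next_token = next_token[1:]
    return '"' in next_token


if __name__ == "__main__":
    answer_so_far = ""
    for token in ["The", " answer", " is", " 42", '."', ', "quotes": []}']:
        if answer_end(answer_so_far, token):
            print("answer ends at token:", repr(token))  # stops at '."'
            break
        answer_so_far += token
        print("stream:", repr(token))
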
@@ -230,9 +231,15 @@ def process_model_tokens(
     """Yields Answer tokens back out in a dict for streaming to frontend
     When Answer section ends, yields dict with answer_finished key
     Collects all the tokens at the end to form the complete model output"""
+    quote_pat = f"\n{QUOTE_PAT}"
+    # Sometimes worse model outputs new line instead of :
+    quote_loose = f"\n{quote_pat[:-1]}\n"
+    # Sometime model outputs two newlines before quote section
+    quote_pat_full = f"\n{quote_pat}"
     model_output: str = ""
     found_answer_start = False if is_json_prompt else True
     found_answer_end = False
+    hold_quote = ""
     for token in tokens:
         model_previous = model_output
         model_output += token
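
The three patterns defined above are what the freeform (non-JSON) branch scans for while streaming. As a rough illustration, assuming QUOTE_PAT is a literal label along the lines of "Quote:" (the actual constant is defined with the prompts and is not part of this diff):

QUOTE_PAT = "Quote:"  # assumption for illustration; see the prompt definitions

quote_pat = f"\n{QUOTE_PAT}"           # "\nQuote:"    -- the usual marker
quote_loose = f"\n{quote_pat[:-1]}\n"  # "\n\nQuote\n" -- model dropped the ":"
quote_pat_full = f"\n{quote_pat}"      # "\n\nQuote:"  -- extra blank line first

freeform_output = 'The answer is 42.\n\nQuote:\n"some supporting passage"'
print(quote_pat in freeform_output)       # True: quote section detected
print(quote_pat_full in freeform_output)  # True: two-newline variant also matches
print(quote_loose in freeform_output)     # False: only matters when the ":" is missing
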
@@ -246,13 +253,20 @@ def process_model_tokens(
             continue

         if found_answer_start and not found_answer_end:
-            if (is_json_prompt and stream_json_answer_end(model_previous, token)) or (
-                not is_json_prompt and f"\n{QUOTE_PAT}" in model_output
-            ):
+            if is_json_prompt and stream_json_answer_end(model_previous, token):
                 found_answer_end = True
                 yield {"answer_finished": True}
                 continue
-            yield {"answer_data": token}
+            elif not is_json_prompt:
+                if quote_pat in hold_quote + token or quote_loose in hold_quote + token:
+                    found_answer_end = True
+                    yield {"answer_finished": True}
+                    continue
+                if hold_quote + token in quote_pat_full:
+                    hold_quote += token
+                    continue
+            yield {"answer_data": hold_quote + token}
+            hold_quote = ""

     quotes = extract_quotes_from_completed_token_stream(model_output, context_docs)
     yield structure_quotes_for_response(quotes)
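
Taken together, the new freeform branch streams answer tokens one at a time, holds back any token that could be the start of the quote marker in hold_quote until it can be ruled in or out, and once the marker is seen it emits answer_finished instead of streaming the quote text. A simplified, self-contained sketch of just that branch (again assuming QUOTE_PAT == "Quote:"; the real process_model_tokens also handles the JSON path and keeps the full model output so quotes can be extracted after the loop):

from collections.abc import Iterator
from typing import Any

QUOTE_PAT = "Quote:"  # assumption for illustration


def stream_freeform(tokens: Iterator[str]) -> Iterator[dict[str, Any]]:
    quote_pat = f"\n{QUOTE_PAT}"
    quote_loose = f"\n{quote_pat[:-1]}\n"
    quote_pat_full = f"\n{quote_pat}"
    found_answer_end = False
    hold_quote = ""
    for token in tokens:
        if found_answer_end:
            continue  # quote section: never streamed to the client
        if quote_pat in hold_quote + token or quote_loose in hold_quote + token:
            found_answer_end = True
            yield {"answer_finished": True}
            continue
        if hold_quote + token in quote_pat_full:
            hold_quote += token  # could be the start of the marker, wait for more
            continue
        yield {"answer_data": hold_quote + token}
        hold_quote = ""


if __name__ == "__main__":
    fake_tokens = iter(["The answer", " is 42.", "\n", "Quote:", ' "supporting passage"'])
    for chunk in stream_freeform(fake_tokens):
        print(chunk)
    # {'answer_data': 'The answer'}
    # {'answer_data': ' is 42.'}
    # {'answer_finished': True}
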
@@ -138,8 +138,6 @@ export const searchRequestStreamed = async ({
       ) {
         answer += ".";
         updateCurrentAnswer(answer);
-      } else {
-        updateCurrentAnswer("");
       }
       return;
     }