Fix Quotes Prompting (#3137)
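Threads the raw user question through AnswerPromptBuilder as raw_user_message and uses it when building the Quotes user prompt, instead of reusing the already-assembled (message, token count) tuple. Because the Quotes prompt embeds the system instructions inside the user message, the builder's system prompt is now cleared on that path. Also appends # type: ignore to two untyped OpenAI helper functions.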
@@ -263,6 +263,7 @@ class Answer:
             message_history=self.message_history,
             llm_config=self.llm.config,
             single_message_history=self.single_message_history,
+            raw_user_text=self.question,
         )
         prompt_builder.update_system_prompt(
             default_build_system_message(self.prompt_config)
@@ -59,6 +59,7 @@ class AnswerPromptBuilder:
         message_history: list[PreviousMessage],
         llm_config: LLMConfig,
         single_message_history: str | None = None,
+        raw_user_text: str | None = None,
     ) -> None:
         self.max_tokens = compute_max_llm_input_tokens(llm_config)
 
@@ -88,6 +89,12 @@ class AnswerPromptBuilder:
 
         self.new_messages_and_token_cnts: list[tuple[BaseMessage, int]] = []
 
+        self.raw_user_message = (
+            HumanMessage(content=raw_user_text)
+            if raw_user_text is not None
+            else user_message
+        )
+
     def update_system_prompt(self, system_message: SystemMessage | None) -> None:
         if not system_message:
             self.system_message_and_token_cnt = None
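For reference, a minimal standalone sketch of the fallback semantics introduced above (the builder is reduced to a single helper here; only HumanMessage is the real langchain class):

    from langchain_core.messages import HumanMessage

    def resolve_raw_user_message(
        raw_user_text: str | None, user_message: HumanMessage
    ) -> HumanMessage:
        # Keep the caller's untouched question when provided; otherwise fall
        # back to the user message the builder was constructed with, so call
        # sites that do not pass raw_user_text keep their old behavior.
        if raw_user_text is not None:
            return HumanMessage(content=raw_user_text)
        return user_message

    assert resolve_raw_user_message("hi", HumanMessage(content="built")).content == "hi"
    assert resolve_raw_user_message(None, HumanMessage(content="built")).content == "built"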
@@ -55,9 +55,12 @@ def build_next_prompt_for_search_like_tool(
             )
         )
     elif answer_style_config.quotes_config:
+        # For Quotes, the system prompt is included in the user prompt
+        prompt_builder.update_system_prompt(None)
+
         prompt_builder.update_user_prompt(
             build_quotes_user_message(
-                message=prompt_builder.user_message_and_token_cnt[0],
+                message=prompt_builder.raw_user_message,
                 context_docs=final_context_documents,
                 history_str=prompt_builder.single_message_history or "",
                 prompt=prompt_config,
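To illustrate the failure mode this hunk addresses, a toy sketch (build_quotes_user_message_stub is a hypothetical stand-in, not the real implementation): passing the already-built user message wraps the question a second time, while passing the raw question wraps it exactly once.

    from langchain_core.messages import HumanMessage

    def build_quotes_user_message_stub(message: HumanMessage, context: str) -> HumanMessage:
        # Stand-in for the real prompt builder: wraps the question with context.
        return HumanMessage(content=f"CONTEXT:\n{context}\n\nQUESTION:\n{message.content}")

    raw = HumanMessage(content="What is our refund policy?")
    already_built = build_quotes_user_message_stub(raw, "doc snippets...")

    # Old behavior: the fully built message gets wrapped a second time,
    # duplicating the context and instructions in the final prompt.
    doubled = build_quotes_user_message_stub(already_built, "doc snippets...")
    # New behavior: the raw question is wrapped exactly once.
    correct = build_quotes_user_message_stub(raw, "doc snippets...")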
@@ -28,7 +28,7 @@ If there is no relevant information, just say "No relevant information found."
 """
 
 
-def wait_on_run(client: OpenAI, run, thread):
+def wait_on_run(client: OpenAI, run, thread):  # type: ignore
     while run.status == "queued" or run.status == "in_progress":
         run = client.beta.threads.runs.retrieve(
             thread_id=thread.id,
@@ -38,7 +38,7 @@ def wait_on_run(client: OpenAI, run, thread):
     return run
 
 
-def show_response(messages) -> None:
+def show_response(messages) -> None:  # type: ignore
     # Get only the assistant's response text
     for message in messages.data[::-1]:
         if message.role == "assistant":
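For context, these two helpers poll an OpenAI Assistants run until it finishes and then print the assistant's reply. A hedged usage sketch (the assistant id and prompt are placeholders; the beta Assistants client calls are assumed from the openai-python package):

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment
    thread = client.beta.threads.create()
    client.beta.threads.messages.create(
        thread_id=thread.id, role="user", content="What does the doc say?"
    )
    run = client.beta.threads.runs.create(
        thread_id=thread.id, assistant_id="asst_..."  # placeholder assistant id
    )
    run = wait_on_run(client, run, thread)  # block until queued/in_progress ends
    messages = client.beta.threads.messages.list(thread_id=thread.id)
    show_response(messages)  # print only the assistant's response text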