some qol improvements for search pipeline (#1809)

This commit is contained in:
hagen-danswer 2024-07-11 17:42:11 -07:00 committed by GitHub
parent 3e645a510e
commit 991ee79e47
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 21 additions and 19 deletions

1
.gitignore vendored
View File

@@ -6,3 +6,4 @@
/deployment/data/nginx/app.conf
.vscode/launch.json
*.sw?
/backend/tests/regression/answer_quality/search_test_config.yaml

View File

@@ -55,21 +55,14 @@ def get_answer_from_query(query: str, run_suffix: str) -> tuple[list[str], str]:
body["user"] = None
try:
response_json = requests.post(url, headers=headers, json=body).json()
content_list = [
context.get("content", "")
for context in response_json.get("contexts", {}).get("contexts", [])
]
answer = response_json.get("answer")
context_data_list = response_json.get("contexts", {}).get("contexts", [])
answer = response_json.get("answer", "")
except Exception as e:
print("Failed to answer the questions, trying again")
print(f"error: {str(e)}")
raise e
print("\nquery: ", query)
print("answer: ", answer)
print("content_list: ", content_list)
return content_list, answer
return context_data_list, answer
def check_if_query_ready(run_suffix: str) -> bool:

View File

@@ -112,7 +112,8 @@ def start_docker_compose(
run_suffix: str, launch_web_ui: bool, use_cloud_gpu: bool
) -> None:
print("Starting Docker Compose...")
os.chdir("../deployment/docker_compose")
os.chdir(os.path.dirname(__file__))
os.chdir("../../../../deployment/docker_compose/")
command = f"docker compose -f docker-compose.search-testing.yml -p danswer-stack{run_suffix} up -d"
command += " --build"
command += " --force-recreate"

View File

@@ -16,26 +16,33 @@ def _get_and_write_relari_outputs(
while not check_if_query_ready(run_suffix):
time.sleep(5)
count = 0
with open(output_file_path, "w", encoding="utf-8") as file:
for sample in samples:
retrieved_context, answer = get_answer_from_query(
query=sample["question"],
print(f"On question number {count}")
query = sample["question"]
print(f"query: {query}")
context_data_list, answer = get_answer_from_query(
query=query,
run_suffix=run_suffix,
)
if not answer:
print("NO ANSWER GIVEN FOR QUESTION:", sample["question"])
continue
print(f"answer: {answer[:50]}...")
if not context_data_list:
print("No context found")
else:
print(f"{len(context_data_list)} context docs found")
print("\n")
output = {
"label": sample["uid"],
"question": sample["question"],
"question_data": sample,
"answer": answer,
"retrieved_context": retrieved_context,
"context_data_list": context_data_list,
}
file.write(json.dumps(output) + "\n")
file.flush()
count += 1
def _write_metadata_file(run_suffix: str, metadata_file_path: str) -> None: