Evaluation Pipeline Touchup (#1783)

Author: Yuhong Sun
Date: 2024-07-06 13:17:05 -07:00
Committed by: GitHub
Parent: c5bb3fde94
Commit: d35d7ee833
5 changed files with 11 additions and 3 deletions

View File

@@ -68,6 +68,7 @@ def set_env_variables(
    if use_cloud_gpu:
        env_vars["MODEL_SERVER_HOST"] = remote_server_ip
        env_vars["MODEL_SERVER_PORT"] = remote_server_port
        env_vars["INDEXING_MODEL_SERVER_HOST"] = remote_server_ip
    for env_var_name, env_var in env_vars.items():
        os.environ[env_var_name] = env_var
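
For context, here is a minimal sketch of how the touched-up helper might look after this change. The `INDEXING_MODEL_SERVER_HOST` assignment is the line this hunk adds; the function's parameter list and the `env_vars` initialization are assumptions for illustration, not the repository's actual code.

```python
import os


# Sketch only: the parameter list and env_vars setup are assumptions.
def set_env_variables(
    remote_server_ip: str,
    remote_server_port: str,
    use_cloud_gpu: bool,
) -> None:
    env_vars: dict[str, str] = {}
    if use_cloud_gpu:
        env_vars["MODEL_SERVER_HOST"] = remote_server_ip
        env_vars["MODEL_SERVER_PORT"] = remote_server_port
        # Added in this commit: point the indexing model server at the
        # remote GPU host as well, so indexing does not fall back to a
        # local model server.
        env_vars["INDEXING_MODEL_SERVER_HOST"] = remote_server_ip

    for env_var_name, env_var in env_vars.items():
        os.environ[env_var_name] = env_var
```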
@@ -78,10 +79,9 @@ def start_docker_compose(
    run_suffix: str, launch_web_ui: bool, use_cloud_gpu: bool
) -> None:
    print("Starting Docker Compose...")
    os.chdir(os.path.expanduser("~/danswer/deployment/docker_compose"))
    os.chdir("../deployment/docker_compose")
    command = f"docker compose -f docker-compose.search-testing.yml -p danswer-stack{run_suffix} up -d"
    command += " --build"
    command += " --pull always"
    command += " --force-recreate"
    if not launch_web_ui:
        command += " --scale web_server=0"

View File

@@ -105,4 +105,8 @@ def main() -> None:
if __name__ == "__main__":
    """
    To run a different set of questions, update the questions_file in search_test_config.yaml
    If there is more than one instance of Danswer running, specify the suffix in search_test_config.yaml
    """
    main()
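
The docstring points at two settings in search_test_config.yaml. Below is a hedged sketch of how a caller might read them with PyYAML; `questions_file` is named in the docstring, while the suffix key name is a hypothetical placeholder.

```python
import yaml  # PyYAML, assumed to be available in the eval environment

with open("search_test_config.yaml") as f:
    config = yaml.safe_load(f)

# questions_file is named in the docstring; the suffix key below is a
# hypothetical placeholder, not necessarily the real key name.
questions_file = config["questions_file"]
instance_suffix = config.get("instance_suffix", "")

print(f"Questions: {questions_file}, target instance suffix: {instance_suffix!r}")
```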

View File

@@ -1,3 +1,6 @@
# Copy this to search_test_config.yaml and fill in the values to run the eval pipeline
# Don't forget to also update the .env_eval file with the correct values
# Directory where test results will be saved
output_folder: "~/danswer_test_results"
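
A small sketch of how the pipeline might treat `output_folder`, expanding the tilde before writing results; only the key and its default value come from the template above, the handling itself is an assumption.

```python
import os

# "output_folder" comes from the template above; expanding the tilde and
# creating the directory are assumptions about how the pipeline uses it.
output_folder = os.path.expanduser("~/danswer_test_results")
os.makedirs(output_folder, exist_ok=True)
print(f"Test results will be written under {output_folder}")
```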