Slack to respect LLM chunk filter settings (#768)

Yuhong Sun 2023-11-26 01:06:12 -08:00 committed by GitHub
parent 8391d89bea
commit 65d38ac8c3
3 changed files with 10 additions and 1 deletion

View File

@@ -23,6 +23,7 @@ from danswer.danswerbot.slack.utils import get_channel_name_from_id
 from danswer.danswerbot.slack.utils import respond_in_thread
 from danswer.db.engine import get_sqlalchemy_engine
 from danswer.dynamic_configs.interface import ConfigNotFoundError
+from danswer.search.search_nlp_models import warm_up_models
 from danswer.utils.logger import setup_logger
@@ -294,6 +295,8 @@ def process_slack_event(client: SocketModeClient, req: SocketModeRequest) -> None:
 # without issue.
 if __name__ == "__main__":
     try:
+        warm_up_models()
         socket_client = _get_socket_client()
         socket_client.socket_mode_request_listeners.append(process_slack_event)  # type: ignore
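The listener change preloads the bot's NLP models at startup rather than on the first incoming Slack message, so the first question does not pay the model-loading cost. Below is a minimal sketch of the warm-up pattern, assuming sentence-transformers style embedding and cross-encoder models; the model names and the body of the helper are illustrative placeholders, not Danswer's actual warm_up_models implementation.

    # Sketch of a model warm-up helper; not Danswer's actual warm_up_models.
    # Model names are illustrative placeholders.
    from sentence_transformers import CrossEncoder, SentenceTransformer

    EMBEDDING_MODEL = "intfloat/e5-base-v2"
    RERANKING_MODEL = "cross-encoder/ms-marco-MiniLM-L-6-v2"


    def warm_up_models() -> None:
        """Run a throwaway inference so weights are downloaded and loaded
        before the first real query arrives."""
        warm_up_text = "danswer warm up"
        SentenceTransformer(EMBEDDING_MODEL).encode(warm_up_text)
        CrossEncoder(RERANKING_MODEL).predict([(warm_up_text, warm_up_text)])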

View File

@@ -520,7 +520,11 @@ def full_chunk_search_generator(
         )
         llm_filter_task_id = post_processing_tasks[-1].result_id
-    post_processing_results = run_functions_in_parallel(post_processing_tasks)
+    post_processing_results = (
+        run_functions_in_parallel(post_processing_tasks)
+        if post_processing_tasks
+        else {}
+    )
     reranked_chunks = cast(
         list[InferenceChunk] | None,
         post_processing_results.get(str(rerank_task_id)) if rerank_task_id else None,
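With the LLM chunk filter (and reranking) disabled, post_processing_tasks can end up empty; the old line still handed that empty list to run_functions_in_parallel unconditionally, while the guard above substitutes an empty dict so the later .get(...) lookups simply fall through to None. Below is a rough sketch of the parallel-runner pattern the guard protects, assuming a FunctionCall wrapper that exposes the result_id attribute read in the diff and a thread pool sized from the task list; Danswer's actual utility may differ in detail.

    # Rough sketch; not Danswer's actual run_functions_in_parallel.
    import uuid
    from collections.abc import Callable
    from concurrent.futures import ThreadPoolExecutor
    from typing import Any


    class FunctionCall:
        """Stand-in mirroring only the result_id attribute used above."""

        def __init__(self, func: Callable[..., Any], args: tuple = ()) -> None:
            self.func = func
            self.args = args
            self.result_id = str(uuid.uuid4())


    def run_functions_in_parallel(calls: list[FunctionCall]) -> dict[str, Any]:
        # Sizing the pool from the task list raises ValueError on an empty
        # list (max_workers must be > 0), which is presumably why the caller
        # now skips the call entirely and uses {} instead.
        with ThreadPoolExecutor(max_workers=len(calls)) as executor:
            futures = {
                call.result_id: executor.submit(call.func, *call.args)
                for call in calls
            }
            return {rid: future.result() for rid, future in futures.items()}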

View File

@@ -87,6 +87,8 @@ services:
       - GEN_AI_API_KEY=${GEN_AI_API_KEY:-}
       - GEN_AI_API_ENDPOINT=${GEN_AI_API_ENDPOINT:-}
       - GEN_AI_API_VERSION=${GEN_AI_API_VERSION:-}
+      - DISABLE_LLM_FILTER_EXTRACTION=${DISABLE_LLM_FILTER_EXTRACTION:-}
+      - DISABLE_LLM_CHUNK_FILTER=${DISABLE_LLM_CHUNK_FILTER:-}
       - POSTGRES_HOST=relational_db
       - VESPA_HOST=index
       - NUM_INDEXING_WORKERS=${NUM_INDEXING_WORKERS:-}
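The compose change forwards the two disable flags to this container; the ${VAR:-} form defaults each to an empty string when unset, so existing deployments keep their current behavior. Below is an illustrative sketch of how such boolean flags are commonly parsed on the Python side; the variable names match the diff, but the parsing is an assumption, not Danswer's exact config code.

    # Illustrative env-flag parsing; Danswer's real config module may differ.
    import os

    DISABLE_LLM_FILTER_EXTRACTION = (
        os.environ.get("DISABLE_LLM_FILTER_EXTRACTION", "").lower() == "true"
    )
    DISABLE_LLM_CHUNK_FILTER = (
        os.environ.get("DISABLE_LLM_CHUNK_FILTER", "").lower() == "true"
    )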