Mirror of https://github.com/danswer-ai/danswer.git, synced 2025-06-01 02:30:18 +02:00.
* fresh indexing feature branch * cherry pick test * Revert "cherry pick test" This reverts commit 2a624220687affdda3de347e30f2011136f64bda. * set multitenant so that vespa fields match when indexing * cleanup pass * mypy * pass through env var to control celery indexing concurrency * comments on task kickoff and some logging improvements * disentangle configuration for different workers and beats. * use get_session_with_tenant * comment out all of update.py * rename to RedisConnectorIndexingFenceData * first check num_indexing_workers * refactor RedisConnectorIndexingFenceData * comment out on_worker_process_init * missed a file * scope db sessions to short lengths * update launch.json template * fix types * code review
21 lines · 830 B · Python
"""Celery worker configuration module.

Pulls the shared broker / Redis / result-backend / task settings from the
base config and overrides only the worker-pool settings. Per the
accompanying commit notes this appears to be the config for the dedicated
indexing worker — TODO confirm against the celery app wiring.
"""
import danswer.background.celery.configs.base as shared_config

# --- Broker (message transport) settings, inherited from the base config ---
broker_url = shared_config.broker_url
broker_connection_retry_on_startup = shared_config.broker_connection_retry_on_startup
broker_pool_limit = shared_config.broker_pool_limit
broker_transport_options = shared_config.broker_transport_options

# --- Redis connection-health settings, inherited from the base config ---
redis_socket_keepalive = shared_config.redis_socket_keepalive
redis_retry_on_timeout = shared_config.redis_retry_on_timeout
redis_backend_health_check_interval = shared_config.redis_backend_health_check_interval

# --- Result-backend settings, inherited from the base config ---
result_backend = shared_config.result_backend
result_expires = shared_config.result_expires  # 86400 seconds is the default

# --- Task settings, inherited from the base config ---
task_default_priority = shared_config.task_default_priority
task_acks_late = shared_config.task_acks_late

# --- Worker settings specific to this worker (not inherited) ---
# Small fixed thread pool; prefetch of 1 so long-running tasks are not
# hoarded by a busy worker. NOTE(review): the commit log mentions an env var
# controlling indexing concurrency — confirm whether this constant should
# read from it.
worker_concurrency = 4
worker_pool = "threads"
worker_prefetch_multiplier = 1