[supervisord]
nodaemon=true
user=root
logfile=/var/log/supervisord.log

# Indexing is the heaviest job and also includes some CPU-intensive steps.
# It cannot be placed in Celery for now because Celery must run as a single process (see note below).
# Indexing uses multiprocessing to speed things up.
[program:document_indexing]
environment=CURRENT_PROCESS_IS_AN_INDEXING_JOB=true
command=python danswer/background/update.py
stdout_logfile=/var/log/document_indexing.log
stdout_logfile_maxbytes=16MB
redirect_stderr=true
autorestart=true

# Background jobs that must be run async due to their long time to completion.
# NOTE: due to an issue with Celery + SQLAlchemy
# (https://github.com/celery/celery/issues/7007#issuecomment-1740139367)
# we must use the threads pool instead of the default prefork pool for now
# in order to avoid intermittent errors like:
# `billiard.exceptions.WorkerLostError: Worker exited prematurely: signal 11 (SIGSEGV)`.
#
# This means workers will not be able to take advantage of multiple CPU cores
# on a system, but this should be okay for now since all our Celery tasks are
# relatively compute-light (e.g. they tend to just make a bunch of requests to
# Vespa / Postgres).
[program:celery_worker_primary]
command=celery -A danswer.background.celery.celery_run:celery_app worker --pool=threads --concurrency=4 --prefetch-multiplier=1 --loglevel=INFO --hostname=primary@%%n -Q celery
stdout_logfile=/var/log/celery_worker_primary.log
stdout_logfile_maxbytes=16MB
redirect_stderr=true
autorestart=true
startsecs=10
stopasgroup=true

# Concurrency and prefetch for this worker can be overridden via environment
# variables (see the operational notes at the end of this file).
[program:celery_worker_light]
command=bash -c "celery -A danswer.background.celery.celery_run:celery_app worker \
    --pool=threads \
    --concurrency=${CELERY_WORKER_LIGHT_CONCURRENCY:-24} \
    --prefetch-multiplier=${CELERY_WORKER_LIGHT_PREFETCH_MULTIPLIER:-8} \
    --loglevel=INFO \
    --hostname=light@%%n \
    -Q vespa_metadata_sync,connector_deletion"
stdout_logfile=/var/log/celery_worker_light.log
stdout_logfile_maxbytes=16MB
redirect_stderr=true
autorestart=true
startsecs=10
stopasgroup=true

[program:celery_worker_heavy]
command=celery -A danswer.background.celery.celery_run:celery_app worker --pool=threads --concurrency=4 --prefetch-multiplier=1 --loglevel=INFO --hostname=heavy@%%n -Q connector_pruning
stdout_logfile=/var/log/celery_worker_heavy.log
stdout_logfile_maxbytes=16MB
redirect_stderr=true
autorestart=true
startsecs=10
stopasgroup=true

# Job scheduler for periodic tasks
[program:celery_beat]
command=celery -A danswer.background.celery.celery_run:celery_app beat
stdout_logfile=/var/log/celery_beat.log
stdout_logfile_maxbytes=16MB
redirect_stderr=true
startsecs=10
stopasgroup=true
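# A quick sanity check that each worker above attached to its intended queues
# (an illustrative shell command to run inside the container, not part of this
# config; the app path and queue names come from the sections above):
#   celery -A danswer.background.celery.celery_run:celery_app inspect active_queues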
# Listens for Slack messages and responds with answers
# for all channels that the DanswerBot has been added to.
# If not set up, this will just fail 5 times and then stop.
# More details on setup here: https://docs.danswer.dev/slack_bot_setup
[program:slack_bot]
command=python danswer/danswerbot/slack/listener.py
stdout_logfile=/var/log/slack_bot.log
stdout_logfile_maxbytes=16MB
redirect_stderr=true
autorestart=true
startretries=5
startsecs=60

# Pushes all logs from the above programs to stdout.
# No log rotation here; since everything goes to stdout, rotation is handled
# by the Docker container's logging driver.
[program:log-redirect-handler]
command=tail -qF /var/log/celery_beat.log /var/log/celery_worker_primary.log /var/log/celery_worker_light.log /var/log/celery_worker_heavy.log /var/log/document_indexing.log /var/log/slack_bot.log
stdout_logfile=/dev/stdout
# maxbytes must be set to 0 when stdout_logfile=/dev/stdout
stdout_logfile_maxbytes=0
autorestart=true
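# ---------------------------------------------------------------------------
# Operational notes (illustrative examples, not parsed as config):
#
# Because log-redirect-handler funnels every program's log to the container's
# stdout, the combined output can be followed from the host, e.g. (the
# container name below is a placeholder):
#   docker logs -f danswer-background
#
# The light worker reads CELERY_WORKER_LIGHT_CONCURRENCY and
# CELERY_WORKER_LIGHT_PREFETCH_MULTIPLIER (defaulting to 24 and 8 above), so
# its pool can be tuned at container start without editing this file, e.g.:
#   docker run -e CELERY_WORKER_LIGHT_CONCURRENCY=48 \
#       -e CELERY_WORKER_LIGHT_PREFETCH_MULTIPLIER=4 ...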