# supervisord.conf — process definitions for the Danswer background container
# Read by supervisord; one [program:x] section per long-running background job.

# Run supervisord in the foreground so the Docker container stays alive
[supervisord]
nodaemon=true
logfile=/var/log/supervisord.log
# Indexing is the heaviest job, also requires some CPU intensive steps
# Cannot place this in Celery for now because Celery must run as a single process (see note below)
# Indexing uses multi-processing to speed things up
[program:document_indexing]
environment=CURRENT_PROCESS_IS_AN_INDEXING_JOB=true
command=python danswer/background/update.py
stdout_logfile=/var/log/update.log
# rotate the captured stdout log at 50 MiB
stdout_logfile_maxbytes=52428800
redirect_stderr=true
autorestart=true
# Background jobs that must be run async due to long time to completion
# NOTE: due to an issue with Celery + SQLAlchemy
# (https://github.com/celery/celery/issues/7007#issuecomment-1740139367)
# we must use the threads pool instead of the default prefork pool for now
# in order to avoid intermittent errors like:
# `billiard.exceptions.WorkerLostError: Worker exited prematurely: signal 11 (SIGSEGV)`.
#
# This means workers will not be able take advantage of multiple CPU cores
# on a system, but this should be okay for now since all our celery tasks are
# relatively compute-light (e.g. they tend to just make a bunch of requests to
# Vespa / Postgres)
[program:celery_worker]
command=celery -A danswer.background.celery worker --pool=threads --autoscale=3,10 --loglevel=INFO --logfile=/var/log/celery_worker.log
stdout_logfile=/var/log/celery_worker_supervisor.log
# rotate the captured stdout log at 50 MiB
stdout_logfile_maxbytes=52428800
redirect_stderr=true
autorestart=true
# Job scheduler for periodic tasks
[program:celery_beat]
command=celery -A danswer.background.celery beat --loglevel=INFO --logfile=/var/log/celery_beat.log
stdout_logfile=/var/log/celery_beat_supervisor.log
# rotate the captured stdout log at 50 MiB
stdout_logfile_maxbytes=52428800
redirect_stderr=true
autorestart=true
# Listens for Slack messages and responds with answers
# for all channels that the DanswerBot has been added to.
# If not setup, this will just fail 5 times and then stop.
# More details on setup here: https://docs.danswer.dev/slack_bot_setup
[program:slack_bot_listener]
command=python danswer/danswerbot/slack/listener.py
stdout_logfile=/var/log/slack_bot_listener.log
# rotate the captured stdout log at 50 MiB
stdout_logfile_maxbytes=52428800
redirect_stderr=true
autorestart=true
# give up after 5 failed starts (expected when the Slack bot is not configured)
startretries=5
# process must stay up 60 seconds to count as successfully started
startsecs=60
# Pushes all logs from the above programs to stdout
# No log rotation here, since it's stdout it's handled by the Docker container loglevel
# To be standard across all the services
[program:log-redirect-handler]
command=tail -qF /var/log/update.log /var/log/celery_worker.log /var/log/celery_worker_supervisor.log /var/log/celery_beat.log /var/log/celery_beat_supervisor.log /var/log/slack_bot_listener.log
stdout_logfile=/dev/stdout
# 0 disables rotation — rotating /dev/stdout would break container log capture
stdout_logfile_maxbytes=0
redirect_stderr=true
autorestart=true