first cut at redis (#2226)

* first cut at redis

* fix startup dependencies on redis

* kombu cleanup - fail silently

* mypy

* add redis_host environment override

* update REDIS_HOST env var in docker-compose.dev.yml

* update the rest of the docker files

* update contributing guide

* renaming cache to cache_volume

* add redis password to various deployments

* try setting up pr testing for helm

* fix indent

* hopefully this release version actually exists

* fix command line option to --chart-dirs

* fetch-depth 0

* edit values.yaml

* try setting ct working directory

* bypass testing only on change for now

* move files and lint them

* update helm testing

* some issues suggest using --config works

* add vespa repo

* add postgresql repo

* increase timeout

* try amd64 runner

* fix redis password reference

* add comment to helm chart testing workflow

* rename helm testing workflow to disable it

---------

Co-authored-by: Richard Kuo <rkuo@rkuo.com>
This commit is contained in:
rkuo-danswer
2024-09-06 12:21:29 -07:00
committed by GitHub
parent aeb6060854
commit 2933c3598b
43 changed files with 268 additions and 23 deletions

View File

@@ -12,6 +12,7 @@ services:
depends_on:
- relational_db
- index
- cache
- inference_model_server
restart: always
ports:
@@ -62,6 +63,7 @@ services:
# Other services
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- WEB_DOMAIN=${WEB_DOMAIN:-} # For frontend redirect auth purpose
# Don't change the NLP model configs unless you know what you're doing
- DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
@@ -107,6 +109,7 @@ services:
depends_on:
- relational_db
- index
- cache
- inference_model_server
- indexing_model_server
restart: always
@@ -137,6 +140,7 @@ services:
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-}
- POSTGRES_DB=${POSTGRES_DB:-}
- VESPA_HOST=index
- REDIS_HOST=cache
- WEB_DOMAIN=${WEB_DOMAIN:-} # For frontend redirect auth purpose for OAuth2 connectors
# Don't change the NLP model configs unless you know what you're doing
- DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
@@ -330,9 +334,19 @@ services:
# in order to make this work on both Unix-like systems and windows
command: >
/bin/sh -c "dos2unix /etc/nginx/conf.d/run-nginx.sh
&& /etc/nginx/conf.d/run-nginx.sh app.conf.template.dev"
&& /etc/nginx/conf.d/run-nginx.sh app.conf.template.dev"
cache:
image: redis:7.4-alpine
restart: always
ports:
- '6379:6379'
command: redis-server
volumes:
- cache_volume:/data
volumes:
cache_volume:
db_volume:
vespa_volume: # Created by the container itself

View File

@@ -12,6 +12,7 @@ services:
depends_on:
- relational_db
- index
- cache
- inference_model_server
restart: always
ports:
@@ -58,6 +59,7 @@ services:
# Other services
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- WEB_DOMAIN=${WEB_DOMAIN:-} # For frontend redirect auth purpose
# Don't change the NLP model configs unless you know what you're doing
- DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
@@ -99,6 +101,7 @@ services:
depends_on:
- relational_db
- index
- cache
- inference_model_server
- indexing_model_server
restart: always
@@ -129,6 +132,7 @@ services:
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-}
- POSTGRES_DB=${POSTGRES_DB:-}
- VESPA_HOST=index
- REDIS_HOST=cache
- WEB_DOMAIN=${WEB_DOMAIN:-} # For frontend redirect auth purpose for OAuth2 connectors
# Don't change the NLP model configs unless you know what you're doing
- DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
@@ -341,9 +345,20 @@ services:
command: >
/bin/sh -c "dos2unix /etc/nginx/conf.d/run-nginx.sh
&& /etc/nginx/conf.d/run-nginx.sh app.conf.template.dev"
cache:
image: redis:7.4-alpine
restart: always
ports:
- '6379:6379'
command: redis-server
volumes:
- cache_volume:/data
volumes:
cache_volume:
db_volume:
vespa_volume:
# Created by the container itself

View File

@@ -12,6 +12,7 @@ services:
depends_on:
- relational_db
- index
- cache
- inference_model_server
restart: always
env_file:
@@ -20,6 +21,7 @@ services:
- AUTH_TYPE=${AUTH_TYPE:-oidc}
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
extra_hosts:
- "host.docker.internal:host-gateway"
@@ -39,6 +41,7 @@ services:
depends_on:
- relational_db
- index
- cache
- inference_model_server
- indexing_model_server
restart: always
@@ -48,6 +51,7 @@ services:
- AUTH_TYPE=${AUTH_TYPE:-oidc}
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
extra_hosts:
@@ -204,7 +208,18 @@ services:
- .env.nginx
cache:
image: redis:7.4-alpine
restart: always
ports:
- '6379:6379'
command: redis-server
volumes:
- cache_volume:/data
volumes:
cache_volume:
db_volume:
vespa_volume:
# Created by the container itself

View File

@@ -12,6 +12,7 @@ services:
depends_on:
- relational_db
- index
- cache
- inference_model_server
restart: always
env_file:
@@ -20,6 +21,7 @@ services:
- AUTH_TYPE=${AUTH_TYPE:-oidc}
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
extra_hosts:
- "host.docker.internal:host-gateway"
@@ -39,6 +41,7 @@ services:
depends_on:
- relational_db
- index
- cache
- inference_model_server
- indexing_model_server
restart: always
@@ -48,6 +51,7 @@ services:
- AUTH_TYPE=${AUTH_TYPE:-oidc}
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
extra_hosts:
@@ -221,7 +225,18 @@ services:
entrypoint: "/bin/sh -c 'trap exit TERM; while :; do certbot renew; sleep 12h & wait $${!}; done;'"
cache:
image: redis:7.4-alpine
restart: always
ports:
- '6379:6379'
command: redis-server
volumes:
- cache_volume:/data
volumes:
cache_volume:
db_volume:
vespa_volume:
# Created by the container itself

View File

@@ -12,6 +12,7 @@ services:
depends_on:
- relational_db
- index
- cache
restart: always
ports:
- "8080"
@@ -21,6 +22,7 @@ services:
- AUTH_TYPE=disabled
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-}
- ENV_SEED_CONFIGURATION=${ENV_SEED_CONFIGURATION:-}
@@ -43,6 +45,7 @@ services:
depends_on:
- relational_db
- index
- cache
restart: always
env_file:
- .env_eval
@@ -50,6 +53,7 @@ services:
- AUTH_TYPE=disabled
- POSTGRES_HOST=relational_db
- VESPA_HOST=index
- REDIS_HOST=cache
- MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
- MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-}
- INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
@@ -200,7 +204,18 @@ services:
&& /etc/nginx/conf.d/run-nginx.sh app.conf.template.dev"
cache:
image: redis:7.4-alpine
restart: always
ports:
- '6379:6379'
command: redis-server
volumes:
- cache_volume:/data
volumes:
cache_volume:
db_volume:
driver: local
driver_opts: