Refactor environment variable handling using ConfigMap for Kubernetes deployment (#515)

---------

Co-authored-by: Reese Jenner <reesevader@hotmail.co.uk>
Co-authored-by: Yuhong Sun <yuhongsun96@gmail.com>
voarsh2 committed 2023-12-23 05:33:36 +00:00 (via GitHub)
commit 016a087b10
parent 241b886976
8 changed files with 168 additions and 128 deletions

---------

@@ -44,7 +44,7 @@ MASK_CREDENTIAL_PREFIX = (
 SECRET = os.environ.get("SECRET", "")
 SESSION_EXPIRE_TIME_SECONDS = int(
-    os.environ.get("SESSION_EXPIRE_TIME_SECONDS", 86400)
+    os.environ.get("SESSION_EXPIRE_TIME_SECONDS") or 86400
 ) # 1 day
 # set `VALID_EMAIL_DOMAINS` to a comma seperated list of domains in order to
@@ -189,22 +189,11 @@ MODEL_SERVER_HOST = os.environ.get("MODEL_SERVER_HOST") or None
 MODEL_SERVER_ALLOWED_HOST = os.environ.get("MODEL_SERVER_HOST") or "0.0.0.0"
 MODEL_SERVER_PORT = int(os.environ.get("MODEL_SERVER_PORT") or "9000")
-EMBEDDING_MODEL_SERVER_HOST = (
-    os.environ.get("EMBEDDING_MODEL_SERVER_HOST") or MODEL_SERVER_HOST
-)
-CROSS_ENCODER_MODEL_SERVER_HOST = (
-    os.environ.get("CROSS_ENCODER_MODEL_SERVER_HOST") or MODEL_SERVER_HOST
-)
-INTENT_MODEL_SERVER_HOST = (
-    os.environ.get("INTENT_MODEL_SERVER_HOST") or MODEL_SERVER_HOST
-)
 # specify this env variable directly to have a different model server for the background
 # indexing job vs the api server so that background indexing does not effect query-time
 # performance
-BACKGROUND_JOB_EMBEDDING_MODEL_SERVER_HOST = (
-    os.environ.get("BACKGROUND_JOB_EMBEDDING_MODEL_SERVER_HOST")
-    or EMBEDDING_MODEL_SERVER_HOST
-)
+INDEXING_MODEL_SERVER_HOST = (
+    os.environ.get("INDEXING_MODEL_SERVER_HOST") or MODEL_SERVER_HOST
+)
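
The one-line change above is load-bearing for the rest of this commit: with compose `${VAR:-}` defaults and "" ConfigMap values, a variable is often present but empty, and os.environ.get(key, default) only falls back when the key is missing entirely. A minimal sketch (not part of the commit) of the difference:

import os

# Simulate what a `${VAR:-}` compose default or an empty ConfigMap value
# produces: the key exists, but its value is the empty string.
os.environ["SESSION_EXPIRE_TIME_SECONDS"] = ""

# Old pattern: the default applies only when the key is absent, so the
# empty string comes back and int("") raises ValueError.
try:
    int(os.environ.get("SESSION_EXPIRE_TIME_SECONDS", 86400))
except ValueError:
    print("old pattern breaks on empty values")

# New pattern: `or` also treats "" as unset and falls back cleanly.
print(int(os.environ.get("SESSION_EXPIRE_TIME_SECONDS") or 86400))  # 86400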

---------

@@ -9,11 +9,9 @@ from sentence_transformers import SentenceTransformer # type: ignore
 from transformers import AutoTokenizer # type: ignore
 from transformers import TFDistilBertForSequenceClassification # type: ignore
-from danswer.configs.app_configs import BACKGROUND_JOB_EMBEDDING_MODEL_SERVER_HOST
-from danswer.configs.app_configs import CROSS_ENCODER_MODEL_SERVER_HOST
 from danswer.configs.app_configs import CURRENT_PROCESS_IS_AN_INDEXING_JOB
-from danswer.configs.app_configs import EMBEDDING_MODEL_SERVER_HOST
-from danswer.configs.app_configs import INTENT_MODEL_SERVER_HOST
+from danswer.configs.app_configs import INDEXING_MODEL_SERVER_HOST
 from danswer.configs.app_configs import MODEL_SERVER_HOST
 from danswer.configs.app_configs import MODEL_SERVER_PORT
 from danswer.configs.model_configs import CROSS_EMBED_CONTEXT_SIZE
 from danswer.configs.model_configs import CROSS_ENCODER_MODEL_ENSEMBLE
@@ -99,12 +97,13 @@ def get_local_intent_model(
 def build_model_server_url(
-    model_server_host: str,
+    model_server_host: str | None,
     model_server_port: int | None,
-) -> str:
-    model_server_url = model_server_host + (
-        f":{model_server_port}" if model_server_port else ""
-    )
+) -> str | None:
+    if not model_server_host or model_server_port is None:
+        return None
+    model_server_url = f"{model_server_host}:{model_server_port}"
     # use protocol if provided
     if "http" in model_server_url:
@@ -119,27 +118,23 @@ class EmbeddingModel:
         self,
         model_name: str = DOCUMENT_ENCODER_MODEL,
         max_seq_length: int = DOC_EMBEDDING_CONTEXT_SIZE,
-        # `model_server_host` one has to default to `None` since it's
-        # default value is conditional
-        model_server_host: str | None = None,
+        model_server_host: str | None = MODEL_SERVER_HOST,
+        indexing_model_server_host: str | None = INDEXING_MODEL_SERVER_HOST,
         model_server_port: int = MODEL_SERVER_PORT,
+        is_indexing: bool = CURRENT_PROCESS_IS_AN_INDEXING_JOB,
     ) -> None:
-        if model_server_host is None:
-            model_server_host = (
-                BACKGROUND_JOB_EMBEDDING_MODEL_SERVER_HOST
-                if CURRENT_PROCESS_IS_AN_INDEXING_JOB
-                else EMBEDDING_MODEL_SERVER_HOST
-            )
         self.model_name = model_name
         self.max_seq_length = max_seq_length
+        used_model_server_host = (
+            indexing_model_server_host if is_indexing else model_server_host
+        )
+        model_server_url = build_model_server_url(
+            used_model_server_host, model_server_port
+        )
         self.embed_server_endpoint = (
-            (
-                build_model_server_url(model_server_host, model_server_port)
-                + "/encoder/bi-encoder-embed"
-            )
-            if model_server_host
-            else None
+            model_server_url + "/encoder/bi-encoder-embed" if model_server_url else None
         )

     def load_model(self) -> SentenceTransformer | None:
@@ -182,17 +177,16 @@ class CrossEncoderEnsembleModel:
         self,
         model_names: list[str] = CROSS_ENCODER_MODEL_ENSEMBLE,
         max_seq_length: int = CROSS_EMBED_CONTEXT_SIZE,
-        model_server_host: str | None = CROSS_ENCODER_MODEL_SERVER_HOST,
+        model_server_host: str | None = MODEL_SERVER_HOST,
         model_server_port: int = MODEL_SERVER_PORT,
     ) -> None:
         self.model_names = model_names
         self.max_seq_length = max_seq_length
+        model_server_url = build_model_server_url(model_server_host, model_server_port)
         self.rerank_server_endpoint = (
-            (
-                build_model_server_url(model_server_host, model_server_port)
-                + "/encoder/cross-encoder-scores"
-            )
-            if model_server_host
+            model_server_url + "/encoder/cross-encoder-scores"
+            if model_server_url
             else None
         )
@@ -237,18 +231,15 @@ class IntentModel:
         self,
         model_name: str = INTENT_MODEL_VERSION,
         max_seq_length: int = QUERY_MAX_CONTEXT_SIZE,
-        model_server_host: str | None = INTENT_MODEL_SERVER_HOST,
+        model_server_host: str | None = MODEL_SERVER_HOST,
         model_server_port: int = MODEL_SERVER_PORT,
     ) -> None:
         self.model_name = model_name
         self.max_seq_length = max_seq_length
+        model_server_url = build_model_server_url(model_server_host, model_server_port)
         self.intent_server_endpoint = (
-            (
-                build_model_server_url(model_server_host, model_server_port)
-                + "/custom/intent-model"
-            )
-            if model_server_host
-            else None
+            model_server_url + "/custom/intent-model" if model_server_url else None
         )

     def load_model(self) -> SentenceTransformer | None:
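
The net effect across all three classes: host selection moves from import-time module constants to constructor arguments, and EmbeddingModel picks its host through an explicit is_indexing flag instead of branching on CURRENT_PROCESS_IS_AN_INDEXING_JOB inside its body. A hypothetical caller under the new scheme:

# API-server path: uses MODEL_SERVER_HOST, or loads the model locally if unset.
query_embedder = EmbeddingModel()

# Background path: prefers INDEXING_MODEL_SERVER_HOST, so heavy indexing
# traffic can be pointed at a separate model server from query-time requests.
index_embedder = EmbeddingModel(is_indexing=True)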

---------

@@ -16,6 +16,13 @@ services:
     ports:
       - "8080:8080"
     environment:
+      # Auth Settings
+      - AUTH_TYPE=${AUTH_TYPE:-disabled}
+      - SESSION_EXPIRE_TIME_SECONDS=${SESSION_EXPIRE_TIME_SECONDS:-}
+      - VALID_EMAIL_DOMAINS=${VALID_EMAIL_DOMAINS:-}
+      - GOOGLE_OAUTH_CLIENT_ID=${GOOGLE_OAUTH_CLIENT_ID:-}
+      - GOOGLE_OAUTH_CLIENT_SECRET=${GOOGLE_OAUTH_CLIENT_SECRET:-}
+      # Gen AI Settings
       - GEN_AI_MODEL_PROVIDER=${GEN_AI_MODEL_PROVIDER:-openai}
       - GEN_AI_MODEL_VERSION=${GEN_AI_MODEL_VERSION:-gpt-3.5-turbo}
       - FAST_GEN_AI_MODEL_VERSION=${FAST_GEN_AI_MODEL_VERSION:-gpt-3.5-turbo}
@@ -23,43 +30,34 @@
       - GEN_AI_API_ENDPOINT=${GEN_AI_API_ENDPOINT:-}
       - GEN_AI_API_VERSION=${GEN_AI_API_VERSION:-}
       - GEN_AI_LLM_PROVIDER_TYPE=${GEN_AI_LLM_PROVIDER_TYPE:-}
-      - NUM_DOCUMENT_TOKENS_FED_TO_GENERATIVE_MODEL=${NUM_DOCUMENT_TOKENS_FED_TO_GENERATIVE_MODEL:-}
-      - POSTGRES_HOST=relational_db
-      - VESPA_HOST=index
-      - AUTH_TYPE=${AUTH_TYPE:-disabled}
       - QA_TIMEOUT=${QA_TIMEOUT:-}
-      - VALID_EMAIL_DOMAINS=${VALID_EMAIL_DOMAINS:-}
-      - GOOGLE_OAUTH_CLIENT_ID=${GOOGLE_OAUTH_CLIENT_ID:-}
-      - GOOGLE_OAUTH_CLIENT_SECRET=${GOOGLE_OAUTH_CLIENT_SECRET:-}
-      - NOTION_CONNECTOR_ENABLE_RECURSIVE_PAGE_LOOKUP=${NOTION_CONNECTOR_ENABLE_RECURSIVE_PAGE_LOOKUP:-}
+      - NUM_DOCUMENT_TOKENS_FED_TO_GENERATIVE_MODEL=${NUM_DOCUMENT_TOKENS_FED_TO_GENERATIVE_MODEL:-}
       - DISABLE_LLM_FILTER_EXTRACTION=${DISABLE_LLM_FILTER_EXTRACTION:-}
       - DISABLE_LLM_CHUNK_FILTER=${DISABLE_LLM_CHUNK_FILTER:-}
       - DISABLE_LLM_CHOOSE_SEARCH=${DISABLE_LLM_CHOOSE_SEARCH:-}
-      # Recency Bias for search results, decay at 1 / (1 + DOC_TIME_DECAY * x years)
-      - DOC_TIME_DECAY=${DOC_TIME_DECAY:-}
-      # Hybrid Search Alpha (0 for entirely keyword, 1 for entirely vector)
-      - HYBRID_ALPHA=${HYBRID_ALPHA:-}
+      # Query Options
+      - DOC_TIME_DECAY=${DOC_TIME_DECAY:-} # Recency Bias for search results, decay at 1 / (1 + DOC_TIME_DECAY * x years)
+      - HYBRID_ALPHA=${HYBRID_ALPHA:-} # Hybrid Search Alpha (0 for entirely keyword, 1 for entirely vector)
+      - EDIT_KEYWORD_QUERY=${EDIT_KEYWORD_QUERY:-}
+      - MULTILINGUAL_QUERY_EXPANSION=${MULTILINGUAL_QUERY_EXPANSION:-}
+      - QA_PROMPT_OVERRIDE=${QA_PROMPT_OVERRIDE:-}
+      # Other services
+      - POSTGRES_HOST=relational_db
+      - VESPA_HOST=index
+      - WEB_DOMAIN=${WEB_DOMAIN:-} # For frontend redirect auth purpose
       # Don't change the NLP model configs unless you know what you're doing
       - DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
       - NORMALIZE_EMBEDDINGS=${NORMALIZE_EMBEDDINGS:-}
       - ASYM_QUERY_PREFIX=${ASYM_QUERY_PREFIX:-}
       - ASYM_PASSAGE_PREFIX=${ASYM_PASSAGE_PREFIX:-}
       - ENABLE_RERANKING_REAL_TIME_FLOW=${ENABLE_RERANKING_REAL_TIME_FLOW:-}
-      - QA_PROMPT_OVERRIDE=${QA_PROMPT_OVERRIDE:-}
-      - EDIT_KEYWORD_QUERY=${EDIT_KEYWORD_QUERY:-}
-      - MULTILINGUAL_QUERY_EXPANSION=${MULTILINGUAL_QUERY_EXPANSION:-}
       - ENABLE_RERANKING_ASYNC_FLOW=${ENABLE_RERANKING_ASYNC_FLOW:-}
       - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-}
-      - EMBEDDING_MODEL_SERVER_HOST=${EMBEDDING_MODEL_SERVER_HOST:-}
-      - CROSS_ENCODER_MODEL_SERVER_HOST=${CROSS_ENCODER_MODEL_SERVER_HOST:-}
-      - INTENT_MODEL_SERVER_HOST=${INTENT_MODEL_SERVER_HOST:-}
       - MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-}
       # Leave this on pretty please? Nothing sensitive is collected!
       # https://docs.danswer.dev/more/telemetry
       - DISABLE_TELEMETRY=${DISABLE_TELEMETRY:-}
-      # Set to debug to get more fine-grained logs
-      - LOG_LEVEL=${LOG_LEVEL:-info}
-      # Log all of the prompts to the LLM
-      - LOG_ALL_MODEL_INTERACTIONS=${LOG_ALL_MODEL_INTERACTIONS:-}
+      - LOG_LEVEL=${LOG_LEVEL:-info} # Set to debug to get more fine-grained logs
+      - LOG_ALL_MODEL_INTERACTIONS=${LOG_ALL_MODEL_INTERACTIONS:-} # Log all of the prompts to the LLM
       # If set to `true` will enable additional logs about Vespa query performance
       # (time spent on finding the right docs + time spent fetching summaries from disk)
       - LOG_VESPA_TIMING_INFORMATION=${LOG_VESPA_TIMING_INFORMATION:-}
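
Each `${VAR:-default}` reference above is substituted by Docker Compose from the host environment or an .env file beside the compose file, with the text after `:-` as the fallback. A hypothetical .env overriding a few of these values:

# Hypothetical .env next to the compose file; anything left unset or empty
# falls through to the `${VAR:-default}` fallbacks declared above.
AUTH_TYPE=google_oauth
MODEL_SERVER_HOST=model-server
LOG_LEVEL=debug
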
@@ -80,6 +78,7 @@
       - index
     restart: always
     environment:
+      # Gen AI Settings (Needed by DanswerBot)
       - GEN_AI_MODEL_PROVIDER=${GEN_AI_MODEL_PROVIDER:-openai}
       - GEN_AI_MODEL_VERSION=${GEN_AI_MODEL_VERSION:-gpt-3.5-turbo}
       - FAST_GEN_AI_MODEL_VERSION=${FAST_GEN_AI_MODEL_VERSION:-gpt-3.5-turbo}
@@ -87,19 +86,38 @@
       - GEN_AI_API_ENDPOINT=${GEN_AI_API_ENDPOINT:-}
       - GEN_AI_API_VERSION=${GEN_AI_API_VERSION:-}
       - GEN_AI_LLM_PROVIDER_TYPE=${GEN_AI_LLM_PROVIDER_TYPE:-}
+      - QA_TIMEOUT=${QA_TIMEOUT:-}
+      - NUM_DOCUMENT_TOKENS_FED_TO_GENERATIVE_MODEL=${NUM_DOCUMENT_TOKENS_FED_TO_GENERATIVE_MODEL:-}
       - DISABLE_LLM_FILTER_EXTRACTION=${DISABLE_LLM_FILTER_EXTRACTION:-}
       - DISABLE_LLM_CHUNK_FILTER=${DISABLE_LLM_CHUNK_FILTER:-}
       - DISABLE_LLM_CHOOSE_SEARCH=${DISABLE_LLM_CHOOSE_SEARCH:-}
+      # Query Options
+      - DOC_TIME_DECAY=${DOC_TIME_DECAY:-} # Recency Bias for search results, decay at 1 / (1 + DOC_TIME_DECAY * x years)
+      - HYBRID_ALPHA=${HYBRID_ALPHA:-} # Hybrid Search Alpha (0 for entirely keyword, 1 for entirely vector)
+      - EDIT_KEYWORD_QUERY=${EDIT_KEYWORD_QUERY:-}
+      - MULTILINGUAL_QUERY_EXPANSION=${MULTILINGUAL_QUERY_EXPANSION:-}
+      - QA_PROMPT_OVERRIDE=${QA_PROMPT_OVERRIDE:-}
+      # Other Services
       - POSTGRES_HOST=relational_db
       - VESPA_HOST=index
+      - WEB_DOMAIN=${WEB_DOMAIN:-} # For frontend redirect auth purpose for OAuth2 connectors
+      # Don't change the NLP model configs unless you know what you're doing
+      - DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
+      - NORMALIZE_EMBEDDINGS=${NORMALIZE_EMBEDDINGS:-}
+      - ASYM_QUERY_PREFIX=${ASYM_QUERY_PREFIX:-} # Needed by DanswerBot
+      - ASYM_PASSAGE_PREFIX=${ASYM_PASSAGE_PREFIX:-}
+      - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-}
+      - MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-}
+      - INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-}
+      - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
+      # Indexing Configs
       - NUM_INDEXING_WORKERS=${NUM_INDEXING_WORKERS:-}
       - DASK_JOB_CLIENT_ENABLED=${DASK_JOB_CLIENT_ENABLED:-}
+      # Connector Configs
       - CONTINUE_ON_CONNECTOR_FAILURE=${CONTINUE_ON_CONNECTOR_FAILURE:-}
+      - EXPERIMENTAL_CHECKPOINTING_ENABLED=${EXPERIMENTAL_CHECKPOINTING_ENABLED:-}
       - CONFLUENCE_CONNECTOR_LABELS_TO_SKIP=${CONFLUENCE_CONNECTOR_LABELS_TO_SKIP:-}
       - GONG_CONNECTOR_START_TIME=${GONG_CONNECTOR_START_TIME:-}
       - EXPERIMENTAL_SIMPLE_JOB_CLIENT_ENABLED=${EXPERIMENTAL_SIMPLE_JOB_CLIENT_ENABLED:-}
-      - EXPERIMENTAL_CHECKPOINTING_ENABLED=${EXPERIMENTAL_CHECKPOINTING_ENABLED:-}
       - NOTION_CONNECTOR_ENABLE_RECURSIVE_PAGE_LOOKUP=${NOTION_CONNECTOR_ENABLE_RECURSIVE_PAGE_LOOKUP:-}
       # Danswer SlackBot Configs
       - DANSWER_BOT_SLACK_APP_TOKEN=${DANSWER_BOT_SLACK_APP_TOKEN:-}
       - DANSWER_BOT_SLACK_BOT_TOKEN=${DANSWER_BOT_SLACK_BOT_TOKEN:-}
@@ -108,33 +126,13 @@
       - DANSWER_BOT_RESPOND_EVERY_CHANNEL=${DANSWER_BOT_RESPOND_EVERY_CHANNEL:-}
       - DANSWER_BOT_DISABLE_COT=${DANSWER_BOT_DISABLE_COT:-} # Currently unused
       - NOTIFY_SLACKBOT_NO_ANSWER=${NOTIFY_SLACKBOT_NO_ANSWER:-}
-      # Recency Bias for search results, decay at 1 / (1 + DOC_TIME_DECAY * x years)
-      - DOC_TIME_DECAY=${DOC_TIME_DECAY:-}
-      # Hybrid Search Alpha (0 for entirely keyword, 1 for entirely vector)
-      - HYBRID_ALPHA=${HYBRID_ALPHA:-}
-      # Don't change the NLP model configs unless you know what you're doing
-      - DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
-      - NORMALIZE_EMBEDDINGS=${NORMALIZE_EMBEDDINGS:-}
-      - SIM_SCORE_RANGE_LOW=${SIM_SCORE_RANGE_LOW:-}
-      - SIM_SCORE_RANGE_HIGH=${SIM_SCORE_RANGE_HIGH:-}
-      - ASYM_QUERY_PREFIX=${ASYM_QUERY_PREFIX:-}
-      - ASYM_PASSAGE_PREFIX=${ASYM_PASSAGE_PREFIX:-}
-      - ENABLE_RERANKING_ASYNC_FLOW=${ENABLE_RERANKING_ASYNC_FLOW:-}
-      - QA_PROMPT_OVERRIDE=${QA_PROMPT_OVERRIDE:-}
-      - EDIT_KEYWORD_QUERY=${EDIT_KEYWORD_QUERY:-}
-      - MULTILINGUAL_QUERY_EXPANSION=${MULTILINGUAL_QUERY_EXPANSION:-}
-      - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
-      - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-}
-      - EMBEDDING_MODEL_SERVER_HOST=${EMBEDDING_MODEL_SERVER_HOST:-}
-      - BACKGROUND_JOB_EMBEDDING_MODEL_SERVER_HOST=${BACKGROUND_JOB_EMBEDDING_MODEL_SERVER_HOST:-}
-      - MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-}
+      # Logging
       # Leave this on pretty please? Nothing sensitive is collected!
       # https://docs.danswer.dev/more/telemetry
       - DISABLE_TELEMETRY=${DISABLE_TELEMETRY:-}
-      # Set to debug to get more fine-grained logs
-      - LOG_LEVEL=${LOG_LEVEL:-info}
-      # Log all of the prompts to the LLM
-      - LOG_ALL_MODEL_INTERACTIONS=${LOG_ALL_MODEL_INTERACTIONS:-}
+      - LOG_LEVEL=${LOG_LEVEL:-info} # Set to debug to get more fine-grained logs
+      - LOG_ALL_MODEL_INTERACTIONS=${LOG_ALL_MODEL_INTERACTIONS:-} # Log all of the prompts to the LLM
       - LOG_VESPA_TIMING_INFORMATION=${LOG_VESPA_TIMING_INFORMATION:-}
     volumes:
       - local_dynamic_storage:/home/storage
       - file_connector_tmp_storage:/home/file_connector_storage

---------

@@ -39,23 +39,11 @@ spec:
               uvicorn danswer.main:app --host 0.0.0.0 --port 8080
           ports:
             - containerPort: 8080
-          env:
-            - name: AUTH_TYPE
-              value: google_oauth
-            - name: POSTGRES_HOST
-              value: relational-db-service
-            - name: VESPA_HOST
-              value: document-index-service
-            - name: GOOGLE_OAUTH_CLIENT_ID
-              valueFrom:
-                secretKeyRef:
-                  name: danswer-secrets
-                  key: google_oauth_client_id
-            - name: GOOGLE_OAUTH_CLIENT_SECRET
-              valueFrom:
-                secretKeyRef:
-                  name: danswer-secrets
-                  key: google_oauth_client_secret
+          # There are some extra values since this is shared between services
+          # There are no conflicts though, extra env variables are simply ignored
+          envFrom:
+            - configMapRef:
+                name: env-configmap
           volumeMounts:
             - name: dynamic-storage
               mountPath: /home/storage
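
Worth noting what the removed block carried: the Google OAuth pair was injected from the danswer-secrets Secret via secretKeyRef, and with only a configMapRef those values would now have to live as plain-text ConfigMap data. A hedged sketch of keeping them in the Secret instead, since envFrom accepts Secret sources too (the Secret's keys would then need to match the env variable names, unlike the lowercase google_oauth_client_id keys above):

          envFrom:
            - configMapRef:
                name: env-configmap
            - secretRef:
                name: danswer-secrets # assumes keys renamed to GOOGLE_OAUTH_CLIENT_ID / GOOGLE_OAUTH_CLIENT_SECRET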

---------

@@ -17,11 +17,11 @@ spec:
           image: danswer/danswer-backend:latest
           imagePullPolicy: IfNotPresent
           command: ["/usr/bin/supervisord"]
-          env:
-            - name: POSTGRES_HOST
-              value: relational-db-service
-            - name: VESPA_HOST
-              value: document-index-service
+          # There are some extra values since this is shared between services
+          # There are no conflicts though, extra env variables are simply ignored
+          envFrom:
+            - configMapRef:
+                name: env-configmap
           volumeMounts:
             - name: dynamic-storage
               mountPath: /home/storage

---------

@@ -0,0 +1,69 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: env-configmap
+data:
+  # Auth Setting, also check the secrets file
+  AUTH_TYPE: "disabled" # Change this for production uses unless Danswer is only accessible behind VPN
+  SESSION_EXPIRE_TIME_SECONDS: "86400" # 1 Day Default
+  VALID_EMAIL_DOMAINS: "" # Can be something like danswer.ai, as an extra double-check
+  # Gen AI Settings
+  GEN_AI_MODEL_PROVIDER: "openai"
+  GEN_AI_MODEL_VERSION: "gpt-3.5-turbo" # Use GPT-4 if you have it
+  FAST_GEN_AI_MODEL_VERSION: "gpt-3.5-turbo"
+  GEN_AI_API_KEY: ""
+  GEN_AI_API_ENDPOINT: ""
+  GEN_AI_API_VERSION: ""
+  GEN_AI_LLM_PROVIDER_TYPE: ""
+  QA_TIMEOUT: "60"
+  NUM_DOCUMENT_TOKENS_FED_TO_GENERATIVE_MODEL: ""
+  DISABLE_LLM_FILTER_EXTRACTION: ""
+  DISABLE_LLM_CHUNK_FILTER: ""
+  DISABLE_LLM_CHOOSE_SEARCH: ""
+  # Query Options
+  DOC_TIME_DECAY: ""
+  HYBRID_ALPHA: ""
+  EDIT_KEYWORD_QUERY: ""
+  MULTILINGUAL_QUERY_EXPANSION: ""
+  QA_PROMPT_OVERRIDE: ""
+  # Other Services
+  POSTGRES_HOST: "relational-db-service"
+  VESPA_HOST: "document-index-service"
+  # Don't change the NLP models unless you know what you're doing
+  DOCUMENT_ENCODER_MODEL: ""
+  NORMALIZE_EMBEDDINGS: ""
+  ASYM_QUERY_PREFIX: ""
+  ASYM_PASSAGE_PREFIX: ""
+  ENABLE_RERANKING_REAL_TIME_FLOW: ""
+  ENABLE_RERANKING_ASYNC_FLOW: ""
+  MODEL_SERVER_HOST: ""
+  MODEL_SERVER_PORT: ""
+  INDEXING_MODEL_SERVER_HOST: ""
+  MIN_THREADS_ML_MODELS: ""
+  # Indexing Configs
+  NUM_INDEXING_WORKERS: ""
+  DASK_JOB_CLIENT_ENABLED: ""
+  CONTINUE_ON_CONNECTOR_FAILURE: ""
+  EXPERIMENTAL_CHECKPOINTING_ENABLED: ""
+  CONFLUENCE_CONNECTOR_LABELS_TO_SKIP: ""
+  GONG_CONNECTOR_START_TIME: ""
+  NOTION_CONNECTOR_ENABLE_RECURSIVE_PAGE_LOOKUP: ""
+  # DanswerBot SlackBot Configs
+  DANSWER_BOT_SLACK_APP_TOKEN: ""
+  DANSWER_BOT_SLACK_BOT_TOKEN: ""
+  DANSWER_BOT_DISABLE_DOCS_ONLY_ANSWER: ""
+  DANSWER_BOT_DISPLAY_ERROR_MSGS: ""
+  DANSWER_BOT_RESPOND_EVERY_CHANNEL: ""
+  DANSWER_BOT_DISABLE_COT: "" # Currently unused
+  NOTIFY_SLACKBOT_NO_ANSWER: ""
+  # Logging
+  # Optional Telemetry, please keep it on (nothing sensitive is collected)? <3
+  # https://docs.danswer.dev/more/telemetry
+  DISABLE_TELEMETRY: ""
+  LOG_LEVEL: ""
+  LOG_ALL_MODEL_INTERACTIONS: ""
+  LOG_VESPA_TIMING_INFORMATION: ""
+  # Shared or Non-backend Related
+  INTERNAL_URL: "http://api-server-service:80" # for web server
+  WEB_DOMAIN: "http://localhost:3000" # for web server and api server
+  DOMAIN: "localhost" # for nginx

---------

@@ -37,7 +37,10 @@ spec:
             - containerPort: 80
           env:
             - name: DOMAIN
-              value: localhost
+              valueFrom:
+                configMapKeyRef:
+                  name: env-configmap
+                  key: DOMAIN
           volumeMounts:
             - name: nginx-conf
               mountPath: /etc/nginx/conf.d

---------

@@ -31,6 +31,8 @@ spec:
           imagePullPolicy: IfNotPresent
           ports:
             - containerPort: 3000
-          env:
-            - name: INTERNAL_URL
-              value: "http://api-server-service:80"
+          # There are some extra values since this is shared between services
+          # There are no conflicts though, extra env variables are simply ignored
+          envFrom:
+            - configMapRef:
+                name: env-configmap
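
One last property makes the shared-ConfigMap pattern workable: explicit env entries take precedence over values injected via envFrom, so a single deployment can still override one key without forking the map. A hedged sketch (the alternate port is purely illustrative):

          envFrom:
            - configMapRef:
                name: env-configmap
          env:
            - name: INTERNAL_URL # wins over the ConfigMap's INTERNAL_URL for this pod only
              value: "http://api-server-service:8080"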