mirror of https://github.com/danswer-ai/danswer.git, synced 2025-05-03 16:30:21 +02:00
Multi tenant tests (#3919)
* ensure fail on multi tenant successfully
* attempted fix
* update integration tests
* minor update
* improve
* improve workflow
* fix migrations
* many more logs
* quick fix
* improve
* fix typo
* quick nit
* attempted fix
* very minor clean up
This commit is contained in: parent bfa4fbd691, commit 48ac690a70
.github/workflows/pr-integration-tests.yml (vendored, 44 changed lines)
@@ -94,16 +94,20 @@ jobs:
           cd deployment/docker_compose
           ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=true \
           MULTI_TENANT=true \
-          AUTH_TYPE=basic \
+          LOG_LEVEL=DEBUG \
+          AUTH_TYPE=cloud \
           REQUIRE_EMAIL_VERIFICATION=false \
           DISABLE_TELEMETRY=true \
           IMAGE_TAG=test \
-          docker compose -f docker-compose.dev.yml -p danswer-stack up -d
+          DEV_MODE=true \
+          docker compose -f docker-compose.multitenant-dev.yml -p danswer-stack up -d
         id: start_docker_multi_tenant

       # In practice, `cloud` Auth type would require OAUTH credentials to be set.
       - name: Run Multi-Tenant Integration Tests
         run: |
+          echo "Waiting for 3 minutes to ensure API server is ready..."
+          sleep 180
           echo "Running integration tests..."
           docker run --rm --network danswer-stack_default \
             --name test-runner \
@@ -112,6 +116,7 @@ jobs:
             -e POSTGRES_PASSWORD=password \
             -e POSTGRES_DB=postgres \
             -e VESPA_HOST=index \
+            -e LOG_LEVEL=DEBUG \
             -e REDIS_HOST=cache \
             -e API_SERVER_HOST=api_server \
             -e OPENAI_API_KEY=${OPENAI_API_KEY} \
@@ -119,6 +124,10 @@ jobs:
             -e TEST_WEB_HOSTNAME=test-runner \
             -e AUTH_TYPE=cloud \
             -e MULTI_TENANT=true \
+            -e REQUIRE_EMAIL_VERIFICATION=false \
+            -e DISABLE_TELEMETRY=true \
+            -e IMAGE_TAG=test \
+            -e DEV_MODE=true \
             onyxdotapp/onyx-integration:test \
             /app/tests/integration/multitenant_tests
         continue-on-error: true
@@ -126,17 +135,17 @@ jobs:

       - name: Check multi-tenant test results
         run: |
-          if [ ${{ steps.run_tests.outcome }} == 'failure' ]; then
+          if [ ${{ steps.run_multitenant_tests.outcome }} == 'failure' ]; then
-            echo "Integration tests failed. Exiting with error."
+            echo "Multi-tenant integration tests failed. Exiting with error."
             exit 1
           else
-            echo "All integration tests passed successfully."
+            echo "All multi-tenant integration tests passed successfully."
           fi

       - name: Stop multi-tenant Docker containers
         run: |
           cd deployment/docker_compose
-          docker compose -f docker-compose.dev.yml -p danswer-stack down -v
+          docker compose -f docker-compose.multitenant-dev.yml -p danswer-stack down -v

       - name: Start Docker containers
         run: |
@@ -146,6 +155,7 @@ jobs:
           REQUIRE_EMAIL_VERIFICATION=false \
           DISABLE_TELEMETRY=true \
           IMAGE_TAG=test \
+          LOG_LEVEL=DEBUG \
           docker compose -f docker-compose.dev.yml -p danswer-stack up -d
         id: start_docker

@@ -194,6 +204,7 @@ jobs:
             -e POSTGRES_DB=postgres \
             -e VESPA_HOST=index \
             -e REDIS_HOST=cache \
+            -e LOG_LEVEL=DEBUG \
             -e API_SERVER_HOST=api_server \
             -e OPENAI_API_KEY=${OPENAI_API_KEY} \
             -e SLACK_BOT_TOKEN=${SLACK_BOT_TOKEN} \
@@ -216,27 +227,30 @@ jobs:
             echo "All integration tests passed successfully."
           fi

-      # save before stopping the containers so the logs can be captured
-      - name: Save Docker logs
-        if: success() || failure()
+      # ------------------------------------------------------------
+      # Always gather logs BEFORE "down":
+      - name: Dump API server logs
+        if: always()
         run: |
           cd deployment/docker_compose
-          docker compose -f docker-compose.dev.yml -p danswer-stack logs > docker-compose.log
-          mv docker-compose.log ${{ github.workspace }}/docker-compose.log
+          docker compose -f docker-compose.dev.yml -p danswer-stack logs --no-color api_server > $GITHUB_WORKSPACE/api_server.log || true

-      - name: Stop Docker containers
+      - name: Dump all-container logs (optional)
+        if: always()
         run: |
           cd deployment/docker_compose
-          docker compose -f docker-compose.dev.yml -p danswer-stack down -v
+          docker compose -f docker-compose.dev.yml -p danswer-stack logs --no-color > $GITHUB_WORKSPACE/docker-compose.log || true

       - name: Upload logs
-        if: success() || failure()
+        if: always()
         uses: actions/upload-artifact@v4
         with:
-          name: docker-logs
+          name: docker-all-logs
           path: ${{ github.workspace }}/docker-compose.log
+      # ------------------------------------------------------------

       - name: Stop Docker containers
+        if: always()
         run: |
           cd deployment/docker_compose
           docker compose -f docker-compose.dev.yml -p danswer-stack down -v
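The multi-tenant job above hard-codes `sleep 180` to wait for the API server. A readiness poll is usually more robust than a fixed sleep; a minimal Python sketch of that idea (the `/health` route and base URL are assumptions, not taken from this diff):

    import time
    import requests

    API_URL = "http://localhost:8080"  # hypothetical; adjust to however the runner reaches the API server

    def wait_for_api_server(timeout: float = 180.0, interval: float = 5.0) -> None:
        # Poll a readiness endpoint instead of sleeping a fixed 3 minutes.
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            try:
                if requests.get(f"{API_URL}/health", timeout=5).ok:  # "/health" is an assumed route
                    return
            except requests.RequestException:
                pass  # server not accepting connections yet; keep polling
            time.sleep(interval)
        raise TimeoutError(f"API server not ready after {timeout:.0f}s")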
@@ -5,7 +5,6 @@ Revises: 47e5bef3a1d7
 Create Date: 2024-11-06 13:15:53.302644

 """
-import logging
 from typing import cast

 from alembic import op
 import sqlalchemy as sa
@@ -20,13 +19,8 @@ down_revision = "47e5bef3a1d7"
 branch_labels: None = None
 depends_on: None = None

-# Configure logging
-logger = logging.getLogger("alembic.runtime.migration")
-logger.setLevel(logging.INFO)
-

 def upgrade() -> None:
-    logger.info(f"{revision}: create_table: slack_bot")
     # Create new slack_bot table
     op.create_table(
         "slack_bot",
@@ -63,7 +57,6 @@ def upgrade() -> None:
     )

     # Handle existing Slack bot tokens first
-    logger.info(f"{revision}: Checking for existing Slack bot.")
     bot_token = None
     app_token = None
     first_row_id = None
@@ -71,15 +64,12 @@ def upgrade() -> None:
     try:
         tokens = cast(dict, get_kv_store().load("slack_bot_tokens_config_key"))
     except Exception:
-        logger.warning("No existing Slack bot tokens found.")
         tokens = {}

     bot_token = tokens.get("bot_token")
     app_token = tokens.get("app_token")

     if bot_token and app_token:
-        logger.info(f"{revision}: Found bot and app tokens.")
-
         session = Session(bind=op.get_bind())
         new_slack_bot = SlackBot(
             name="Slack Bot (Migrated)",
@@ -170,10 +160,9 @@ def upgrade() -> None:
     # Clean up old tokens if they existed
     try:
         if bot_token and app_token:
-            logger.info(f"{revision}: Removing old bot and app tokens.")
             get_kv_store().delete("slack_bot_tokens_config_key")
     except Exception:
-        logger.warning("tried to delete tokens in dynamic config but failed")
+        pass
     # Rename the table
     op.rename_table(
         "slack_bot_config__standard_answer_category",
@@ -190,8 +179,6 @@ def upgrade() -> None:
     # Drop the table with CASCADE to handle dependent objects
     op.execute("DROP TABLE slack_bot_config CASCADE")

-    logger.info(f"{revision}: Migration complete.")
-

 def downgrade() -> None:
     # Recreate the old slack_bot_config table
@@ -273,7 +260,7 @@ def downgrade() -> None:
             }
             get_kv_store().store("slack_bot_tokens_config_key", tokens)
         except Exception:
-            logger.warning("Failed to save tokens back to KV store")
+            pass

     # Drop the new tables in reverse order
     op.drop_table("slack_channel_config")
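The compose file introduced later in this commit runs migrations via `alembic -n schema_private upgrade head`. For reference, the same upgrade can be driven from Python through Alembic's public API; a sketch (the ini path is assumed relative to the backend directory):

    from alembic import command
    from alembic.config import Config

    # "-n schema_private" selects an alternate section of alembic.ini, mirroring the compose command.
    cfg = Config("alembic.ini", ini_section="schema_private")
    command.upgrade(cfg, "head")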
@@ -64,6 +64,7 @@ async def _get_tenant_id_from_request(

     try:
         # Look up token data in Redis
+
         token_data = await retrieve_auth_token_data_from_redis(request)

         if not token_data:
@ -24,6 +24,7 @@ from ee.onyx.server.tenants.user_mapping import get_tenant_id_for_email
|
|||||||
from ee.onyx.server.tenants.user_mapping import user_owns_a_tenant
|
from ee.onyx.server.tenants.user_mapping import user_owns_a_tenant
|
||||||
from onyx.auth.users import exceptions
|
from onyx.auth.users import exceptions
|
||||||
from onyx.configs.app_configs import CONTROL_PLANE_API_BASE_URL
|
from onyx.configs.app_configs import CONTROL_PLANE_API_BASE_URL
|
||||||
|
from onyx.configs.app_configs import DEV_MODE
|
||||||
from onyx.configs.constants import MilestoneRecordType
|
from onyx.configs.constants import MilestoneRecordType
|
||||||
from onyx.db.engine import get_session_with_tenant
|
from onyx.db.engine import get_session_with_tenant
|
||||||
from onyx.db.engine import get_sqlalchemy_engine
|
from onyx.db.engine import get_sqlalchemy_engine
|
||||||
@ -85,7 +86,8 @@ async def create_tenant(email: str, referral_source: str | None = None) -> str:
|
|||||||
# Provision tenant on data plane
|
# Provision tenant on data plane
|
||||||
await provision_tenant(tenant_id, email)
|
await provision_tenant(tenant_id, email)
|
||||||
# Notify control plane
|
# Notify control plane
|
||||||
await notify_control_plane(tenant_id, email, referral_source)
|
if not DEV_MODE:
|
||||||
|
await notify_control_plane(tenant_id, email, referral_source)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(f"Tenant provisioning failed: {e}")
|
logger.error(f"Tenant provisioning failed: {e}")
|
||||||
await rollback_tenant_provisioning(tenant_id)
|
await rollback_tenant_provisioning(tenant_id)
|
||||||
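The `DEV_MODE` guard means a local multi-tenant stack provisions tenants without calling out to a control plane. A self-contained sketch of the pattern (the env parsing and stub bodies are illustrative, not the real onyx implementations):

    import asyncio
    import os

    DEV_MODE = os.environ.get("DEV_MODE", "").lower() == "true"  # assumed parsing; the real flag lives in onyx.configs.app_configs

    async def provision_tenant(tenant_id: str, email: str) -> None:
        print(f"provisioned {tenant_id} for {email}")  # stand-in for the data-plane work

    async def notify_control_plane(tenant_id: str, email: str, referral_source: str | None) -> None:
        print(f"notified control plane about {tenant_id}")  # stand-in for the HTTP call

    async def create_tenant(tenant_id: str, email: str, referral_source: str | None = None) -> None:
        await provision_tenant(tenant_id, email)
        if not DEV_MODE:  # a dev stack has no control plane to notify
            await notify_control_plane(tenant_id, email, referral_source)

    asyncio.run(create_tenant("tenant_dev", "test@test.com"))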
@@ -34,6 +34,7 @@ from onyx.auth.users import current_curator_or_admin_user
 from onyx.auth.users import current_user
 from onyx.auth.users import optional_user
 from onyx.configs.app_configs import AUTH_TYPE
+from onyx.configs.app_configs import DEV_MODE
 from onyx.configs.app_configs import ENABLE_EMAIL_INVITES
 from onyx.configs.app_configs import SESSION_EXPIRE_TIME_SECONDS
 from onyx.configs.app_configs import VALID_EMAIL_DOMAINS
@@ -286,7 +287,7 @@ def bulk_invite_users(
                 detail=f"Invalid email address: {email} - {str(e)}",
             )

-    if MULTI_TENANT:
+    if MULTI_TENANT and not DEV_MODE:
         try:
             fetch_ee_implementation_or_noop(
                 "onyx.server.tenants.provisioning", "add_users_to_tenant", None
@@ -70,8 +70,8 @@ COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf
 # Set up application files
 COPY ./onyx /app/onyx
 COPY ./shared_configs /app/shared_configs
-COPY ./alembic /app/alembic
 COPY ./alembic_tenants /app/alembic_tenants
+COPY ./alembic /app/alembic
 COPY ./alembic.ini /app/alembic.ini
 COPY ./pytest.ini /app/pytest.ini
 COPY supervisord.conf /usr/etc/supervisord.conf
@ -24,35 +24,6 @@ def generate_auth_token() -> str:
|
|||||||
|
|
||||||
|
|
||||||
class TenantManager:
|
class TenantManager:
|
||||||
@staticmethod
|
|
||||||
def create(
|
|
||||||
tenant_id: str | None = None,
|
|
||||||
initial_admin_email: str | None = None,
|
|
||||||
referral_source: str | None = None,
|
|
||||||
) -> dict[str, str]:
|
|
||||||
body = {
|
|
||||||
"tenant_id": tenant_id,
|
|
||||||
"initial_admin_email": initial_admin_email,
|
|
||||||
"referral_source": referral_source,
|
|
||||||
}
|
|
||||||
|
|
||||||
token = generate_auth_token()
|
|
||||||
headers = {
|
|
||||||
"Authorization": f"Bearer {token}",
|
|
||||||
"X-API-KEY": "",
|
|
||||||
"Content-Type": "application/json",
|
|
||||||
}
|
|
||||||
|
|
||||||
response = requests.post(
|
|
||||||
url=f"{API_SERVER_URL}/tenants/create",
|
|
||||||
json=body,
|
|
||||||
headers=headers,
|
|
||||||
)
|
|
||||||
|
|
||||||
response.raise_for_status()
|
|
||||||
|
|
||||||
return response.json()
|
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_all_users(
|
def get_all_users(
|
||||||
user_performing_action: DATestUser | None = None,
|
user_performing_action: DATestUser | None = None,
|
||||||
@@ -92,6 +92,7 @@ class UserManager:

         # Set cookies in the headers
         test_user.headers["Cookie"] = f"fastapiusersauth={session_cookie}; "
+        test_user.cookies = {"fastapiusersauth": session_cookie}
         return test_user

     @staticmethod
@@ -102,6 +103,7 @@ class UserManager:
         response = requests.get(
             url=f"{API_SERVER_URL}/me",
             headers=user_to_verify.headers,
+            cookies=user_to_verify.cookies,
         )

         if user_to_verify.is_active is False:
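Storing the session cookie on the test user and passing it via `cookies=` lets `requests` manage the cookie itself rather than relying on a hand-built `Cookie` header surviving every call site. A small sketch of the difference (URL and cookie value are placeholders):

    import requests

    session_cookie = "example-session-value"  # placeholder

    # Hand-built header: sent verbatim, but only on requests that reuse these exact headers.
    headers = {"Cookie": f"fastapiusersauth={session_cookie}; "}

    # Cookie jar: requests formats the header itself and can merge it with other cookies.
    resp = requests.get(
        "http://localhost:8080/me",  # placeholder endpoint
        headers=headers,
        cookies={"fastapiusersauth": session_cookie},
    )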
@@ -242,6 +242,18 @@ def reset_postgres_multitenant() -> None:
         schema_name = schema[0]
         cur.execute(f'DROP SCHEMA "{schema_name}" CASCADE')

+    # Drop tables in the public schema
+    cur.execute(
+        """
+        SELECT tablename FROM pg_tables
+        WHERE schemaname = 'public'
+        """
+    )
+    public_tables = cur.fetchall()
+    for table in public_tables:
+        table_name = table[0]
+        cur.execute(f'DROP TABLE IF EXISTS public."{table_name}" CASCADE')
+
     cur.close()
     conn.close()
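After a reset like the one above, it can be worth asserting that no tenant schemas survived. A sketch (the psycopg2-style connection and the 'tenant_%' prefix are assumptions, not taken from this diff):

    import psycopg2  # assumed driver; match whatever the integration utilities use

    conn = psycopg2.connect(host="localhost", user="postgres", password="password", dbname="postgres")
    cur = conn.cursor()
    # Onyx tenant schemas are conventionally named with a tenant prefix; 'tenant_%' is assumed here.
    cur.execute("SELECT schema_name FROM information_schema.schemata WHERE schema_name LIKE 'tenant_%'")
    leftover = [row[0] for row in cur.fetchall()]
    assert not leftover, f"schemas survived the reset: {leftover}"
    cur.close()
    conn.close()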
@@ -44,6 +44,7 @@ class DATestUser(BaseModel):
     headers: dict
     role: UserRole
     is_active: bool
+    cookies: dict = {}


 class DATestPersonaLabel(BaseModel):
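`cookies: dict = {}` is safe as a Pydantic field default: Pydantic copies the default for each instance, so test users do not share one dict the way a plain mutable class attribute would. A quick check:

    from pydantic import BaseModel

    class Example(BaseModel):
        cookies: dict = {}

    a, b = Example(), Example()
    a.cookies["k"] = "v"
    assert b.cookies == {}  # each instance gets its own copy of the default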
@@ -4,7 +4,6 @@ from tests.integration.common_utils.managers.cc_pair import CCPairManager
 from tests.integration.common_utils.managers.chat import ChatSessionManager
 from tests.integration.common_utils.managers.document import DocumentManager
 from tests.integration.common_utils.managers.llm_provider import LLMProviderManager
-from tests.integration.common_utils.managers.tenant import TenantManager
 from tests.integration.common_utils.managers.user import UserManager
 from tests.integration.common_utils.test_models import DATestAPIKey
 from tests.integration.common_utils.test_models import DATestCCPair
@@ -13,25 +12,28 @@ from tests.integration.common_utils.test_models import DATestUser


 def test_multi_tenant_access_control(reset_multitenant: None) -> None:
-    # Create Tenant 1 and its Admin User
-    TenantManager.create("tenant_dev1", "test1@test.com", "Data Plane Registration")
-    test_user1: DATestUser = UserManager.create(name="test1", email="test1@test.com")
-    assert UserManager.is_role(test_user1, UserRole.ADMIN)
+    # Create an admin user (the first user created is automatically an admin and also provisions the tenant)
+    admin_user1: DATestUser = UserManager.create(
+        email="admin@onyx-test.com",
+    )
+
+    assert UserManager.is_role(admin_user1, UserRole.ADMIN)

     # Create Tenant 2 and its Admin User
-    TenantManager.create("tenant_dev2", "test2@test.com", "Data Plane Registration")
-    test_user2: DATestUser = UserManager.create(name="test2", email="test2@test.com")
-    assert UserManager.is_role(test_user2, UserRole.ADMIN)
+    admin_user2: DATestUser = UserManager.create(
+        email="admin2@onyx-test.com",
+    )
+    assert UserManager.is_role(admin_user2, UserRole.ADMIN)

     # Create connectors for Tenant 1
     cc_pair_1: DATestCCPair = CCPairManager.create_from_scratch(
-        user_performing_action=test_user1,
+        user_performing_action=admin_user1,
     )
     api_key_1: DATestAPIKey = APIKeyManager.create(
-        user_performing_action=test_user1,
+        user_performing_action=admin_user1,
     )
-    api_key_1.headers.update(test_user1.headers)
-    LLMProviderManager.create(user_performing_action=test_user1)
+    api_key_1.headers.update(admin_user1.headers)
+    LLMProviderManager.create(user_performing_action=admin_user1)

     # Seed documents for Tenant 1
     cc_pair_1.documents = []
@@ -49,13 +51,13 @@ def test_multi_tenant_access_control(reset_multitenant: None) -> None:

     # Create connectors for Tenant 2
     cc_pair_2: DATestCCPair = CCPairManager.create_from_scratch(
-        user_performing_action=test_user2,
+        user_performing_action=admin_user2,
     )
     api_key_2: DATestAPIKey = APIKeyManager.create(
-        user_performing_action=test_user2,
+        user_performing_action=admin_user2,
     )
-    api_key_2.headers.update(test_user2.headers)
-    LLMProviderManager.create(user_performing_action=test_user2)
+    api_key_2.headers.update(admin_user2.headers)
+    LLMProviderManager.create(user_performing_action=admin_user2)

     # Seed documents for Tenant 2
     cc_pair_2.documents = []
@@ -76,17 +78,17 @@ def test_multi_tenant_access_control(reset_multitenant: None) -> None:

     # Create chat sessions for each user
     chat_session1: DATestChatSession = ChatSessionManager.create(
-        user_performing_action=test_user1
+        user_performing_action=admin_user1
     )
     chat_session2: DATestChatSession = ChatSessionManager.create(
-        user_performing_action=test_user2
+        user_performing_action=admin_user2
     )

     # User 1 sends a message and gets a response
     response1 = ChatSessionManager.send_message(
         chat_session_id=chat_session1.id,
         message="What is in Tenant 1's documents?",
-        user_performing_action=test_user1,
+        user_performing_action=admin_user1,
     )
     # Assert that the search tool was used
     assert response1.tool_name == "run_search"
@@ -100,14 +102,16 @@ def test_multi_tenant_access_control(reset_multitenant: None) -> None:
     ), "Tenant 2 document IDs should not be in the response"

     # Assert that the contents are correct
-    for doc in response1.tool_result or []:
-        assert doc["content"] == "Tenant 1 Document Content"
+    assert any(
+        doc["content"] == "Tenant 1 Document Content"
+        for doc in response1.tool_result or []
+    ), "Tenant 1 Document Content not found in any document"

     # User 2 sends a message and gets a response
     response2 = ChatSessionManager.send_message(
         chat_session_id=chat_session2.id,
         message="What is in Tenant 2's documents?",
-        user_performing_action=test_user2,
+        user_performing_action=admin_user2,
     )
     # Assert that the search tool was used
     assert response2.tool_name == "run_search"
@@ -119,15 +123,18 @@ def test_multi_tenant_access_control(reset_multitenant: None) -> None:
     assert not response_doc_ids.intersection(
         tenant1_doc_ids
     ), "Tenant 1 document IDs should not be in the response"

     # Assert that the contents are correct
-    for doc in response2.tool_result or []:
-        assert doc["content"] == "Tenant 2 Document Content"
+    assert any(
+        doc["content"] == "Tenant 2 Document Content"
+        for doc in response2.tool_result or []
+    ), "Tenant 2 Document Content not found in any document"

     # User 1 tries to access Tenant 2's documents
     response_cross = ChatSessionManager.send_message(
         chat_session_id=chat_session1.id,
         message="What is in Tenant 2's documents?",
-        user_performing_action=test_user1,
+        user_performing_action=admin_user1,
     )
     # Assert that the search tool was used
     assert response_cross.tool_name == "run_search"
@@ -140,7 +147,7 @@ def test_multi_tenant_access_control(reset_multitenant: None) -> None:
     response_cross2 = ChatSessionManager.send_message(
         chat_session_id=chat_session2.id,
         message="What is in Tenant 1's documents?",
-        user_performing_action=test_user2,
+        user_performing_action=admin_user2,
     )
     # Assert that the search tool was used
     assert response_cross2.tool_name == "run_search"
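Note the semantic change in the content assertions above: the old loop required every returned document to match the expected content, while `any(...)` only requires at least one match. In isolation:

    docs = [
        {"content": "Tenant 1 Document Content"},
        {"content": "some other snippet"},  # would have failed the old per-document assert
    ]

    # Old form: every document must match.
    # for doc in docs:
    #     assert doc["content"] == "Tenant 1 Document Content"

    # New form: at least one document must match.
    assert any(doc["content"] == "Tenant 1 Document Content" for doc in docs)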
@@ -4,14 +4,12 @@ from onyx.db.models import UserRole
 from tests.integration.common_utils.managers.cc_pair import CCPairManager
 from tests.integration.common_utils.managers.connector import ConnectorManager
 from tests.integration.common_utils.managers.credential import CredentialManager
-from tests.integration.common_utils.managers.tenant import TenantManager
 from tests.integration.common_utils.managers.user import UserManager
 from tests.integration.common_utils.test_models import DATestUser


 # Test flow from creating tenant to registering as a user
 def test_tenant_creation(reset_multitenant: None) -> None:
-    TenantManager.create("tenant_dev", "test@test.com", "Data Plane Registration")
     test_user: DATestUser = UserManager.create(name="test", email="test@test.com")

     assert UserManager.is_role(test_user, UserRole.ADMIN)
deployment/docker_compose/docker-compose.multitenant-dev.yml (new file, 423 lines)
@@ -0,0 +1,423 @@
services:
  api_server:
    image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
    build:
      context: ../../backend
      dockerfile: Dockerfile
    command: >
      /bin/sh -c "
      alembic -n schema_private upgrade head &&
      echo \"Starting Onyx Api Server\" &&
      uvicorn onyx.main:app --host 0.0.0.0 --port 8080"
    depends_on:
      - relational_db
      - index
      - cache
      - inference_model_server
    restart: always
    ports:
      - "8080:8080"
    environment:
      - ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=true
      - MULTI_TENANT=true
      - LOG_LEVEL=DEBUG
      - AUTH_TYPE=cloud
      - REQUIRE_EMAIL_VERIFICATION=false
      - DISABLE_TELEMETRY=true
      - IMAGE_TAG=test
      - DEV_MODE=true
      # Auth Settings
      - SESSION_EXPIRE_TIME_SECONDS=${SESSION_EXPIRE_TIME_SECONDS:-}
      - ENCRYPTION_KEY_SECRET=${ENCRYPTION_KEY_SECRET:-}
      - VALID_EMAIL_DOMAINS=${VALID_EMAIL_DOMAINS:-}
      - GOOGLE_OAUTH_CLIENT_ID=${GOOGLE_OAUTH_CLIENT_ID:-}
      - GOOGLE_OAUTH_CLIENT_SECRET=${GOOGLE_OAUTH_CLIENT_SECRET:-}
      - SMTP_SERVER=${SMTP_SERVER:-}
      - SMTP_PORT=${SMTP_PORT:-587}
      - SMTP_USER=${SMTP_USER:-}
      - SMTP_PASS=${SMTP_PASS:-}
      - ENABLE_EMAIL_INVITES=${ENABLE_EMAIL_INVITES:-}
      - EMAIL_FROM=${EMAIL_FROM:-}
      - OAUTH_CLIENT_ID=${OAUTH_CLIENT_ID:-}
      - OAUTH_CLIENT_SECRET=${OAUTH_CLIENT_SECRET:-}
      - OPENID_CONFIG_URL=${OPENID_CONFIG_URL:-}
      - TRACK_EXTERNAL_IDP_EXPIRY=${TRACK_EXTERNAL_IDP_EXPIRY:-}
      - CORS_ALLOWED_ORIGIN=${CORS_ALLOWED_ORIGIN:-}
      # Gen AI Settings
      - GEN_AI_MAX_TOKENS=${GEN_AI_MAX_TOKENS:-}
      - QA_TIMEOUT=${QA_TIMEOUT:-}
      - MAX_CHUNKS_FED_TO_CHAT=${MAX_CHUNKS_FED_TO_CHAT:-}
      - DISABLE_LLM_CHOOSE_SEARCH=${DISABLE_LLM_CHOOSE_SEARCH:-}
      - DISABLE_LLM_QUERY_REPHRASE=${DISABLE_LLM_QUERY_REPHRASE:-}
      - DISABLE_GENERATIVE_AI=${DISABLE_GENERATIVE_AI:-}
      - DISABLE_LITELLM_STREAMING=${DISABLE_LITELLM_STREAMING:-}
      - LITELLM_EXTRA_HEADERS=${LITELLM_EXTRA_HEADERS:-}
      - BING_API_KEY=${BING_API_KEY:-}
      - DISABLE_LLM_DOC_RELEVANCE=${DISABLE_LLM_DOC_RELEVANCE:-}
      - GEN_AI_API_KEY=${GEN_AI_API_KEY:-}
      - TOKEN_BUDGET_GLOBALLY_ENABLED=${TOKEN_BUDGET_GLOBALLY_ENABLED:-}
      # Query Options
      - DOC_TIME_DECAY=${DOC_TIME_DECAY:-}
      - HYBRID_ALPHA=${HYBRID_ALPHA:-}
      - EDIT_KEYWORD_QUERY=${EDIT_KEYWORD_QUERY:-}
      - MULTILINGUAL_QUERY_EXPANSION=${MULTILINGUAL_QUERY_EXPANSION:-}
      - LANGUAGE_HINT=${LANGUAGE_HINT:-}
      - LANGUAGE_CHAT_NAMING_HINT=${LANGUAGE_CHAT_NAMING_HINT:-}
      - QA_PROMPT_OVERRIDE=${QA_PROMPT_OVERRIDE:-}
      # Other services
      - POSTGRES_HOST=relational_db
      - POSTGRES_DEFAULT_SCHEMA=${POSTGRES_DEFAULT_SCHEMA:-}
      - VESPA_HOST=index
      - REDIS_HOST=cache
      - WEB_DOMAIN=${WEB_DOMAIN:-}
      # Don't change the NLP model configs unless you know what you're doing
      - EMBEDDING_BATCH_SIZE=${EMBEDDING_BATCH_SIZE:-}
      - DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
      - DOC_EMBEDDING_DIM=${DOC_EMBEDDING_DIM:-}
      - NORMALIZE_EMBEDDINGS=${NORMALIZE_EMBEDDINGS:-}
      - ASYM_QUERY_PREFIX=${ASYM_QUERY_PREFIX:-}
      - DISABLE_RERANK_FOR_STREAMING=${DISABLE_RERANK_FOR_STREAMING:-}
      - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
      - MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-}
      - LOG_ALL_MODEL_INTERACTIONS=${LOG_ALL_MODEL_INTERACTIONS:-}
      - LOG_DANSWER_MODEL_INTERACTIONS=${LOG_DANSWER_MODEL_INTERACTIONS:-}
      - LOG_INDIVIDUAL_MODEL_TOKENS=${LOG_INDIVIDUAL_MODEL_TOKENS:-}
      - LOG_VESPA_TIMING_INFORMATION=${LOG_VESPA_TIMING_INFORMATION:-}
      - LOG_ENDPOINT_LATENCY=${LOG_ENDPOINT_LATENCY:-}
      - LOG_POSTGRES_LATENCY=${LOG_POSTGRES_LATENCY:-}
      - LOG_POSTGRES_CONN_COUNTS=${LOG_POSTGRES_CONN_COUNTS:-}
      - CELERY_BROKER_POOL_LIMIT=${CELERY_BROKER_POOL_LIMIT:-}
      - LITELLM_CUSTOM_ERROR_MESSAGE_MAPPINGS=${LITELLM_CUSTOM_ERROR_MESSAGE_MAPPINGS:-}
      # Egnyte OAuth Configs
      - EGNYTE_CLIENT_ID=${EGNYTE_CLIENT_ID:-}
      - EGNYTE_CLIENT_SECRET=${EGNYTE_CLIENT_SECRET:-}
      - EGNYTE_LOCALHOST_OVERRIDE=${EGNYTE_LOCALHOST_OVERRIDE:-}
      # Linear OAuth Configs
      - LINEAR_CLIENT_ID=${LINEAR_CLIENT_ID:-}
      - LINEAR_CLIENT_SECRET=${LINEAR_CLIENT_SECRET:-}
      # Analytics Configs
      - SENTRY_DSN=${SENTRY_DSN:-}
      # Chat Configs
      - HARD_DELETE_CHATS=${HARD_DELETE_CHATS:-}
      # Enables the use of bedrock models or IAM Auth
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-}
      - AWS_REGION_NAME=${AWS_REGION_NAME:-}
      - API_KEY_HASH_ROUNDS=${API_KEY_HASH_ROUNDS:-}
      # Seeding configuration
      - USE_IAM_AUTH=${USE_IAM_AUTH:-}
    extra_hosts:
      - "host.docker.internal:host-gateway"
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  background:
    image: onyxdotapp/onyx-backend:${IMAGE_TAG:-latest}
    build:
      context: ../../backend
      dockerfile: Dockerfile
    command: >
      /bin/sh -c "
      if [ -f /etc/ssl/certs/custom-ca.crt ]; then
      update-ca-certificates;
      fi &&
      /usr/bin/supervisord -c /etc/supervisor/conf.d/supervisord.conf"
    depends_on:
      - relational_db
      - index
      - cache
      - inference_model_server
      - indexing_model_server
    restart: always
    environment:
      - ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=true
      - MULTI_TENANT=true
      - LOG_LEVEL=DEBUG
      - AUTH_TYPE=cloud
      - REQUIRE_EMAIL_VERIFICATION=false
      - DISABLE_TELEMETRY=true
      - IMAGE_TAG=test
      - ENCRYPTION_KEY_SECRET=${ENCRYPTION_KEY_SECRET:-}
      - JWT_PUBLIC_KEY_URL=${JWT_PUBLIC_KEY_URL:-}
      # Gen AI Settings (Needed by OnyxBot)
      - GEN_AI_MAX_TOKENS=${GEN_AI_MAX_TOKENS:-}
      - QA_TIMEOUT=${QA_TIMEOUT:-}
      - MAX_CHUNKS_FED_TO_CHAT=${MAX_CHUNKS_FED_TO_CHAT:-}
      - DISABLE_LLM_CHOOSE_SEARCH=${DISABLE_LLM_CHOOSE_SEARCH:-}
      - DISABLE_LLM_QUERY_REPHRASE=${DISABLE_LLM_QUERY_REPHRASE:-}
      - DISABLE_GENERATIVE_AI=${DISABLE_GENERATIVE_AI:-}
      - GENERATIVE_MODEL_ACCESS_CHECK_FREQ=${GENERATIVE_MODEL_ACCESS_CHECK_FREQ:-}
      - DISABLE_LITELLM_STREAMING=${DISABLE_LITELLM_STREAMING:-}
      - LITELLM_EXTRA_HEADERS=${LITELLM_EXTRA_HEADERS:-}
      - GEN_AI_API_KEY=${GEN_AI_API_KEY:-}
      - BING_API_KEY=${BING_API_KEY:-}
      # Query Options
      - DOC_TIME_DECAY=${DOC_TIME_DECAY:-}
      - HYBRID_ALPHA=${HYBRID_ALPHA:-}
      - EDIT_KEYWORD_QUERY=${EDIT_KEYWORD_QUERY:-}
      - MULTILINGUAL_QUERY_EXPANSION=${MULTILINGUAL_QUERY_EXPANSION:-}
      - LANGUAGE_HINT=${LANGUAGE_HINT:-}
      - LANGUAGE_CHAT_NAMING_HINT=${LANGUAGE_CHAT_NAMING_HINT:-}
      - QA_PROMPT_OVERRIDE=${QA_PROMPT_OVERRIDE:-}
      # Other Services
      - POSTGRES_HOST=relational_db
      - POSTGRES_USER=${POSTGRES_USER:-}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-}
      - POSTGRES_DB=${POSTGRES_DB:-}
      - POSTGRES_DEFAULT_SCHEMA=${POSTGRES_DEFAULT_SCHEMA:-}
      - VESPA_HOST=index
      - REDIS_HOST=cache
      - WEB_DOMAIN=${WEB_DOMAIN:-}
      # Don't change the NLP model configs unless you know what you're doing
      - DOCUMENT_ENCODER_MODEL=${DOCUMENT_ENCODER_MODEL:-}
      - DOC_EMBEDDING_DIM=${DOC_EMBEDDING_DIM:-}
      - NORMALIZE_EMBEDDINGS=${NORMALIZE_EMBEDDINGS:-}
      - ASYM_QUERY_PREFIX=${ASYM_QUERY_PREFIX:-}
      - ASYM_PASSAGE_PREFIX=${ASYM_PASSAGE_PREFIX:-}
      - MODEL_SERVER_HOST=${MODEL_SERVER_HOST:-inference_model_server}
      - MODEL_SERVER_PORT=${MODEL_SERVER_PORT:-}
      - INDEXING_MODEL_SERVER_HOST=${INDEXING_MODEL_SERVER_HOST:-indexing_model_server}
      # Indexing Configs
      - VESPA_SEARCHER_THREADS=${VESPA_SEARCHER_THREADS:-}
      - NUM_INDEXING_WORKERS=${NUM_INDEXING_WORKERS:-}
      - ENABLED_CONNECTOR_TYPES=${ENABLED_CONNECTOR_TYPES:-}
      - DISABLE_INDEX_UPDATE_ON_SWAP=${DISABLE_INDEX_UPDATE_ON_SWAP:-}
      - DASK_JOB_CLIENT_ENABLED=${DASK_JOB_CLIENT_ENABLED:-}
      - CONTINUE_ON_CONNECTOR_FAILURE=${CONTINUE_ON_CONNECTOR_FAILURE:-}
      - EXPERIMENTAL_CHECKPOINTING_ENABLED=${EXPERIMENTAL_CHECKPOINTING_ENABLED:-}
      - CONFLUENCE_CONNECTOR_LABELS_TO_SKIP=${CONFLUENCE_CONNECTOR_LABELS_TO_SKIP:-}
      - JIRA_CONNECTOR_LABELS_TO_SKIP=${JIRA_CONNECTOR_LABELS_TO_SKIP:-}
      - WEB_CONNECTOR_VALIDATE_URLS=${WEB_CONNECTOR_VALIDATE_URLS:-}
      - JIRA_API_VERSION=${JIRA_API_VERSION:-}
      - GONG_CONNECTOR_START_TIME=${GONG_CONNECTOR_START_TIME:-}
      - NOTION_CONNECTOR_ENABLE_RECURSIVE_PAGE_LOOKUP=${NOTION_CONNECTOR_ENABLE_RECURSIVE_PAGE_LOOKUP:-}
      - GITHUB_CONNECTOR_BASE_URL=${GITHUB_CONNECTOR_BASE_URL:-}
      - MAX_DOCUMENT_CHARS=${MAX_DOCUMENT_CHARS:-}
      - MAX_FILE_SIZE_BYTES=${MAX_FILE_SIZE_BYTES:-}
      # Egnyte OAuth Configs
      - EGNYTE_CLIENT_ID=${EGNYTE_CLIENT_ID:-}
      - EGNYTE_CLIENT_SECRET=${EGNYTE_CLIENT_SECRET:-}
      - EGNYTE_LOCALHOST_OVERRIDE=${EGNYTE_LOCALHOST_OVERRIDE:-}
      # Linear OAuth Configs
      - LINEAR_CLIENT_ID=${LINEAR_CLIENT_ID:-}
      - LINEAR_CLIENT_SECRET=${LINEAR_CLIENT_SECRET:-}
      # Celery Configs (defaults are set in the supervisord.conf file.
      # prefer doing that to have one source of defaults)
      - CELERY_WORKER_INDEXING_CONCURRENCY=${CELERY_WORKER_INDEXING_CONCURRENCY:-}
      - CELERY_WORKER_LIGHT_CONCURRENCY=${CELERY_WORKER_LIGHT_CONCURRENCY:-}
      - CELERY_WORKER_LIGHT_PREFETCH_MULTIPLIER=${CELERY_WORKER_LIGHT_PREFETCH_MULTIPLIER:-}

      # Onyx SlackBot Configs
      - DANSWER_BOT_DISABLE_DOCS_ONLY_ANSWER=${DANSWER_BOT_DISABLE_DOCS_ONLY_ANSWER:-}
      - DANSWER_BOT_FEEDBACK_VISIBILITY=${DANSWER_BOT_FEEDBACK_VISIBILITY:-}
      - DANSWER_BOT_DISPLAY_ERROR_MSGS=${DANSWER_BOT_DISPLAY_ERROR_MSGS:-}
      - DANSWER_BOT_RESPOND_EVERY_CHANNEL=${DANSWER_BOT_RESPOND_EVERY_CHANNEL:-}
      - DANSWER_BOT_DISABLE_COT=${DANSWER_BOT_DISABLE_COT:-} # Currently unused
      - NOTIFY_SLACKBOT_NO_ANSWER=${NOTIFY_SLACKBOT_NO_ANSWER:-}
      - DANSWER_BOT_MAX_QPM=${DANSWER_BOT_MAX_QPM:-}
      - DANSWER_BOT_MAX_WAIT_TIME=${DANSWER_BOT_MAX_WAIT_TIME:-}
      # Logging
      # Leave this on pretty please? Nothing sensitive is collected!
      # https://docs.onyx.app/more/telemetry
      - DISABLE_TELEMETRY=${DISABLE_TELEMETRY:-}
      - LOG_LEVEL=${LOG_LEVEL:-info} # Set to debug to get more fine-grained logs
      - LOG_ALL_MODEL_INTERACTIONS=${LOG_ALL_MODEL_INTERACTIONS:-} # LiteLLM Verbose Logging
      # Log all of Onyx prompts and interactions with the LLM
      - LOG_DANSWER_MODEL_INTERACTIONS=${LOG_DANSWER_MODEL_INTERACTIONS:-}
      - LOG_INDIVIDUAL_MODEL_TOKENS=${LOG_INDIVIDUAL_MODEL_TOKENS:-}
      - LOG_VESPA_TIMING_INFORMATION=${LOG_VESPA_TIMING_INFORMATION:-}

      # Analytics Configs
      - SENTRY_DSN=${SENTRY_DSN:-}

      # Enterprise Edition stuff
      - ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=${ENABLE_PAID_ENTERPRISE_EDITION_FEATURES:-false}
      - USE_IAM_AUTH=${USE_IAM_AUTH:-}
      - AWS_REGION_NAME=${AWS_REGION_NAME:-}
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID-}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY-}
      # Uncomment the line below to use if IAM_AUTH is true and you are using iam auth for postgres
      # volumes:
      #   - ./bundle.pem:/app/bundle.pem:ro
    extra_hosts:
      - "host.docker.internal:host-gateway"
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"
    # Uncomment the following lines if you need to include a custom CA certificate
    # This section enables the use of a custom CA certificate
    # If present, the custom CA certificate is mounted as a volume
    # The container checks for its existence and updates the system's CA certificates
    # This allows for secure communication with services using custom SSL certificates
    # Optional volume mount for CA certificate
    # volumes:
    #   # Maps to the CA_CERT_PATH environment variable in the Dockerfile
    #   - ${CA_CERT_PATH:-./custom-ca.crt}:/etc/ssl/certs/custom-ca.crt:ro

  web_server:
    image: onyxdotapp/onyx-web-server:${IMAGE_TAG:-latest}
    build:
      context: ../../web
      dockerfile: Dockerfile
      args:
        - NEXT_PUBLIC_DISABLE_STREAMING=${NEXT_PUBLIC_DISABLE_STREAMING:-false}
        - NEXT_PUBLIC_NEW_CHAT_DIRECTS_TO_SAME_PERSONA=${NEXT_PUBLIC_NEW_CHAT_DIRECTS_TO_SAME_PERSONA:-false}
        - NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_POSITIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
        - NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS=${NEXT_PUBLIC_NEGATIVE_PREDEFINED_FEEDBACK_OPTIONS:-}
        - NEXT_PUBLIC_DISABLE_LOGOUT=${NEXT_PUBLIC_DISABLE_LOGOUT:-}
        - NEXT_PUBLIC_DEFAULT_SIDEBAR_OPEN=${NEXT_PUBLIC_DEFAULT_SIDEBAR_OPEN:-}
        - NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED=${NEXT_PUBLIC_FORGOT_PASSWORD_ENABLED:-}
        # Enterprise Edition only
        - NEXT_PUBLIC_THEME=${NEXT_PUBLIC_THEME:-}
        # DO NOT TURN ON unless you have EXPLICIT PERMISSION from Onyx.
        - NEXT_PUBLIC_DO_NOT_USE_TOGGLE_OFF_DANSWER_POWERED=${NEXT_PUBLIC_DO_NOT_USE_TOGGLE_OFF_DANSWER_POWERED:-false}
    depends_on:
      - api_server
    restart: always
    environment:
      - INTERNAL_URL=http://api_server:8080
      - WEB_DOMAIN=${WEB_DOMAIN:-}
      - THEME_IS_DARK=${THEME_IS_DARK:-}
      - DISABLE_LLM_DOC_RELEVANCE=${DISABLE_LLM_DOC_RELEVANCE:-}

      # Enterprise Edition only
      - ENABLE_PAID_ENTERPRISE_EDITION_FEATURES=${ENABLE_PAID_ENTERPRISE_EDITION_FEATURES:-false}
      - NEXT_PUBLIC_CUSTOM_REFRESH_URL=${NEXT_PUBLIC_CUSTOM_REFRESH_URL:-}

  inference_model_server:
    image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
    build:
      context: ../../backend
      dockerfile: Dockerfile.model_server
    command: >
      /bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-false}\" = \"True\" ]; then
      echo 'Skipping service...';
      exit 0;
      else
      exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
      fi"
    restart: on-failure
    environment:
      - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
      # Set to debug to get more fine-grained logs
      - LOG_LEVEL=${LOG_LEVEL:-info}

      # Analytics Configs
      - SENTRY_DSN=${SENTRY_DSN:-}
    volumes:
      # Not necessary, this is just to reduce download time during startup
      - model_cache_huggingface:/root/.cache/huggingface/
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  indexing_model_server:
    image: onyxdotapp/onyx-model-server:${IMAGE_TAG:-latest}
    build:
      context: ../../backend
      dockerfile: Dockerfile.model_server
    command: >
      /bin/sh -c "if [ \"${DISABLE_MODEL_SERVER:-false}\" = \"True\" ]; then
      echo 'Skipping service...';
      exit 0;
      else
      exec uvicorn model_server.main:app --host 0.0.0.0 --port 9000;
      fi"
    restart: on-failure
    environment:
      - INDEX_BATCH_SIZE=${INDEX_BATCH_SIZE:-}
      - MIN_THREADS_ML_MODELS=${MIN_THREADS_ML_MODELS:-}
      - INDEXING_ONLY=True
      # Set to debug to get more fine-grained logs
      - LOG_LEVEL=${LOG_LEVEL:-info}
      - CLIENT_EMBEDDING_TIMEOUT=${CLIENT_EMBEDDING_TIMEOUT:-}

      # Analytics Configs
      - SENTRY_DSN=${SENTRY_DSN:-}
    volumes:
      # Not necessary, this is just to reduce download time during startup
      - indexing_huggingface_model_cache:/root/.cache/huggingface/
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  relational_db:
    image: postgres:15.2-alpine
    command: -c 'max_connections=250'
    restart: always
    environment:
      - POSTGRES_USER=${POSTGRES_USER:-postgres}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-password}
    ports:
      - "5432:5432"
    volumes:
      - db_volume:/var/lib/postgresql/data

  # This container name cannot have an underscore in it due to Vespa expectations of the URL
  index:
    image: vespaengine/vespa:8.277.17
    restart: always
    ports:
      - "19071:19071"
      - "8081:8081"
    volumes:
      - vespa_volume:/opt/vespa/var
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"

  nginx:
    image: nginx:1.23.4-alpine
    restart: always
    # nginx will immediately crash with `nginx: [emerg] host not found in upstream`
    # if api_server / web_server are not up
    depends_on:
      - api_server
      - web_server
    environment:
      - DOMAIN=localhost
    ports:
      - "80:80"
      - "3000:80" # allow for localhost:3000 usage, since that is the norm
    volumes:
      - ../data/nginx:/etc/nginx/conf.d
    logging:
      driver: json-file
      options:
        max-size: "50m"
        max-file: "6"
    # The specified script waits for the api_server to start up.
    # Without this we've seen issues where nginx shows no error logs but
    # does not receive any traffic
    # NOTE: we have to use dos2unix to remove Carriage Return chars from the file
    # in order to make this work on both Unix-like systems and windows
    command: >
      /bin/sh -c "dos2unix /etc/nginx/conf.d/run-nginx.sh
      && /etc/nginx/conf.d/run-nginx.sh app.conf.template.dev"

  cache:
    image: redis:7.4-alpine
    restart: always
    ports:
      - "6379:6379"
    # docker silently mounts /data even without an explicit volume mount, which enables
    # persistence. explicitly setting save and appendonly forces ephemeral behavior.
    command: redis-server --save "" --appendonly no

volumes:
  db_volume:
  vespa_volume: # Created by the container itself

  model_cache_huggingface:
  indexing_huggingface_model_cache: