update for clarity

pablodanswer 2024-09-26 16:55:47 -07:00
parent 8f3f905a99
commit 1f12b074df
14 changed files with 69 additions and 313 deletions

View File

@@ -288,7 +288,6 @@ def check_for_document_sets_sync_task(tenant_id: str) -> None:
soft_time_limit=JOB_TIMEOUT,
)
def check_for_cc_pair_deletion_task(tenant_id: str) -> None:
print('\n\n\n\n\n\n\n\n\n\n\nscheduling deletion task')
"""Runs periodically to check if any deletion tasks should be run"""
with Session(get_sqlalchemy_engine(schema=tenant_id)) as db_session:
# check if any document sets are not synced
@@ -463,7 +462,6 @@ def schedule_tenant_tasks():
logger.info(f"Scheduling tasks for tenants: {valid_tenants}")
for tenant_id in valid_tenants:
print(f"Scheduling tasks for tenant: {tenant_id}")
# Schedule tasks specific to each tenant
celery_app.conf.beat_schedule[f"check-for-document-set-sync-{tenant_id}"] = {
"task": "check_for_document_sets_sync_task",
@@ -488,32 +486,4 @@ def schedule_tenant_tasks():
"args": (tenant_id,),
}
schedule_tenant_tasks()
# celery_app.conf.beat_schedule = {
# "check-for-document-set-sync": {
# "task": "check_for_document_sets_sync_task",
# "schedule": timedelta(seconds=5),
# },
# "check-for-cc-pair-deletion": {
# "task": "check_for_cc_pair_deletion_task",
# # don't need to check too often, since we kick off a deletion initially
# # during the API call that actually marks the CC pair for deletion
# "schedule": timedelta(minutes=1),
# },
# }
# celery_app.conf.beat_schedule.update(
# {
# "check-for-prune": {
# "task": "check_for_prune_task",
# "schedule": timedelta(seconds=5),
# },
# }
# )
# celery_app.conf.beat_schedule.update(
# {
# "kombu-message-cleanup": {
# "task": "kombu_message_cleanup_task",
# "schedule": timedelta(seconds=3600),
# },
# }
# )
schedule_tenant_tasks()
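Taken together, these hunks leave schedule_tenant_tasks as the single place where beat entries are registered. A condensed sketch of the resulting per-tenant pattern, assuming celery_app and get_all_tenant_ids from the surrounding module; the deletion-entry key and its schedule are inferred by analogy with the sync entry and the commented-out single-tenant schedule above:

from datetime import timedelta

def schedule_tenant_tasks() -> None:
    valid_tenants = [t for t in get_all_tenant_ids() if not t.startswith("pg_")]
    for tenant_id in valid_tenants:
        # One beat entry per tenant, so each periodic task runs against
        # that tenant's schema via its tenant_id argument.
        celery_app.conf.beat_schedule[f"check-for-document-set-sync-{tenant_id}"] = {
            "task": "check_for_document_sets_sync_task",
            "schedule": timedelta(seconds=5),
            "args": (tenant_id,),
        }
        # Key name assumed by analogy with the sync entry above.
        celery_app.conf.beat_schedule[f"check-for-cc-pair-deletion-{tenant_id}"] = {
            "task": "check_for_cc_pair_deletion_task",
            # no need to poll aggressively; a deletion is also kicked off by
            # the API call that marks the CC pair for deletion
            "schedule": timedelta(minutes=1),
            "args": (tenant_id,),
        }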

View File

@@ -64,6 +64,7 @@ def _get_connector_runner(
attempt.connector_credential_pair.connector.connector_specific_config,
attempt.connector_credential_pair.credential,
db_session,
)
except Exception as e:
logger.exception(f"Unable to instantiate connector due to {e}")
@@ -417,38 +418,3 @@ def run_indexing_entrypoint(index_attempt_id: int, tenant_id: str, is_ee: bool =
)
except Exception as e:
logger.exception(f"Indexing job with ID '{index_attempt_id}' for tenant {tenant_id} failed due to {e}")
# def run_indexing_entrypoint(index_attempt_id: int, is_ee: bool = False) -> None:
# """Entrypoint for indexing run when using dask distributed.
# Wraps the actual logic in a `try` block so that we can catch any exceptions
# and mark the attempt as failed."""
# try:
# if is_ee:
# global_version.set_ee()
# # set the indexing attempt ID so that all log messages from this process
# # will have it added as a prefix
# IndexAttemptSingleton.set_index_attempt_id(index_attempt_id)
# with Session(get_sqlalchemy_engine()) as db_session:
# # make sure that it is valid to run this indexing attempt + mark it
# # as in progress
# attempt = _prepare_index_attempt(db_session, index_attempt_id)
# logger.info(
# f"Indexing starting: "
# f"connector='{attempt.connector_credential_pair.connector.name}' "
# f"config='{attempt.connector_credential_pair.connector.connector_specific_config}' "
# f"credentials='{attempt.connector_credential_pair.connector_id}'"
# )
# _run_indexing(db_session, attempt)
# logger.info(
# f"Indexing finished: "
# f"connector='{attempt.connector_credential_pair.connector.name}' "
# f"config='{attempt.connector_credential_pair.connector.connector_specific_config}' "
# f"credentials='{attempt.connector_credential_pair.connector_id}'"
# )
# except Exception as e:
# logger.exception(f"Indexing job with ID '{index_attempt_id}' failed due to {e}")

View File

@@ -402,29 +402,12 @@ def update_loop(
num_workers: int = NUM_INDEXING_WORKERS,
num_secondary_workers: int = NUM_SECONDARY_INDEXING_WORKERS,
) -> None:
# Initialize Dask clients outside the loop
client_primary = Client(n_workers=num_workers)
client_secondary = Client(n_workers=num_secondary_workers)
try:
while True:
tenants = get_all_tenant_ids()
# tenants = [
# 'public',
# '0f95cf24-c4dc-4fee-a1f2-1f190789c030',
# '2e03529d-f07f-4953-b06c-0cda3ac55443',
# '40d74dd5-0443-4f61-9320-50eb68a14c03',
# '424fe753-f2bd-41a3-9e75-c01b463c8e17',
# '801881f8-d7f5-4c79-907e-397d7d7c7862',
# '8c2e79fb-94e6-48e2-b46d-462f8056c86a',
# 'a0fdf812-c5e7-48f7-b861-26b78a4b3a6b',
# 'ae865bb6-c0d1-4ecc-a42a-db6c6208dce4',
# 'cc0e19aa-cbf0-4a4f-84fc-e19f1551ae80',
# 'ce763ab4-8a0a-4366-a167-b6f882133e38',
# 'de41dfa2-1f43-417c-9763-7baea619b67c',
# 'f53febd2-3861-495b-8b44-08840cf7f521'
# ]
valid_tenants = [tenant for tenant in tenants if not tenant.startswith('pg_')]
logger.info(f"Found valid tenants: {valid_tenants}")
tenants = valid_tenants
@@ -469,92 +452,10 @@ def update_loop(
time.sleep(sleep_time)
finally:
# Ensure clients are closed when the loop exits
client_primary.close()
client_secondary.close()
# def update_loop(
# delay: int = 10,
# num_workers: int = NUM_INDEXING_WORKERS,
# num_secondary_workers: int = NUM_SECONDARY_INDEXING_WORKERS,
# ) -> None:
# engine = get_sqlalchemy_engine()
# with Session(engine) as db_session:
# check_index_swap(db_session=db_session)
# search_settings = get_current_search_settings(db_session)
# # So that the first time users aren't surprised by really slow speed of first
# # batch of documents indexed
# if search_settings.provider_type is None:
# logger.notice("Running a first inference to warm up embedding model")
# embedding_model = EmbeddingModel.from_db_model(
# search_settings=search_settings,
# server_host=INDEXING_MODEL_SERVER_HOST,
# server_port=MODEL_SERVER_PORT,
# )
# warm_up_bi_encoder(
# embedding_model=embedding_model,
# )
# client_primary: Client | SimpleJobClient
# client_secondary: Client | SimpleJobClient
# if DASK_JOB_CLIENT_ENABLED:
# cluster_primary = LocalCluster(
# n_workers=num_workers,
# threads_per_worker=1,
# # there are warning about high memory usage + "Event loop unresponsive"
# # which are not relevant to us since our workers are expected to use a
# # lot of memory + involve CPU intensive tasks that will not relinquish
# # the event loop
# silence_logs=logging.ERROR,
# )
# cluster_secondary = LocalCluster(
# n_workers=num_secondary_workers,
# threads_per_worker=1,
# silence_logs=logging.ERROR,
# )
# client_primary = Client(cluster_primary)
# client_secondary = Client(cluster_secondary)
# if LOG_LEVEL.lower() == "debug":
# client_primary.register_worker_plugin(ResourceLogger())
# else:
# client_primary = SimpleJobClient(n_workers=num_workers)
# client_secondary = SimpleJobClient(n_workers=num_secondary_workers)
# existing_jobs: dict[int, Future | SimpleJob] = {}
# while True:
# start = time.time()
# start_time_utc = datetime.utcfromtimestamp(start).strftime("%Y-%m-%d %H:%M:%S")
# logger.debug(f"Running update, current UTC time: {start_time_utc}")
# if existing_jobs:
# # TODO: make this debug level once the "no jobs are being scheduled" issue is resolved
# logger.debug(
# "Found existing indexing jobs: "
# f"{[(attempt_id, job.status) for attempt_id, job in existing_jobs.items()]}"
# )
# try:
# with Session(get_sqlalchemy_engine()) as db_session:
# check_index_swap(db_session)
# existing_jobs = cleanup_indexing_jobs(existing_jobs=existing_jobs)
# create_indexing_jobs(existing_jobs=existing_jobs)
# existing_jobs = kickoff_indexing_jobs(
# existing_jobs=existing_jobs,
# client=client_primary,
# secondary_client=client_secondary,
# )
# except Exception as e:
# logger.exception(f"Failed to run update due to {e}")
# sleep_time = delay - (time.time() - start)
# if sleep_time > 0:
# time.sleep(sleep_time)
def update__main() -> None:
set_is_ee_based_on_env_variable()
init_sqlalchemy_engine(POSTGRES_INDEXER_APP_NAME)
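The reshaped update_loop boils down to: create the Dask clients once, loop forever over tenants, close the clients on exit. A skeleton of that shape, with the per-tenant body elided and get_all_tenant_ids assumed from the hunk above:

import time
from dask.distributed import Client

def update_loop(delay: int = 10, num_workers: int = 4, num_secondary_workers: int = 4) -> None:
    # Clients are created once, outside the loop, matching the hunk above.
    client_primary = Client(n_workers=num_workers)
    client_secondary = Client(n_workers=num_secondary_workers)
    try:
        while True:
            start = time.time()
            tenants = [t for t in get_all_tenant_ids() if not t.startswith("pg_")]
            for tenant_id in tenants:
                ...  # index swap check, job cleanup, and kickoff per tenant
            sleep_time = delay - (time.time() - start)
            if sleep_time > 0:
                time.sleep(sleep_time)
    finally:
        # Ensure clients are closed when the loop exits
        client_primary.close()
        client_secondary.close()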

View File

@@ -52,7 +52,6 @@ def get_chat_session_by_id(
) -> ChatSession:
stmt = select(ChatSession).where(ChatSession.id == chat_session_id)
connection = db_session.connection()
logger.info(f"Database URL: {connection.engine.url}")
if is_shared:
stmt = stmt.where(ChatSession.shared_status == ChatSessionSharedStatus.PUBLIC)
@@ -64,8 +63,6 @@ def get_chat_session_by_id(
or_(ChatSession.user_id == user_id, ChatSession.user_id.is_(None))
)
logger.info(f"POPOPZExecuting SQL query: {stmt}")
result = db_session.execute(stmt)
chat_session = result.scalar_one_or_none()

View File

@ -38,18 +38,8 @@ import traceback
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
logger = setup_logger()
def log_stack_trace() -> None:
stack = traceback.extract_stack()
logger.debug("Full stack trace:")
for filename, line, func, _ in stack[:-1]: # Exclude the current function
logger.debug(f" File: {filename}, Line: {line}, Function: {func}")
SYNC_DB_API = "psycopg2"
ASYNC_DB_API = "asyncpg"
@@ -203,28 +193,18 @@ def get_current_tenant_id(request: Request) -> str | None:
token = request.cookies.get("tenant_details")
if not token:
logger.warning("No token found in cookies")
tenant_id = current_tenant_id.get()
logger.info(f"Returning default tenant_id: {tenant_id}")
return tenant_id
return current_tenant_id.get()
try:
logger.info("Attempting to decode token")
payload = jwt.decode(token, SECRET_JWT_KEY, algorithms=["HS256"])
logger.info(f"Decoded payload: {payload}")
tenant_id = payload.get("tenant_id")
if not tenant_id:
logger.warning("Invalid token: tenant_id missing")
raise HTTPException(status_code=400, detail="Invalid token: tenant_id missing")
logger.info(f"Valid tenant_id found: {tenant_id}")
current_tenant_id.set(tenant_id)
logger.info(f'setting current tenant id {tenant_id}')
return tenant_id
except (DecodeError, InvalidTokenError) as e:
logger.error(f"JWT decode error: {str(e)}")
except (DecodeError, InvalidTokenError):
raise HTTPException(status_code=401, detail="Invalid token format")
except Exception as e:
logger.exception(f"Unexpected error in get_current_tenant_id: {str(e)}")
except Exception:
raise HTTPException(status_code=500, detail="Internal server error")
def get_session(
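With the debug logging stripped, get_current_tenant_id reduces to a small decode-and-set flow. A self-contained sketch of the post-cleanup version, assuming SECRET_JWT_KEY and treating current_tenant_id as the module's ContextVar:

import contextvars

import jwt  # PyJWT
from fastapi import HTTPException, Request

current_tenant_id: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "current_tenant_id", default=None
)

def get_current_tenant_id(request: Request) -> str | None:
    token = request.cookies.get("tenant_details")
    if not token:
        # No cookie: fall back to whatever tenant is bound to this context.
        return current_tenant_id.get()
    try:
        payload = jwt.decode(token, SECRET_JWT_KEY, algorithms=["HS256"])  # SECRET_JWT_KEY assumed
        tenant_id = payload.get("tenant_id")
        if not tenant_id:
            raise HTTPException(status_code=400, detail="Invalid token: tenant_id missing")
        current_tenant_id.set(tenant_id)
        return tenant_id
    except (jwt.DecodeError, jwt.InvalidTokenError):
        raise HTTPException(status_code=401, detail="Invalid token format")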

View File

@@ -771,7 +771,7 @@ class VespaIndex(DocumentIndex):
for doc_id in update_request.document_ids
]
# update_start = time.monotonic()
update_start = time.monotonic()
# Fetch all chunks for each document ahead of time
index_names = [self.index_name]
@@ -834,10 +834,10 @@ class VespaIndex(DocumentIndex):
json=update.update_request,
)
# logger.debug(
# "Finished updating Vespa documents in %.2f seconds",
# time.monotonic() - update_start,
# )
logger.debug(
"Finished updating Vespa documents in %.2f seconds",
time.monotonic() - update_start,
)
return
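The restored timing follows the usual monotonic-clock pattern; a standalone sketch, where operation stands in for the batched Vespa update requests:

import logging
import time

logger = logging.getLogger(__name__)

def timed_update(operation) -> None:
    # time.monotonic() is immune to wall-clock adjustments, so the delta is
    # a reliable duration even if the system clock changes mid-update.
    update_start = time.monotonic()
    operation()
    logger.debug(
        "Finished updating Vespa documents in %.2f seconds",
        time.monotonic() - update_start,
    )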

View File

@@ -81,11 +81,9 @@ def get_default_llms(
if db_session is None:
logger.debug("DB SESSION IS NONE")
with get_session_context_manager() as db_session:
llm_provider = fetch_default_provider(db_session)
else:
logger.debug("DB SESSION IS NOT NONE")
llm_provider = fetch_default_provider(db_session)

View File

@@ -92,11 +92,10 @@ def authenticate_request(func: Callable) -> Callable:
@basic_router.post("/create")
@authenticate_request
def create_tenant(request: Request, tenant_id: str) -> dict[str, str]:
if not tenant_id:
raise HTTPException(status_code=400, detail="tenant_id is required")
logger.info(f"Received request to create tenant schema: {tenant_id}")
logger.info(f"Creating tenant schema: {tenant_id}")
with Session(get_sqlalchemy_engine(schema=tenant_id)) as db_session:
with db_session.begin():
@@ -107,35 +106,29 @@ def create_tenant(request: Request, tenant_id: str) -> dict[str, str]:
schema_exists = result.scalar() is not None
if not schema_exists:
# Create schema
db_session.execute(text(f'CREATE SCHEMA "{tenant_id}"'))
logger.info(f"Schema {tenant_id} created")
else:
logger.info(f"Schema {tenant_id} already exists")
try:
logger.info(f"Running migrations for tenant: {tenant_id}")
run_alembic_migrations(tenant_id)
logger.info(f"Migrations completed for tenant: {tenant_id}")
except Exception as e:
logger.info("error has occurred")
logger.exception(f"Error while running migrations for tenant {tenant_id}: {str(e)}")
logger.exception(f"Error running migrations for tenant {tenant_id}: {str(e)}")
raise HTTPException(status_code=500, detail=str(e))
try:
with Session(get_sqlalchemy_engine(schema=tenant_id)) as db_session:
setup_postgres_and_initial_settings(db_session)
except Exception as e:
logger.exception(f"Error while setting up postgres for tenant {tenant_id}: {str(e)}")
logger.exception(f"Error setting up postgres for tenant {tenant_id}: {str(e)}")
raise
logger.info(f"Tenant {tenant_id} created successfully")
return {"status": "success", "message": f"Tenant {tenant_id} created successfully"}
async def check_schema_exists(tenant_id: str) -> bool:
logger.info(f"Checking if schema exists for tenant: {tenant_id}")
get_async_session_context = contextlib.asynccontextmanager(
get_async_session
)
@@ -144,10 +137,7 @@ async def check_schema_exists(tenant_id: str) -> bool:
text("SELECT schema_name FROM information_schema.schemata WHERE schema_name = :schema_name"),
{"schema_name": tenant_id}
)
schema = result.scalar()
exists = schema is not None
logger.info(f"Schema for tenant {tenant_id} exists: {exists}")
return exists
return result.scalar() is not None
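The slimmed-down existence check is now a single scalar query. A runnable sketch, with get_async_session standing in for the app's session dependency (assumed to be an async generator, as contextlib.asynccontextmanager requires):

import contextlib

from sqlalchemy import text

async def check_schema_exists(tenant_id: str) -> bool:
    get_async_session_context = contextlib.asynccontextmanager(get_async_session)
    async with get_async_session_context() as session:
        result = await session.execute(
            text(
                "SELECT schema_name FROM information_schema.schemata "
                "WHERE schema_name = :schema_name"
            ),
            {"schema_name": tenant_id},
        )
        # scalar() is None when no row matched, i.e. the schema is absent.
        return result.scalar() is not None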
@basic_router.post("/auth/sso-callback")
async def sso_callback(
@@ -155,27 +145,18 @@ async def sso_callback(
sso_token: str = Body(..., embed=True),
user_manager: UserManager = Depends(get_user_manager),
) -> JSONResponse:
logger.info("SSO callback initiated")
payload = verify_sso_token(sso_token)
logger.info(f"SSO token verified for email: {payload['email']}")
user = await user_manager.sso_authenticate(
payload["email"], payload["user_id"], payload["tenant_id"]
)
logger.info(f"User authenticated: {user.email}")
tenant_id = payload["tenant_id"]
logger.info(f"Checking schema for tenant: {tenant_id}")
# Check if tenant schema exists
schema_exists = await check_schema_exists(tenant_id)
if not schema_exists:
logger.info(f"Schema does not exist for tenant: {tenant_id}")
raise HTTPException(status_code=403, detail="Your Danswer app has not been set up yet!")
session_token = await create_user_session(user, payload["tenant_id"])
logger.info(f"Session token created for user: {user.email}")
# Set the session cookie with proper flags
response = JSONResponse(content={"message": "Authentication successful"})
response.set_cookie(
key="tenant_details",
@@ -187,6 +168,4 @@ async def sso_callback(
httponly=True,
samesite="lax",
)
logger.info("Session cookie set")
logger.info("SSO callback completed successfully")
return response
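The cookie flags in the last hunk are the standard hardening pair for a session cookie; a minimal sketch of the response construction, with session_token assumed to be the value returned by create_user_session:

from fastapi.responses import JSONResponse

def build_sso_response(session_token: str) -> JSONResponse:
    response = JSONResponse(content={"message": "Authentication successful"})
    response.set_cookie(
        key="tenant_details",
        value=session_token,  # assumed: the JWT from create_user_session
        httponly=True,   # not readable from client-side JS
        samesite="lax",  # sent only on top-level navigations
    )
    return response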

View File

@@ -24,14 +24,14 @@ autorestart=true
# relatively compute-light (e.g. they tend to just make a bunch of requests to
# Vespa / Postgres)
[program:celery_worker]
command=celery -A danswer.background.celery.celery_run:celery_app worker --pool=threads --concurrency=6 --loglevel=WARNING --logfile=/var/log/celery_worker_supervisor.log
command=celery -A danswer.background.celery.celery_run:celery_app worker --pool=threads --concurrency=6 --loglevel=INFO --logfile=/var/log/celery_worker_supervisor.log
environment=LOG_FILE_NAME=celery_worker
redirect_stderr=true
autorestart=true
# Job scheduler for periodic tasks
[program:celery_beat]
command=celery -A danswer.background.celery.celery_run:celery_app beat --loglevel=WARNING --logfile=/var/log/celery_beat_supervisor.log
command=celery -A danswer.background.celery.celery_run:celery_app beat --loglevel=INFO --logfile=/var/log/celery_beat_supervisor.log
environment=LOG_FILE_NAME=celery_beat
redirect_stderr=true
autorestart=true

View File

@@ -30,25 +30,24 @@ const nextConfig = {
if (process.env.NODE_ENV === "production") return defaultRedirects;
return defaultRedirects.concat([
// TODO: validate the db session in tenancy for local dev
// {
{
// source: "/api/chat/send-message:params*",
// destination: "http://127.0.0.1:8080/chat/send-message:params*", // Proxy to Backend
// permanent: true,
// },
// {
// source: "/api/query/stream-answer-with-quote:params*",
// destination:
// "http://127.0.0.1:8080/query/stream-answer-with-quote:params*", // Proxy to Backend
// permanent: true,
// },
// {
// source: "/api/query/stream-query-validation:params*",
// destination:
// "http://127.0.0.1:8080/query/stream-query-validation:params*", // Proxy to Backend
// permanent: true,
// },
source: "/api/chat/send-message:params*",
destination: "http://127.0.0.1:8080/chat/send-message:params*", // Proxy to Backend
permanent: true,
},
{
source: "/api/query/stream-answer-with-quote:params*",
destination:
"http://127.0.0.1:8080/query/stream-answer-with-quote:params*", // Proxy to Backend
permanent: true,
},
{
source: "/api/query/stream-query-validation:params*",
destination:
"http://127.0.0.1:8080/query/stream-query-validation:params*", // Proxy to Backend
permanent: true,
},
]);
},
publicRuntimeConfig: {

View File

@@ -1,4 +1,3 @@
// app/auth/sso-callback/layout.tsx
import React from "react";
export const metadata = {
@@ -12,14 +11,7 @@ export default function SSOCallbackLayout({
}) {
return (
<html lang="en">
<head>
<title>SSO Callback</title>
{/* Include any meta tags or scripts specific to this page */}
</head>
<body>
{/* Minimal styling or components */}
{children}
</body>
<body>{children}</body>
</html>
);
}

View File

@@ -19,7 +19,6 @@ export default function SSOCallback() {
}
verificationStartedRef.current = true;
// Extract the SSO token from the URL hash
const hashParams = new URLSearchParams(window.location.hash.slice(1));
const ssoToken = hashParams.get("sso_token");
@@ -28,9 +27,7 @@ export default function SSOCallback() {
return;
}
// Remove the SSO token from the URL for security
window.history.replaceState(null, '', window.location.pathname);
// const ssoToken = searchParams.get("sso_token");
if (!ssoToken) {
setError("No SSO token found");
@@ -46,14 +43,13 @@ export default function SSOCallback() {
headers: {
"Content-Type": "application/json",
},
credentials: "include", // Ensure cookies are included in requests
credentials: "include",
body: JSON.stringify({ sso_token: ssoToken }),
}
)
if (response.ok) {
setAuthStatus("Authentication successful!");
// Redirect to the dashboard or desired page
router.replace("/admin/configuration/llm");
} else {
const errorData = await response.json();

View File

@@ -2,9 +2,7 @@
import { useState } from "react";
import { loadStripe } from "@stripe/stripe-js";
import { buildClientUrl } from "@/lib/utilsSS";
import { BillingPlanType } from "@/app/admin/settings/interfaces";
// import { buildUrl } from '@/lib/utilsSS';
export function StripeCheckoutButton({
newQuantity,
@@ -53,11 +51,10 @@ export function StripeCheckoutButton({
return (
<button
onClick={handleClick}
className={`py-2 px-4 text-white rounded ${
currentPlan === newPlan && currentQuantity === newQuantity
className={`py-2 px-4 text-white rounded ${currentPlan === newPlan && currentQuantity === newQuantity
? "bg-gray-400 cursor-not-allowed"
: "bg-blue-500 hover:bg-blue-600"
} disabled:bg-blue-300`}
} disabled:bg-blue-300`}
disabled={
(currentPlan === newPlan && currentQuantity === newQuantity) ||
isLoading
@@ -68,12 +65,12 @@
: currentPlan === newPlan && currentQuantity === newQuantity
? "No Changes"
: newPlan > currentPlan ||
(newPlan === currentPlan && newQuantity > currentQuantity)
(newPlan === currentPlan && newQuantity > currentQuantity)
? "Upgrade Plan"
: newPlan == BillingPlanType.ENTERPRISE
? "Talk to us"
: // : newPlan < currentPlan ||
newPlan === currentPlan && newQuantity < currentQuantity
newPlan === currentPlan && newQuantity < currentQuantity
? "Upgrade Plan"
: "Change Plan"}
</button>

View File

@@ -1,10 +1,5 @@
import "./globals.css";
import {
fetchEnterpriseSettingsSS,
fetchSettingsSS,
SettingsError,
} from "@/components/settings/lib";
import { fetchEnterpriseSettingsSS, fetchSettingsSS } from "@/components/settings/lib";
import {
CUSTOM_ANALYTICS_ENABLED,
SERVER_SIDE_ONLY__PAID_ENTERPRISE_FEATURES_ENABLED,
@@ -53,47 +48,34 @@ export default async function RootLayout({
}: {
children: React.ReactNode;
}) {
// 00a89749-beab-489a-8b72-88aa3d646274
// 01fb5963-9ab3-4585-900a-438480857427
// return <>{children}</>
const combinedSettings = await fetchSettingsSS() || defaultCombinedSettings;
// SELECT table_name, column_name, data_type, character_maximum_length
// FROM information_schema.columns
// WHERE table_schema = '00a89749-beab-489a-8b72-88aa3d646274'
// ORDER BY table_name, ordinal_position;
if (!combinedSettings) {
return (
<html lang="en" className={`${inter.variable} font-sans`}>
<Head>
<title>Settings Unavailable | Danswer</title>
</Head>
<body className="bg-background text-default">
<div className="flex flex-col items-center justify-center min-h-screen">
<div className="mb-2 flex items-center max-w-[175px]">
<HeaderTitle>Danswer</HeaderTitle>
<Logo height={40} width={40} />
</div>
// const combinedSettings: CombinedSettings | null = await fetchSettingsSS()
const combinedSettings = defaultCombinedSettings
// if (!combinedSettings) {
// return <>{children}</>
// // Just display a simple full page error if fetching fails.
// return (
// <html lang="en" className={`${inter.variable} font-sans`}>
// <Head>
// <title>Settings Unavailable | Danswer</title>
// </Head>
// <body className="bg-background text-default">
// <div className="flex flex-col items-center justify-center min-h-screen">
// <div className="mb-2 flex items-center max-w-[175px]">
// <HeaderTitle>Danswer</HeaderTitle>
// <Logo height={40} width={40} />
// </div>
// <Card className="p-8 max-w-md">
// <h1 className="text-2xl font-bold mb-4 text-error">Error</h1>
// <p className="text-text-500">
// Your Danswer instance was not configured properly and your
// settings could not be loaded. Please contact your admin to fix
// this error.
// </p>
// </Card>
// </div>
// </body>
// </html>
// );
// }
<Card className="p-8 max-w-md">
<h1 className="text-2xl font-bold mb-4 text-error">Error</h1>
<p className="text-text-500">
Your Danswer instance was not configured properly and your
settings could not be loaded. Please contact your admin to fix
this error.
</p>
</Card>
</div>
</body>
</html>
);
}
return (
<html lang="en">
@@ -104,7 +86,7 @@ export default async function RootLayout({
/>
</Head>
{CUSTOM_ANALYTICS_ENABLED && combinedSettings && combinedSettings.customAnalyticsScript && (
{CUSTOM_ANALYTICS_ENABLED && combinedSettings.customAnalyticsScript && (
<head>
<script
type="text/javascript"
@@ -118,12 +100,11 @@ export default async function RootLayout({
<body className={`relative ${inter.variable} font-sans`}>
<div
className={`text-default bg-background ${
// TODO: remove this once proper dark mode exists
process.env.THEME_IS_DARK?.toLowerCase() === "true" ? "dark" : ""
}`}
}`}
>
<UserProvider>
<SettingsProvider settings={combinedSettings || defaultCombinedSettings}>
<SettingsProvider settings={combinedSettings}>
{children}
</SettingsProvider>
</UserProvider>