From 57010901e6077dbb12417f90dd99bbdd83b30d42 Mon Sep 17 00:00:00 2001 From: Timothy Jaeryang Baek Date: Wed, 26 Feb 2025 15:42:19 -0800 Subject: [PATCH] enh: bypass embedding and retrieval --- backend/open_webui/config.py | 21 +- backend/open_webui/main.py | 8 +- backend/open_webui/retrieval/utils.py | 52 +- .../open_webui/retrieval/vector/dbs/chroma.py | 3 +- backend/open_webui/routers/retrieval.py | 87 ++- backend/open_webui/utils/middleware.py | 36 +- .../admin/Settings/Documents.svelte | 632 +++++++++--------- .../admin/Settings/WebSearch.svelte | 12 +- .../components/chat/Messages/Citations.svelte | 3 +- .../components/common/FileItemModal.svelte | 2 +- 10 files changed, 486 insertions(+), 370 deletions(-) diff --git a/backend/open_webui/config.py b/backend/open_webui/config.py index bbaa1e75c..15982f886 100644 --- a/backend/open_webui/config.py +++ b/backend/open_webui/config.py @@ -1502,13 +1502,16 @@ VECTOR_DB = os.environ.get("VECTOR_DB", "chroma") # Chroma if VECTOR_DB == "chroma": import chromadb + CHROMA_DATA_PATH = f"{DATA_DIR}/vector_db" CHROMA_TENANT = os.environ.get("CHROMA_TENANT", chromadb.DEFAULT_TENANT) CHROMA_DATABASE = os.environ.get("CHROMA_DATABASE", chromadb.DEFAULT_DATABASE) CHROMA_HTTP_HOST = os.environ.get("CHROMA_HTTP_HOST", "") CHROMA_HTTP_PORT = int(os.environ.get("CHROMA_HTTP_PORT", "8000")) CHROMA_CLIENT_AUTH_PROVIDER = os.environ.get("CHROMA_CLIENT_AUTH_PROVIDER", "") - CHROMA_CLIENT_AUTH_CREDENTIALS = os.environ.get("CHROMA_CLIENT_AUTH_CREDENTIALS", "") + CHROMA_CLIENT_AUTH_CREDENTIALS = os.environ.get( + "CHROMA_CLIENT_AUTH_CREDENTIALS", "" + ) # Comma-separated list of header=value pairs CHROMA_HTTP_HEADERS = os.environ.get("CHROMA_HTTP_HEADERS", "") if CHROMA_HTTP_HEADERS: @@ -1608,6 +1611,14 @@ DOCUMENT_INTELLIGENCE_KEY = PersistentConfig( os.getenv("DOCUMENT_INTELLIGENCE_KEY", ""), ) + +BYPASS_EMBEDDING_AND_RETRIEVAL = PersistentConfig( + "BYPASS_EMBEDDING_AND_RETRIEVAL", + "rag.bypass_embedding_and_retrieval", + 
os.environ.get("BYPASS_EMBEDDING_AND_RETRIEVAL", "False").lower() == "true", +) + + RAG_TOP_K = PersistentConfig( "RAG_TOP_K", "rag.top_k", int(os.environ.get("RAG_TOP_K", "3")) ) @@ -1824,10 +1835,10 @@ RAG_WEB_SEARCH_ENGINE = PersistentConfig( os.getenv("RAG_WEB_SEARCH_ENGINE", ""), ) -RAG_WEB_SEARCH_FULL_CONTEXT = PersistentConfig( - "RAG_WEB_SEARCH_FULL_CONTEXT", - "rag.web.search.full_context", - os.getenv("RAG_WEB_SEARCH_FULL_CONTEXT", "False").lower() == "true", +BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL = PersistentConfig( + "BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL", + "rag.web.search.bypass_embedding_and_retrieval", + os.getenv("BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL", "False").lower() == "true", ) # You can provide a list of your own websites to filter after performing a web search. diff --git a/backend/open_webui/main.py b/backend/open_webui/main.py index 31ea93399..1870c1c89 100644 --- a/backend/open_webui/main.py +++ b/backend/open_webui/main.py @@ -162,6 +162,7 @@ from open_webui.config import ( RAG_TEMPLATE, DEFAULT_RAG_TEMPLATE, RAG_FULL_CONTEXT, + BYPASS_EMBEDDING_AND_RETRIEVAL, RAG_EMBEDDING_MODEL, RAG_EMBEDDING_MODEL_AUTO_UPDATE, RAG_EMBEDDING_MODEL_TRUST_REMOTE_CODE, @@ -191,7 +192,7 @@ from open_webui.config import ( YOUTUBE_LOADER_PROXY_URL, # Retrieval (Web Search) RAG_WEB_SEARCH_ENGINE, - RAG_WEB_SEARCH_FULL_CONTEXT, + BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL, RAG_WEB_SEARCH_RESULT_COUNT, RAG_WEB_SEARCH_CONCURRENT_REQUESTS, RAG_WEB_SEARCH_TRUST_ENV, @@ -531,6 +532,7 @@ app.state.config.FILE_MAX_COUNT = RAG_FILE_MAX_COUNT app.state.config.RAG_FULL_CONTEXT = RAG_FULL_CONTEXT +app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL = BYPASS_EMBEDDING_AND_RETRIEVAL app.state.config.ENABLE_RAG_HYBRID_SEARCH = ENABLE_RAG_HYBRID_SEARCH app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION = ( ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION @@ -567,7 +569,9 @@ app.state.config.YOUTUBE_LOADER_PROXY_URL = YOUTUBE_LOADER_PROXY_URL 
app.state.config.ENABLE_RAG_WEB_SEARCH = ENABLE_RAG_WEB_SEARCH app.state.config.RAG_WEB_SEARCH_ENGINE = RAG_WEB_SEARCH_ENGINE -app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT = RAG_WEB_SEARCH_FULL_CONTEXT +app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL = ( + BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL +) app.state.config.RAG_WEB_SEARCH_DOMAIN_FILTER_LIST = RAG_WEB_SEARCH_DOMAIN_FILTER_LIST app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION = ENABLE_GOOGLE_DRIVE_INTEGRATION diff --git a/backend/open_webui/retrieval/utils.py b/backend/open_webui/retrieval/utils.py index 09af0eabb..011a7bad0 100644 --- a/backend/open_webui/retrieval/utils.py +++ b/backend/open_webui/retrieval/utils.py @@ -17,6 +17,7 @@ from open_webui.retrieval.vector.connector import VECTOR_DB_CLIENT from open_webui.utils.misc import get_last_user_message, calculate_sha256_string from open_webui.models.users import UserModel +from open_webui.models.files import Files from open_webui.env import ( SRC_LOG_LEVELS, @@ -342,6 +343,7 @@ def get_embedding_function( def get_sources_from_files( + request, files, queries, embedding_function, @@ -359,19 +361,64 @@ def get_sources_from_files( relevant_contexts = [] for file in files: + + context = None if file.get("docs"): + # BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL context = { "documents": [[doc.get("content") for doc in file.get("docs")]], "metadatas": [[doc.get("metadata") for doc in file.get("docs")]], } elif file.get("context") == "full": + # Manual Full Mode Toggle context = { "documents": [[file.get("file").get("data", {}).get("content")]], "metadatas": [[{"file_id": file.get("id"), "name": file.get("name")}]], } - else: - context = None + elif ( + file.get("type") != "web_search" + and request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL + ): + # BYPASS_EMBEDDING_AND_RETRIEVAL + if file.get("type") == "collection": + file_ids = file.get("data", {}).get("file_ids", []) + documents = [] + metadatas = [] + for file_id in file_ids: + file_object = 
Files.get_file_by_id(file_id) + + if file_object: + documents.append(file_object.data.get("content", "")) + metadatas.append( + { + "file_id": file_id, + "name": file_object.filename, + "source": file_object.filename, + } + ) + + context = { + "documents": [documents], + "metadatas": [metadatas], + } + + elif file.get("id"): + file_object = Files.get_file_by_id(file.get("id")) + if file_object: + context = { + "documents": [[file_object.data.get("content", "")]], + "metadatas": [ + [ + { + "file_id": file.get("id"), + "name": file_object.filename, + "source": file_object.filename, + } + ] + ], + } + else: collection_names = [] if file.get("type") == "collection": if file.get("legacy"): @@ -434,6 +481,7 @@ if context: if "data" in file: del file["data"] + relevant_contexts.append({**context, "file": file}) sources = [] diff --git a/backend/open_webui/retrieval/vector/dbs/chroma.py b/backend/open_webui/retrieval/vector/dbs/chroma.py index 093eb7efe..006ee2076 100755 --- a/backend/open_webui/retrieval/vector/dbs/chroma.py +++ b/backend/open_webui/retrieval/vector/dbs/chroma.py @@ -107,8 +107,7 @@ class ChromaClient: } ) return None - except Exception as e: - log.exception(f"{e}") + except Exception: return None def get(self, collection_name: str) -> Optional[GetResult]: diff --git a/backend/open_webui/routers/retrieval.py b/backend/open_webui/routers/retrieval.py index 446754db4..7dd324b80 100644 --- a/backend/open_webui/routers/retrieval.py +++ b/backend/open_webui/routers/retrieval.py @@ -352,6 +352,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): "status": True, "pdf_extract_images": request.app.state.config.PDF_EXTRACT_IMAGES, "RAG_FULL_CONTEXT": request.app.state.config.RAG_FULL_CONTEXT, + "BYPASS_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL, "enable_google_drive_integration": request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION, "enable_onedrive_integration": 
request.app.state.config.ENABLE_ONEDRIVE_INTEGRATION, "content_extraction": { @@ -378,7 +379,7 @@ async def get_rag_config(request: Request, user=Depends(get_admin_user)): }, "web": { "ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION": request.app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION, - "RAG_WEB_SEARCH_FULL_CONTEXT": request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT, + "BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL, "search": { "enabled": request.app.state.config.ENABLE_RAG_WEB_SEARCH, "drive": request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION, @@ -473,11 +474,12 @@ class WebSearchConfig(BaseModel): class WebConfig(BaseModel): search: WebSearchConfig ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION: Optional[bool] = None - RAG_WEB_SEARCH_FULL_CONTEXT: Optional[bool] = None + BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL: Optional[bool] = None class ConfigUpdateForm(BaseModel): RAG_FULL_CONTEXT: Optional[bool] = None + BYPASS_EMBEDDING_AND_RETRIEVAL: Optional[bool] = None pdf_extract_images: Optional[bool] = None enable_google_drive_integration: Optional[bool] = None enable_onedrive_integration: Optional[bool] = None @@ -504,6 +506,12 @@ async def update_rag_config( else request.app.state.config.RAG_FULL_CONTEXT ) + request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL = ( + form_data.BYPASS_EMBEDDING_AND_RETRIEVAL + if form_data.BYPASS_EMBEDDING_AND_RETRIEVAL is not None + else request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL + ) + request.app.state.config.ENABLE_GOOGLE_DRIVE_INTEGRATION = ( form_data.enable_google_drive_integration if form_data.enable_google_drive_integration is not None @@ -557,8 +565,8 @@ async def update_rag_config( request.app.state.config.ENABLE_RAG_WEB_SEARCH = form_data.web.search.enabled request.app.state.config.RAG_WEB_SEARCH_ENGINE = form_data.web.search.engine - request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT = ( - form_data.web.RAG_WEB_SEARCH_FULL_CONTEXT + 
request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL = ( + form_data.web.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL ) request.app.state.config.SEARXNG_QUERY_URL = ( @@ -626,6 +634,7 @@ async def update_rag_config( "status": True, "pdf_extract_images": request.app.state.config.PDF_EXTRACT_IMAGES, "RAG_FULL_CONTEXT": request.app.state.config.RAG_FULL_CONTEXT, + "BYPASS_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL, "file": { "max_size": request.app.state.config.FILE_MAX_SIZE, "max_count": request.app.state.config.FILE_MAX_COUNT, @@ -650,7 +659,7 @@ async def update_rag_config( }, "web": { "ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION": request.app.state.config.ENABLE_RAG_WEB_LOADER_SSL_VERIFICATION, - "RAG_WEB_SEARCH_FULL_CONTEXT": request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT, + "BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL": request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL, "search": { "enabled": request.app.state.config.ENABLE_RAG_WEB_SEARCH, "engine": request.app.state.config.RAG_WEB_SEARCH_ENGINE, @@ -1019,36 +1028,45 @@ def process_file( hash = calculate_sha256_string(text_content) Files.update_file_hash_by_id(file.id, hash) - try: - result = save_docs_to_vector_db( - request, - docs=docs, - collection_name=collection_name, - metadata={ - "file_id": file.id, - "name": file.filename, - "hash": hash, - }, - add=(True if form_data.collection_name else False), - user=user, - ) - - if result: - Files.update_file_metadata_by_id( - file.id, - { - "collection_name": collection_name, + if not request.app.state.config.BYPASS_EMBEDDING_AND_RETRIEVAL: + try: + result = save_docs_to_vector_db( + request, + docs=docs, + collection_name=collection_name, + metadata={ + "file_id": file.id, + "name": file.filename, + "hash": hash, }, + add=(True if form_data.collection_name else False), + user=user, ) - return { - "status": True, - "collection_name": collection_name, - "filename": file.filename, - "content": 
text_content, - } - except Exception as e: - raise e + if result: + Files.update_file_metadata_by_id( + file.id, + { + "collection_name": collection_name, + }, + ) + + return { + "status": True, + "collection_name": collection_name, + "filename": file.filename, + "content": text_content, + } + except Exception as e: + raise e + else: + return { + "status": True, + "collection_name": None, + "filename": file.filename, + "content": text_content, + } + except Exception as e: log.exception(e) if "No pandoc was found" in str(e): @@ -1408,9 +1426,11 @@ async def process_web_search( ) docs = await loader.aload() - if request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT: + if request.app.state.config.BYPASS_WEB_SEARCH_EMBEDDING_AND_RETRIEVAL: return { "status": True, + "collection_name": None, + "filenames": urls, "docs": [ { "content": doc.page_content, @@ -1418,7 +1438,6 @@ async def process_web_search( } for doc in docs ], - "filenames": urls, "loaded_count": len(docs), } else: diff --git a/backend/open_webui/utils/middleware.py b/backend/open_webui/utils/middleware.py index f479da40c..43fd0d480 100644 --- a/backend/open_webui/utils/middleware.py +++ b/backend/open_webui/utils/middleware.py @@ -351,24 +351,25 @@ async def chat_web_search_handler( all_results.append(results) files = form_data.get("files", []) - if request.app.state.config.RAG_WEB_SEARCH_FULL_CONTEXT: - files.append( - { - "docs": results.get("docs", []), - "name": searchQuery, - "type": "web_search_docs", - "urls": results["filenames"], - } - ) - else: + if results.get("collection_name"): files.append( { "collection_name": results["collection_name"], "name": searchQuery, - "type": "web_search_results", + "type": "web_search", "urls": results["filenames"], } ) + elif results.get("docs"): + files.append( + { + "docs": results.get("docs", []), + "name": searchQuery, + "type": "web_search", + "urls": results["filenames"], + } + ) + form_data["files"] = files except Exception as e: log.exception(e) @@ -518,6 
+519,7 @@ async def chat_completion_files_handler( sources = [] if files := body.get("metadata", {}).get("files", None): + queries = [] try: queries_response = await generate_queries( request, @@ -543,8 +545,8 @@ queries_response = {"queries": [queries_response]} queries = queries_response.get("queries", []) - except Exception as e: - queries = [] + except Exception: + pass if len(queries) == 0: queries = [get_last_user_message(body["messages"])] @@ -556,6 +558,7 @@ sources = await loop.run_in_executor( executor, lambda: get_sources_from_files( + request=request, files=files, queries=queries, embedding_function=lambda query: request.app.state.EMBEDDING_FUNCTION( @@ -738,6 +741,7 @@ async def process_chat_payload(request, form_data, metadata, user, model): tool_ids = form_data.pop("tool_ids", None) files = form_data.pop("files", None) + # Remove files duplicates if files: files = list({json.dumps(f, sort_keys=True): f for f in files}.values()) @@ -795,8 +799,6 @@ async def process_chat_payload(request, form_data, metadata, user, model): if len(sources) > 0: context_string = "" for source_idx, source in enumerate(sources): - source_id = source.get("source", {}).get("name", "") - if "document" in source: for doc_idx, doc_context in enumerate(source["document"]): context_string += f"{source_idx}{doc_context}\n" @@ -1913,7 +1915,9 @@ async def process_chat_response( ) log.info(f"content_blocks={content_blocks}") - log.info(f"serialize_content_blocks={serialize_content_blocks(content_blocks)}") + log.info( + f"serialize_content_blocks={serialize_content_blocks(content_blocks)}" + ) try: res = await generate_chat_completion( diff --git a/src/lib/components/admin/Settings/Documents.svelte b/src/lib/components/admin/Settings/Documents.svelte index 9ce035972..0d911af89 100644 --- a/src/lib/components/admin/Settings/Documents.svelte +++ b/src/lib/components/admin/Settings/Documents.svelte @@ -59,6 +59,7 
@@ let pdfExtractImages = true; let RAG_FULL_CONTEXT = false; + let BYPASS_EMBEDDING_AND_RETRIEVAL = false; let enableGoogleDriveIntegration = false; let enableOneDriveIntegration = false; @@ -170,12 +171,6 @@ }; const submitHandler = async () => { - await embeddingModelUpdateHandler(); - - if (querySettings.hybrid) { - await rerankingModelUpdateHandler(); - } - if (contentExtractionEngine === 'tika' && tikaServerUrl === '') { toast.error($i18n.t('Tika Server URL required.')); return; @@ -187,6 +182,15 @@ toast.error($i18n.t('Document Intelligence endpoint and key required.')); return; } + + if (!BYPASS_EMBEDDING_AND_RETRIEVAL) { + await embeddingModelUpdateHandler(); + + if (querySettings.hybrid) { + await rerankingModelUpdateHandler(); + } + } + const res = await updateRAGConfig(localStorage.token, { pdf_extract_images: pdfExtractImages, enable_google_drive_integration: enableGoogleDriveIntegration, @@ -196,6 +200,7 @@ max_count: fileMaxCount === '' ? null : fileMaxCount }, RAG_FULL_CONTEXT: RAG_FULL_CONTEXT, + BYPASS_EMBEDDING_AND_RETRIEVAL: BYPASS_EMBEDDING_AND_RETRIEVAL, chunk: { text_splitter: textSplitter, chunk_overlap: chunkOverlap, @@ -260,6 +265,7 @@ chunkOverlap = res.chunk.chunk_overlap; RAG_FULL_CONTEXT = res.RAG_FULL_CONTEXT; + BYPASS_EMBEDDING_AND_RETRIEVAL = res.BYPASS_EMBEDDING_AND_RETRIEVAL; contentExtractionEngine = res.content_extraction.engine; tikaServerUrl = res.content_extraction.tika_server_url; @@ -328,9 +334,6 @@ - - - + + -
-
-
-
- {$i18n.t('Chunk Size')} -
-
- -
-
- -
-
- {$i18n.t('Chunk Overlap')} -
- -
- -
-
-
-
- - -
-
{$i18n.t('Embedding')}
- -
- -
-
-
{$i18n.t('Embedding Model Engine')}
+ {#if !BYPASS_EMBEDDING_AND_RETRIEVAL} +
+
{$i18n.t('Text Splitter')}
- {#if embeddingEngine === 'openai'} -
- - - -
- {:else if embeddingEngine === 'ollama'} -
- - - -
- {/if} -
- -
-
{$i18n.t('Embedding Model')}
- -
- {#if embeddingEngine === 'ollama'} -
-
+
+
+
+
+ {$i18n.t('Chunk Size')} +
+
- {:else} -
-
- + +
+
+ {$i18n.t('Chunk Overlap')}
- {#if embeddingEngine === ''} +
+ +
+
+
+
+ {/if} +
+ + {#if !BYPASS_EMBEDDING_AND_RETRIEVAL} +
+
{$i18n.t('Embedding')}
+ +
+ +
+
+
+ {$i18n.t('Embedding Model Engine')} +
+
+ +
+
+ + {#if embeddingEngine === 'openai'} +
+ + + +
+ {:else if embeddingEngine === 'ollama'} +
+ + + +
+ {/if} +
+ +
+
{$i18n.t('Embedding Model')}
+ +
+ {#if embeddingEngine === 'ollama'} +
+
+ +
+
+ {:else} +
+
+ +
+ + {#if embeddingEngine === ''} + + {/if} +
+ {/if} +
+ +
+ {$i18n.t( + 'Warning: If you update or change your embedding model, you will need to re-import all documents.' + )} +
+
+ + {#if embeddingEngine === 'ollama' || embeddingEngine === 'openai'} +
+
{$i18n.t('Embedding Batch Size')}
+ +
+ +
+
+ {/if} + +
+
{$i18n.t('Full Context Mode')}
+
+ + + +
+
+ +
+
{$i18n.t('Hybrid Search')}
+
+ { + toggleHybridSearch(); + }} + /> +
+
+ + {#if querySettings.hybrid === true} +
+
{$i18n.t('Reranking Model')}
+ +
+
+
+ +
- {/if} +
- {/if} -
- -
- {$i18n.t( - 'Warning: If you update or change your embedding model, you will need to re-import all documents.' - )} -
+
+ {/if}
- {#if embeddingEngine === 'ollama' || embeddingEngine === 'openai'} -
-
{$i18n.t('Embedding Batch Size')}
+
+
{$i18n.t('Retrieval')}
-
+
+ +
+
{$i18n.t('Top K')}
+
- {/if} -
-
{$i18n.t('Full Context Mode')}
-
- - - -
-
- -
-
{$i18n.t('Hybrid Search')}
-
- { - toggleHybridSearch(); - }} - /> -
-
- - {#if querySettings.hybrid === true} -
-
{$i18n.t('Reranking Model')}
- -
-
-
+ {#if querySettings.hybrid === true} +
+
+
{$i18n.t('Minimum Score')}
+
-
-
-
- {/if} -
- -
-
{$i18n.t('Query')}
- -
- -
-
{$i18n.t('Top K')}
-
- -
-
- - {#if querySettings.hybrid === true} -
-
-
{$i18n.t('Minimum Score')}
-
- -
-
-
- {$i18n.t( - 'Note: If you set a minimum score, the search will only return documents with a score greater than or equal to the minimum score.' - )} -
-
- {/if} - -
-
{$i18n.t('RAG Template')}
-
- -