From 2847ab003e463b8ba270131e3dbcc415bc04a478 Mon Sep 17 00:00:00 2001 From: pablonyx Date: Mon, 16 Dec 2024 13:34:43 -0800 Subject: [PATCH] Prompting (#3372) * auto generate start prompts * post rebase clean up * update for clarity --- backend/onyx/configs/chat_configs.py | 4 + backend/onyx/db/models.py | 7 + backend/onyx/document_index/interfaces.py | 14 + .../vespa/app_config/schemas/danswer_chunk.sd | 6 + backend/onyx/document_index/vespa/index.py | 27 ++ .../shared_utils/vespa_request_builders.py | 10 +- backend/onyx/prompts/starter_messages.py | 46 +++ .../starter_message_creation.py | 271 ++++++++++++++++ backend/onyx/server/features/persona/api.py | 28 ++ .../onyx/server/features/persona/models.py | 8 + web/src/app/admin/add-connector/page.tsx | 3 +- .../app/admin/assistants/AssistantEditor.tsx | 291 ++++++++++-------- .../admin/assistants/StarterMessageList.tsx | 198 ++++++++++++ 13 files changed, 778 insertions(+), 135 deletions(-) create mode 100644 backend/onyx/prompts/starter_messages.py create mode 100644 backend/onyx/secondary_llm_flows/starter_message_creation.py create mode 100644 web/src/app/admin/assistants/StarterMessageList.tsx diff --git a/backend/onyx/configs/chat_configs.py b/backend/onyx/configs/chat_configs.py index f0359abe3..1560a2805 100644 --- a/backend/onyx/configs/chat_configs.py +++ b/backend/onyx/configs/chat_configs.py @@ -63,6 +63,10 @@ LANGUAGE_CHAT_NAMING_HINT = ( or "The name of the conversation must be in the same language as the user query." ) +# Number of prompts each persona should have +NUM_PERSONA_PROMPTS = 4 +NUM_PERSONA_PROMPT_GENERATION_CHUNKS = 5 + # Agentic search takes significantly more tokens and therefore has much higher cost. # This configuration allows users to get a search-only experience with instant results # and no involvement from the LLM. 
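The two limits added above are consumed by the starter-message generation flow introduced later in this patch: `NUM_PERSONA_PROMPTS` is the number of categories requested from the LLM (one starter message is generated per category), and `NUM_PERSONA_PROMPT_GENERATION_CHUNKS` caps how many randomly retrieved chunks are passed along as sample context. A standalone sketch of that relationship (stub data only — the real flow lives in `starter_message_creation.py` below; the category and chunk values here are hypothetical):

```python
# Illustrative only -- stub data standing in for the LLM and the index.
NUM_PERSONA_PROMPTS = 4  # categories requested -> starter messages produced
NUM_PERSONA_PROMPT_GENERATION_CHUNKS = 5  # random chunks used as sample context

llm_categories = ["Travel", "Coding", "Books", "Cooking"]  # hypothetical LLM output
retrieved_chunks = [f"chunk {i} text" for i in range(50)]  # hypothetical index hits

# One generation prompt per category; all of them share the same sampled context.
sample_context = "\n".join(retrieved_chunks[:NUM_PERSONA_PROMPT_GENERATION_CHUNKS])
generation_prompts = [
    f"Category: {category}\n\nExample content:\n'''\n{sample_context}\n'''"
    for category in llm_categories[:NUM_PERSONA_PROMPTS]
]
assert len(generation_prompts) == NUM_PERSONA_PROMPTS
```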
diff --git a/backend/onyx/db/models.py b/backend/onyx/db/models.py index 353eba9c9..e5644c983 100644 --- a/backend/onyx/db/models.py +++ b/backend/onyx/db/models.py @@ -5,6 +5,8 @@ from typing import Literal from typing import NotRequired from typing import Optional from uuid import uuid4 + +from pydantic import BaseModel from typing_extensions import TypedDict # noreorder from uuid import UUID @@ -1344,6 +1346,11 @@ class StarterMessage(TypedDict): message: str +class StarterMessageModel(BaseModel): + name: str + message: str + + class Persona(Base): __tablename__ = "persona" diff --git a/backend/onyx/document_index/interfaces.py b/backend/onyx/document_index/interfaces.py index 725af0bdc..1f6386b09 100644 --- a/backend/onyx/document_index/interfaces.py +++ b/backend/onyx/document_index/interfaces.py @@ -369,6 +369,19 @@ class AdminCapable(abc.ABC): raise NotImplementedError +class RandomCapable(abc.ABC): + """Class must implement random document retrieval capability""" + + @abc.abstractmethod + def random_retrieval( + self, + filters: IndexFilters, + num_to_retrieve: int = 10, + ) -> list[InferenceChunkUncleaned]: + """Retrieve random chunks matching the filters""" + raise NotImplementedError + + class BaseIndex( Verifiable, Indexable, @@ -376,6 +389,7 @@ class BaseIndex( Deletable, AdminCapable, IdRetrievalCapable, + RandomCapable, abc.ABC, ): """ diff --git a/backend/onyx/document_index/vespa/app_config/schemas/danswer_chunk.sd b/backend/onyx/document_index/vespa/app_config/schemas/danswer_chunk.sd index 8789a0534..2fd861b77 100644 --- a/backend/onyx/document_index/vespa/app_config/schemas/danswer_chunk.sd +++ b/backend/onyx/document_index/vespa/app_config/schemas/danswer_chunk.sd @@ -218,4 +218,10 @@ schema DANSWER_CHUNK_NAME { expression: bm25(content) + (5 * bm25(title)) } } + + rank-profile random_ { + first-phase { + expression: random.match + } + } } diff --git a/backend/onyx/document_index/vespa/index.py b/backend/onyx/document_index/vespa/index.py index f3639948b..09455cff2 100644 --- a/backend/onyx/document_index/vespa/index.py +++ b/backend/onyx/document_index/vespa/index.py @@ -2,6 +2,7 @@ import concurrent.futures import io import logging import os +import random import re import time import urllib @@ -903,6 +904,32 @@ class VespaIndex(DocumentIndex): logger.info("Batch deletion completed") + def random_retrieval( + self, + filters: IndexFilters, + num_to_retrieve: int = 10, + ) -> list[InferenceChunkUncleaned]: + """Retrieve random chunks matching the filters using Vespa's random ranking + + This method is currently used for random chunk retrieval in the context of + assistant starter message creation (passed as sample context for usage by the assistant). 
+ """ + vespa_where_clauses = build_vespa_filters(filters, remove_trailing_and=True) + + yql = YQL_BASE.format(index_name=self.index_name) + vespa_where_clauses + + random_seed = random.randint(0, 1000000) + + params: dict[str, str | int | float] = { + "yql": yql, + "hits": num_to_retrieve, + "timeout": VESPA_TIMEOUT, + "ranking.profile": "random_", + "ranking.properties.random.seed": random_seed, + } + + return query_vespa(params) + class _VespaDeleteRequest: def __init__(self, document_id: str, index_name: str) -> None: diff --git a/backend/onyx/document_index/vespa/shared_utils/vespa_request_builders.py b/backend/onyx/document_index/vespa/shared_utils/vespa_request_builders.py index a1752d52c..dda75c853 100644 --- a/backend/onyx/document_index/vespa/shared_utils/vespa_request_builders.py +++ b/backend/onyx/document_index/vespa/shared_utils/vespa_request_builders.py @@ -19,7 +19,12 @@ from onyx.utils.logger import setup_logger logger = setup_logger() -def build_vespa_filters(filters: IndexFilters, include_hidden: bool = False) -> str: +def build_vespa_filters( + filters: IndexFilters, + *, + include_hidden: bool = False, + remove_trailing_and: bool = False, # Set to True when using as a complete Vespa query +) -> str: def _build_or_filters(key: str, vals: list[str] | None) -> str: if vals is None: return "" @@ -78,6 +83,9 @@ def build_vespa_filters(filters: IndexFilters, include_hidden: bool = False) -> filter_str += _build_time_filter(filters.time_cutoff) + if remove_trailing_and and filter_str.endswith(" and "): + filter_str = filter_str[:-5] # We remove the trailing " and " + return filter_str diff --git a/backend/onyx/prompts/starter_messages.py b/backend/onyx/prompts/starter_messages.py new file mode 100644 index 000000000..3ef8066e9 --- /dev/null +++ b/backend/onyx/prompts/starter_messages.py @@ -0,0 +1,46 @@ +PERSONA_CATEGORY_GENERATION_PROMPT = """ +Based on the assistant's name, description, and instructions, generate a list of {num_categories} + **unique and diverse** categories that represent different types of starter messages a user + might send to initiate a conversation with this chatbot assistant. + +**Ensure that the categories are varied and cover a wide range of topics related to the assistant's capabilities.** + +Provide the categories as a JSON array of strings **without any code fences or additional text**. + +**Context about the assistant:** +- **Name**: {name} +- **Description**: {description} +- **Instructions**: {instructions} +""".strip() + +PERSONA_STARTER_MESSAGE_CREATION_PROMPT = """ +Create a starter message that a **user** might send to initiate a conversation with a chatbot assistant. + +**Category**: {category} + +Your response should include two parts: + +1. **Title**: A short, engaging title that reflects the user's intent + (e.g., 'Need Travel Advice', 'Question About Coding', 'Looking for Book Recommendations'). + +2. **Message**: The actual message that the user would send to the assistant. + This should be natural, engaging, and encourage a helpful response from the assistant. + **Avoid overly specific details; keep the message general and broadly applicable.** + +For example: +- Instead of "I've just adopted a 6-month-old Labrador puppy who's pulling on the leash," +write "I'm having trouble training my new puppy to walk nicely on a leash." + +Ensure each part is clearly labeled and separated as shown above. 
+Do not provide any additional text or explanation, and be extremely concise.
+
+**Context about the assistant:**
+- **Name**: {name}
+- **Description**: {description}
+- **Instructions**: {instructions}
+""".strip()
+
+
+if __name__ == "__main__":
+    print(PERSONA_CATEGORY_GENERATION_PROMPT)
+    print(PERSONA_STARTER_MESSAGE_CREATION_PROMPT)
diff --git a/backend/onyx/secondary_llm_flows/starter_message_creation.py b/backend/onyx/secondary_llm_flows/starter_message_creation.py
new file mode 100644
index 000000000..464c9610c
--- /dev/null
+++ b/backend/onyx/secondary_llm_flows/starter_message_creation.py
@@ -0,0 +1,271 @@
+import json
+import re
+from typing import Any
+from typing import cast
+from typing import Dict
+from typing import List
+
+from litellm import get_supported_openai_params
+from sqlalchemy.orm import Session
+
+from onyx.configs.chat_configs import NUM_PERSONA_PROMPT_GENERATION_CHUNKS
+from onyx.configs.chat_configs import NUM_PERSONA_PROMPTS
+from onyx.context.search.models import IndexFilters
+from onyx.context.search.models import InferenceChunk
+from onyx.context.search.postprocessing.postprocessing import cleanup_chunks
+from onyx.context.search.preprocessing.access_filters import (
+    build_access_filters_for_user,
+)
+from onyx.db.document_set import get_document_sets_by_ids
+from onyx.db.models import StarterMessageModel as StarterMessage
+from onyx.db.models import User
+from onyx.document_index.document_index_utils import get_both_index_names
+from onyx.document_index.factory import get_default_document_index
+from onyx.llm.factory import get_default_llms
+from onyx.prompts.starter_messages import PERSONA_CATEGORY_GENERATION_PROMPT
+from onyx.prompts.starter_messages import PERSONA_STARTER_MESSAGE_CREATION_PROMPT
+from onyx.utils.logger import setup_logger
+from onyx.utils.threadpool_concurrency import FunctionCall
+from onyx.utils.threadpool_concurrency import run_functions_in_parallel
+
+logger = setup_logger()
+
+
+def get_random_chunks_from_doc_sets(
+    doc_sets: List[str], db_session: Session, user: User | None = None
+) -> List[InferenceChunk]:
+    """
+    Retrieves random chunks from the specified document sets.
+    """
+    curr_ind_name, sec_ind_name = get_both_index_names(db_session)
+    document_index = get_default_document_index(curr_ind_name, sec_ind_name)
+
+    acl_filters = build_access_filters_for_user(user, db_session)
+    filters = IndexFilters(document_set=doc_sets, access_control_list=acl_filters)
+
+    chunks = document_index.random_retrieval(
+        filters=filters, num_to_retrieve=NUM_PERSONA_PROMPT_GENERATION_CHUNKS
+    )
+    return cleanup_chunks(chunks)
+
+
+def parse_categories(content: str) -> List[str]:
+    """
+    Parses the JSON array of categories from the LLM response.
+    """
+    # Clean the response to remove code fences and extra whitespace
+    content = content.strip().strip("```").strip()
+    if content.startswith("json"):
+        content = content[4:].strip()
+
+    try:
+        categories = json.loads(content)
+        if not isinstance(categories, list):
+            logger.error("Categories are not a list.")
+            return []
+        return categories
+    except json.JSONDecodeError as e:
+        logger.error(f"Failed to parse categories: {e}")
+        return []
+
+
+def generate_start_message_prompts(
+    name: str,
+    description: str,
+    instructions: str,
+    categories: List[str],
+    chunk_contents: str,
+    supports_structured_output: bool,
+    fast_llm: Any,
+) -> List[FunctionCall]:
+    """
+    Generates the list of FunctionCall objects for starter message generation.
+ """ + functions = [] + for category in categories: + # Create a prompt specific to the category + start_message_generation_prompt = ( + PERSONA_STARTER_MESSAGE_CREATION_PROMPT.format( + name=name, + description=description, + instructions=instructions, + category=category, + ) + ) + + if chunk_contents: + start_message_generation_prompt += ( + "\n\nExample content this assistant has access to:\n" + "'''\n" + f"{chunk_contents}" + "\n'''" + ) + + if supports_structured_output: + functions.append( + FunctionCall( + fast_llm.invoke, + (start_message_generation_prompt, None, None, StarterMessage), + ) + ) + else: + functions.append( + FunctionCall( + fast_llm.invoke, + (start_message_generation_prompt,), + ) + ) + return functions + + +def parse_unstructured_output(output: str) -> Dict[str, str]: + """ + Parses the assistant's unstructured output into a dictionary with keys: + - 'name' (Title) + - 'message' (Message) + """ + + # Debug output + logger.debug(f"LLM Output for starter message creation: {output}") + + # Patterns to match + title_pattern = r"(?i)^\**Title\**\s*:\s*(.+)" + message_pattern = r"(?i)^\**Message\**\s*:\s*(.+)" + + # Initialize the response dictionary + response_dict = {} + + # Split the output into lines + lines = output.strip().split("\n") + + # Variables to keep track of the current key being processed + current_key = None + current_value_lines = [] + + for line in lines: + # Check for title + title_match = re.match(title_pattern, line.strip()) + if title_match: + # Save previous key-value pair if any + if current_key and current_value_lines: + response_dict[current_key] = " ".join(current_value_lines).strip() + current_value_lines = [] + current_key = "name" + current_value_lines.append(title_match.group(1).strip()) + continue + + # Check for message + message_match = re.match(message_pattern, line.strip()) + if message_match: + if current_key and current_value_lines: + response_dict[current_key] = " ".join(current_value_lines).strip() + current_value_lines = [] + current_key = "message" + current_value_lines.append(message_match.group(1).strip()) + continue + + # If the line doesn't match a new key, append it to the current value + if current_key: + current_value_lines.append(line.strip()) + + # Add the last key-value pair + if current_key and current_value_lines: + response_dict[current_key] = " ".join(current_value_lines).strip() + + # Validate that the necessary keys are present + if not all(k in response_dict for k in ["name", "message"]): + raise ValueError("Failed to parse the assistant's response.") + + return response_dict + + +def generate_starter_messages( + name: str, + description: str, + instructions: str, + document_set_ids: List[int], + db_session: Session, + user: User | None, +) -> List[StarterMessage]: + """ + Generates starter messages by first obtaining categories and then generating messages for each category. + On failure, returns an empty list (or list with processed starter messages if some messages are processed successfully). 
+ """ + _, fast_llm = get_default_llms(temperature=0.5) + + provider = fast_llm.config.model_provider + model = fast_llm.config.model_name + + params = get_supported_openai_params(model=model, custom_llm_provider=provider) + supports_structured_output = ( + isinstance(params, list) and "response_format" in params + ) + + # Generate categories + category_generation_prompt = PERSONA_CATEGORY_GENERATION_PROMPT.format( + name=name, + description=description, + instructions=instructions, + num_categories=NUM_PERSONA_PROMPTS, + ) + + category_response = fast_llm.invoke(category_generation_prompt) + categories = parse_categories(cast(str, category_response.content)) + + if not categories: + logger.error("No categories were generated.") + return [] + + # Fetch example content if document sets are provided + if document_set_ids: + document_sets = get_document_sets_by_ids( + document_set_ids=document_set_ids, + db_session=db_session, + ) + + chunks = get_random_chunks_from_doc_sets( + doc_sets=[doc_set.name for doc_set in document_sets], + db_session=db_session, + user=user, + ) + + # Add example content context + chunk_contents = "\n".join(chunk.content.strip() for chunk in chunks) + else: + chunk_contents = "" + + # Generate prompts for starter messages + functions = generate_start_message_prompts( + name, + description, + instructions, + categories, + chunk_contents, + supports_structured_output, + fast_llm, + ) + + # Run LLM calls in parallel + if not functions: + logger.error("No functions to execute for starter message generation.") + return [] + + results = run_functions_in_parallel(function_calls=functions) + prompts = [] + + for response in results.values(): + try: + if supports_structured_output: + response_dict = json.loads(response.content) + else: + response_dict = parse_unstructured_output(response.content) + starter_message = StarterMessage( + name=response_dict["name"], + message=response_dict["message"], + ) + prompts.append(starter_message) + except (json.JSONDecodeError, ValueError) as e: + logger.error(f"Failed to parse starter message: {e}") + continue + + return prompts diff --git a/backend/onyx/server/features/persona/api.py b/backend/onyx/server/features/persona/api.py index 9911bfc18..ece335fd2 100644 --- a/backend/onyx/server/features/persona/api.py +++ b/backend/onyx/server/features/persona/api.py @@ -19,6 +19,7 @@ from onyx.configs.constants import MilestoneRecordType from onyx.configs.constants import NotificationType from onyx.db.engine import get_current_tenant_id from onyx.db.engine import get_session +from onyx.db.models import StarterMessageModel as StarterMessage from onyx.db.models import User from onyx.db.notification import create_notification from onyx.db.persona import create_assistant_category @@ -36,7 +37,11 @@ from onyx.db.persona import update_persona_shared_users from onyx.db.persona import update_persona_visibility from onyx.file_store.file_store import get_default_file_store from onyx.file_store.models import ChatFileType +from onyx.secondary_llm_flows.starter_message_creation import ( + generate_starter_messages, +) from onyx.server.features.persona.models import CreatePersonaRequest +from onyx.server.features.persona.models import GenerateStarterMessageRequest from onyx.server.features.persona.models import ImageGenerationToolStatus from onyx.server.features.persona.models import PersonaCategoryCreate from onyx.server.features.persona.models import PersonaCategoryResponse @@ -377,3 +382,26 @@ def build_final_template_prompt( 
retrieval_disabled=retrieval_disabled, ) ) + + +@basic_router.post("/assistant-prompt-refresh") +def build_assistant_prompts( + generate_persona_prompt_request: GenerateStarterMessageRequest, + db_session: Session = Depends(get_session), + user: User | None = Depends(current_user), +) -> list[StarterMessage]: + try: + logger.info( + "Generating starter messages for user: %s", user.id if user else "Anonymous" + ) + return generate_starter_messages( + name=generate_persona_prompt_request.name, + description=generate_persona_prompt_request.description, + instructions=generate_persona_prompt_request.instructions, + document_set_ids=generate_persona_prompt_request.document_set_ids, + db_session=db_session, + user=user, + ) + except Exception as e: + logger.exception("Failed to generate starter messages") + raise HTTPException(status_code=500, detail=str(e)) diff --git a/backend/onyx/server/features/persona/models.py b/backend/onyx/server/features/persona/models.py index e84175343..f656e87f3 100644 --- a/backend/onyx/server/features/persona/models.py +++ b/backend/onyx/server/features/persona/models.py @@ -17,6 +17,14 @@ from onyx.utils.logger import setup_logger logger = setup_logger() +# More minimal request for generating a persona prompt +class GenerateStarterMessageRequest(BaseModel): + name: str + description: str + instructions: str + document_set_ids: list[int] + + class CreatePersonaRequest(BaseModel): name: str description: str diff --git a/web/src/app/admin/add-connector/page.tsx b/web/src/app/admin/add-connector/page.tsx index dc1e0721f..587f1a085 100644 --- a/web/src/app/admin/add-connector/page.tsx +++ b/web/src/app/admin/add-connector/page.tsx @@ -75,7 +75,8 @@ export default function Page() { }, {} as Record ); - }, [sources, searchTerm]); + }, [sources, filterSources, searchTerm]); + const handleKeyPress = (e: React.KeyboardEvent) => { if (e.key === "Enter") { const filteredCategories = Object.entries(categorizedSources).filter( diff --git a/web/src/app/admin/assistants/AssistantEditor.tsx b/web/src/app/admin/assistants/AssistantEditor.tsx index 8bc1a724b..eb79bd045 100644 --- a/web/src/app/admin/assistants/AssistantEditor.tsx +++ b/web/src/app/admin/assistants/AssistantEditor.tsx @@ -1,7 +1,7 @@ "use client"; +import { Option } from "@/components/Dropdown"; import { generateRandomIconShape, createSVG } from "@/lib/assistantIconUtils"; - import { CCPairBasicInfo, DocumentSet, User } from "@/lib/types"; import { Separator } from "@/components/ui/separator"; import { Button } from "@/components/ui/button"; @@ -9,12 +9,11 @@ import { Textarea } from "@/components/ui/textarea"; import { IsPublicGroupSelector } from "@/components/IsPublicGroupSelector"; import { ArrayHelpers, - ErrorMessage, - Field, FieldArray, Form, Formik, FormikProps, + useFormikContext, } from "formik"; import { @@ -27,7 +26,6 @@ import { import { usePopup } from "@/components/admin/connectors/Popup"; import { getDisplayNameForModel, useCategories } from "@/lib/hooks"; import { DocumentSetSelectable } from "@/components/documentSet/DocumentSetSelectable"; -import { Option } from "@/components/Dropdown"; import { addAssistantToList } from "@/lib/assistants/updateAssistantPreferences"; import { checkLLMSupportsImageInput, destructureValue } from "@/lib/llm/utils"; import { ToolSnapshot } from "@/lib/tools/interfaces"; @@ -41,10 +39,9 @@ import { } from "@/components/ui/tooltip"; import Link from "next/link"; import { useRouter } from "next/navigation"; -import { useEffect, useState } from "react"; -import { 
FiInfo, FiX } from "react-icons/fi"; +import { useEffect, useMemo, useState } from "react"; +import { FiInfo, FiRefreshCcw } from "react-icons/fi"; import * as Yup from "yup"; -import { FullLLMProvider } from "../configuration/llm/interfaces"; import CollapsibleSection from "./CollapsibleSection"; import { SuccessfulPersonaUpdateRedirectType } from "./enums"; import { Persona, PersonaCategory, StarterMessage } from "./interfaces"; @@ -66,6 +63,9 @@ import { AdvancedOptionsToggle } from "@/components/AdvancedOptionsToggle"; import { buildImgUrl } from "@/app/chat/files/images/utils"; import { LlmList } from "@/components/llm/LLMList"; import { useAssistants } from "@/components/context/AssistantsContext"; +import { debounce } from "lodash"; +import { FullLLMProvider } from "../configuration/llm/interfaces"; +import StarterMessagesList from "./StarterMessageList"; import { Input } from "@/components/ui/input"; import { CategoryCard } from "./CategoryCard"; @@ -129,12 +129,14 @@ export function AssistantEditor({ ]; const [showAdvancedOptions, setShowAdvancedOptions] = useState(false); + const [hasEditedStarterMessage, setHasEditedStarterMessage] = useState(false); const [showPersonaCategory, setShowPersonaCategory] = useState(!admin); // state to persist across formik reformatting const [defautIconColor, _setDeafultIconColor] = useState( colorOptions[Math.floor(Math.random() * colorOptions.length)] ); + const [isRefreshing, setIsRefreshing] = useState(false); const [defaultIconShape, setDefaultIconShape] = useState(null); @@ -148,6 +150,10 @@ export function AssistantEditor({ const [removePersonaImage, setRemovePersonaImage] = useState(false); + const autoStarterMessageEnabled = useMemo( + () => llmProviders.length > 0, + [llmProviders.length] + ); const isUpdate = existingPersona !== undefined && existingPersona !== null; const existingPrompt = existingPersona?.prompts[0] ?? null; const defaultProvider = llmProviders.find( @@ -217,7 +223,24 @@ export function AssistantEditor({ existingPersona?.llm_model_provider_override ?? null, llm_model_version_override: existingPersona?.llm_model_version_override ?? null, - starter_messages: existingPersona?.starter_messages ?? [], + starter_messages: existingPersona?.starter_messages ?? [ + { + name: "", + message: "", + }, + { + name: "", + message: "", + }, + { + name: "", + message: "", + }, + { + name: "", + message: "", + }, + ], enabled_tools_map: enabledToolsMap, icon_color: existingPersona?.icon_color ?? defautIconColor, icon_shape: existingPersona?.icon_shape ?? defaultIconShape, @@ -228,6 +251,44 @@ export function AssistantEditor({ groups: existingPersona?.groups ?? 
[], }; + interface AssistantPrompt { + message: string; + name: string; + } + + const debouncedRefreshPrompts = debounce( + async (values: any, setFieldValue: any) => { + if (!autoStarterMessageEnabled) { + return; + } + setIsRefreshing(true); + try { + const response = await fetch("/api/persona/assistant-prompt-refresh", { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ + name: values.name, + description: values.description, + document_set_ids: values.document_set_ids, + instructions: values.system_prompt || values.task_prompt, + }), + }); + + const data: AssistantPrompt = await response.json(); + if (response.ok) { + setFieldValue("starter_messages", data); + } + } catch (error) { + console.error("Failed to refresh prompts:", error); + } finally { + setIsRefreshing(false); + } + }, + 1000 + ); + const [isRequestSuccessful, setIsRequestSuccessful] = useState(false); return ( @@ -421,6 +482,8 @@ export function AssistantEditor({ isSubmitting, values, setFieldValue, + errors, + ...formikProps }: FormikProps) => { function toggleToolInValues(toolId: number) { @@ -445,6 +508,7 @@ export function AssistantEditor({ return (
+                  {/* Refresh starter messages when name or description changes */}
+                  <div className="w-full flex flex-col">
+                    <div className="flex gap-x-2 items-center">
+                      <div className="block font-medium text-sm">
+                        Starter Messages
+                      </div>
+                    </div>
+
+                    <SubLabel>
+                      Pre-configured messages that help users understand what this
+                      assistant can do and how to interact with it effectively.
+                    </SubLabel>
+
+                    <div className="flex justify-end">
+                      <TooltipProvider delayDuration={50}>
+                        <Tooltip>
+                          <TooltipTrigger asChild>
+                            <Button
+                              type="button"
+                              size="sm"
+                              variant="outline"
+                              disabled={isRefreshing || !autoStarterMessageEnabled}
+                              onClick={() =>
+                                debouncedRefreshPrompts(values, setFieldValue)
+                              }
+                            >
+                              <FiRefreshCcw
+                                className={isRefreshing ? "animate-spin" : ""}
+                              />
+                              Generate
+                            </Button>
+                          </TooltipTrigger>
+                          {!autoStarterMessageEnabled && (
+                            <TooltipContent side="top" align="center">
+                              No LLM providers configured. Generation is not
+                              available.
+                            </TooltipContent>
+                          )}
+                        </Tooltip>
+                      </TooltipProvider>
+                    </div>
+
+                    <FieldArray
+                      name="starter_messages"
+                      render={(arrayHelpers: ArrayHelpers) => (
+                        <StarterMessagesList
+                          values={values.starter_messages}
+                          arrayHelpers={arrayHelpers}
+                          isRefreshing={isRefreshing}
+                          touchStarterMessages={() => {
+                            setHasEditedStarterMessage(true);
+                          }}
+                        />
+                      )}
+                    />
+                  </div>
+
+                  {admin && (
+                    <IsPublicGroupSelector
+                      formikProps={formikProps}
+                      objectName="assistant"
+                      enforceGroupSelection={false}
+                    />
+                  )}
-
-                  <div className="mb-6 flex flex-col">
-                    <div className="flex gap-x-2 items-center">
-                      <div className="block font-medium text-base">
-                        Starter Messages (Optional){" "}
-                      </div>
-                    </div>
-                    <SubLabel>
-                      Add pre-defined messages to help users get started. Only
-                      the first 4 will be displayed.
-                    </SubLabel>
-                    <FieldArray
-                      name="starter_messages"
-                      render={(arrayHelpers: ArrayHelpers) => (
-                        <div>
-                          {values.starter_messages &&
-                            values.starter_messages.length > 0 &&
-                            values.starter_messages.map(
-                              (
-                                starterMessage: StarterMessage,
-                                index: number
-                              ) => {
-                                return (
-                                  <div key={index} className="mt-4">
-                                    <div className="flex">
-                                      <div className="w-full mr-6 border border-border p-3 rounded">
-                                        <div>
-                                          <Label small>Name</Label>
-                                          <SubLabel>
-                                            Shows up as the &quot;title&quot;
-                                            for this Starter Message. For
-                                            example, &quot;Write an email&quot;.
-                                          </SubLabel>
-                                          <Field
-                                            name={`starter_messages[${index}].name`}
-                                            className="border border-border bg-background rounded w-full py-2 px-3"
-                                            autoComplete="off"
-                                          />
-                                          <ErrorMessage
-                                            name={`starter_messages[${index}].name`}
-                                            component="div"
-                                            className="text-error text-sm mt-1"
-                                          />
-                                        </div>
-
-                                        <div className="mt-3">
-                                          <Label small>Message</Label>
-                                          <SubLabel>
-                                            The actual message to be sent as the
-                                            initial user message if a user
-                                            selects this starter prompt. For
-                                            example, &quot;Write me an email to
-                                            a client about a new billing feature
-                                            we just released.&quot;
-                                          </SubLabel>
-                                          <Field
-                                            name={`starter_messages[${index}].message`}
-                                            className="border border-border bg-background rounded w-full py-2 px-3"
-                                            as="textarea"
-                                            autoComplete="off"
-                                          />
-                                          <ErrorMessage
-                                            name={`starter_messages[${index}].message`}
-                                            component="div"
-                                            className="text-error text-sm mt-1"
-                                          />
-                                        </div>
-                                      </div>
-                                      <div className="my-auto">
-                                        <FiX
-                                          className="my-auto w-10 h-10 cursor-pointer hover:bg-hover rounded p-2"
-                                          onClick={() =>
-                                            arrayHelpers.remove(index)
-                                          }
-                                        />
-                                      </div>
-                                    </div>
-                                  </div>
-                                );
-                              }
-                            )}
-
-                          <Button
-                            onClick={() => {
-                              arrayHelpers.push({
-                                name: "",
-                                message: "",
-                              });
-                            }}
-                            className="mt-3"
-                            size="sm"
-                            type="button"
-                          >
-                            Add New
-                          </Button>
-                        </div>
-                      )}
-                    />
-                  </div>
diff --git a/web/src/app/admin/assistants/StarterMessageList.tsx b/web/src/app/admin/assistants/StarterMessageList.tsx
new file mode 100644
--- /dev/null
+++ b/web/src/app/admin/assistants/StarterMessageList.tsx
@@ -0,0 +1,198 @@
+import React from "react";
+import { ArrayHelpers, Field, useFormikContext } from "formik";
+import { FiInfo } from "react-icons/fi";
+
+import { Button } from "@/components/ui/button";
+import {
+  Tooltip,
+  TooltipContent,
+  TooltipProvider,
+  TooltipTrigger,
+} from "@/components/ui/tooltip";
+import { StarterMessage } from "./interfaces";
+
+export default function StarterMessagesList({
+  values,
+  arrayHelpers,
+  isRefreshing,
+  touchStarterMessages,
+}: {
+  values: StarterMessage[];
+  arrayHelpers: ArrayHelpers;
+  isRefreshing: boolean;
+  touchStarterMessages: () => void;
+}) {
+  const { handleChange } = useFormikContext();
+
+  // Group starter messages into rows of 2 for display purposes
+  const rows = values.reduce((acc: StarterMessage[][], curr, i) => {
+    if (i % 2 === 0) acc.push([curr]);
+    else acc[acc.length - 1].push(curr);
+    return acc;
+  }, []);
+
+  const canAddMore = values.length <= 6;
+
+  return (
+    <div className="flex flex-col gap-4">
+      {rows.map((row, rowIndex) => (
+        <div key={rowIndex} className="flex gap-4">
+          {row.map((starterMessage, colIndex) => {
+            const index = rowIndex * 2 + colIndex;
+            return (
+              <div
+                key={index}
+                className="w-1/2 rounded border border-border bg-background p-3"
+              >
+                {isRefreshing ? (
+                  // Skeleton placeholders while new prompts are generated
+                  <div className="flex animate-pulse flex-col gap-2">
+                    <div className="h-4 w-1/4 rounded bg-background-200" />
+                    <div className="h-8 w-full rounded bg-background-200" />
+                    <div className="h-4 w-1/4 rounded bg-background-200" />
+                    <div className="h-16 w-full rounded bg-background-200" />
+                  </div>
+                ) : (
+                  <>
+                    <div className="flex items-center gap-x-1">
+                      <span className="text-sm font-medium">Name</span>
+                      <TooltipProvider delayDuration={50}>
+                        <Tooltip>
+                          <TooltipTrigger>
+                            <FiInfo size={12} />
+                          </TooltipTrigger>
+                          <TooltipContent side="top" align="center">
+                            Shows up as the &quot;title&quot; for this
+                            Starter Message. For example, &quot;Write an
+                            email.&quot;
+                          </TooltipContent>
+                        </Tooltip>
+                      </TooltipProvider>
+                    </div>
+                    <Field
+                      name={`starter_messages.${index}.name`}
+                      className="mt-1 w-full rounded border border-border bg-background px-2 py-1 text-sm"
+                      autoComplete="off"
+                      onChange={(e: React.ChangeEvent<HTMLInputElement>) => {
+                        touchStarterMessages();
+                        handleChange(e);
+                      }}
+                    />
+
+                    <div className="mt-3 flex items-center gap-x-1">
+                      <span className="text-sm font-medium">Message</span>
+                      <TooltipProvider delayDuration={50}>
+                        <Tooltip>
+                          <TooltipTrigger>
+                            <FiInfo size={12} />
+                          </TooltipTrigger>
+                          <TooltipContent side="top" align="center">
+                            The actual message to be sent as the initial
+                            user message.
+                          </TooltipContent>
+                        </Tooltip>
+                      </TooltipProvider>
+                    </div>
+                    <Field
+                      name={`starter_messages.${index}.message`}
+                      className="mt-1 w-full rounded border border-border bg-background px-2 py-1 text-sm"
+                      as="textarea"
+                      autoComplete="off"
+                      onChange={(e: React.ChangeEvent<HTMLTextAreaElement>) => {
+                        touchStarterMessages();
+                        handleChange(e);
+                      }}
+                    />
+                  </>
+                )}
+              </div>
+            );
+          })}
+        </div>
+      ))}
+
+      {canAddMore && (
+        <Button
+          type="button"
+          size="sm"
+          variant="outline"
+          className="w-fit"
+          onClick={() => {
+            touchStarterMessages();
+            arrayHelpers.push({ name: "", message: "" });
+          }}
+        >
+          Add New
+        </Button>
+      )}
+    </div>
+  );
+}
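End-to-end, the feature can be sanity-checked against the new endpoint once the backend is running. The web UI calls it through the Next.js proxy at `/api/persona/assistant-prompt-refresh`; hitting the API server directly, the route lives under the persona router. A minimal sketch — the host/port are the usual local-dev defaults and are an assumption, as is running with auth disabled (otherwise a session cookie must be attached):

```python
import requests

# Matches GenerateStarterMessageRequest in backend/onyx/server/features/persona/models.py
payload = {
    "name": "Support Helper",
    "description": "Answers questions about our product",
    "instructions": "Be concise and cite the docs where possible.",
    "document_set_ids": [],  # empty list -> no random sample chunks are fetched
}

resp = requests.post(
    "http://localhost:8080/persona/assistant-prompt-refresh",  # assumed local port
    json=payload,
)
resp.raise_for_status()

# The response is a list of StarterMessage objects: {"name": ..., "message": ...}
for starter in resp.json():
    print(f"{starter['name']}: {starter['message']}")
```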