diff --git a/src/components/LLMConversationListViewer.tsx b/src/components/LLMConversationListViewer.tsx
new file mode 100644
index 0000000..e230c74
--- /dev/null
+++ b/src/components/LLMConversationListViewer.tsx
@@ -0,0 +1,202 @@
+/**
+ * LLM Conversation List Viewer
+ * Displays all LLM conversations with search and sort
+ */
+
+import { useState, useMemo } from "react";
+import { useLiveQuery } from "dexie-react-hooks";
+import { MessageSquare, Trash2 } from "lucide-react";
+import db from "@/services/db";
+import type { LLMConversation } from "@/types/llm";
+import { Button } from "./ui/button";
+import { Input } from "./ui/input";
+import { useGrimoire } from "@/core/state";
+import Timestamp from "./Timestamp";
+
+export function LLMConversationListViewer() {
+ const [searchQuery, setSearchQuery] = useState("");
+ const [sortBy, setSortBy] = useState<"recent" | "oldest" | "tokens">(
+ "recent",
+ );
+ const { addWindow } = useGrimoire();
+
+ // Load conversations from Dexie
+ const conversations = useLiveQuery(() =>
+ db.llmConversations.orderBy("updatedAt").reverse().toArray(),
+ );
+
+ // Filter and sort conversations
+ const filteredConversations = useMemo(() => {
+ if (!conversations) return [];
+
+ let filtered = conversations;
+
+ // Filter by search query
+ if (searchQuery.trim()) {
+ const query = searchQuery.toLowerCase();
+ filtered = filtered.filter(
+ (conv) =>
+ conv.title.toLowerCase().includes(query) ||
+ conv.messages.some((m) => m.content.toLowerCase().includes(query)),
+ );
+ }
+
+ // Sort
+ const sorted = [...filtered];
+ switch (sortBy) {
+ case "recent":
+ sorted.sort((a, b) => b.updatedAt - a.updatedAt);
+ break;
+ case "oldest":
+ sorted.sort((a, b) => a.updatedAt - b.updatedAt);
+ break;
+ case "tokens":
+ sorted.sort((a, b) => b.totalTokens.total - a.totalTokens.total);
+ break;
+ }
+
+ return sorted;
+ }, [conversations, searchQuery, sortBy]);
+
+ // Handle opening a conversation
+ const handleOpen = (conversation: LLMConversation) => {
+ addWindow("llm-chat", {
+ conversationId: conversation.id,
+ });
+ };
+
+ // Handle deleting a conversation
+ const handleDelete = async (conversation: LLMConversation) => {
+ if (
+ confirm(
+ `Delete conversation "${conversation.title}"? This cannot be undone.`,
+ )
+ ) {
+ await db.llmConversations.delete(conversation.id);
+ }
+ };
+
+ if (!conversations) {
+ return (
+
+ Loading conversations...
+
+ );
+ }
+
+ return (
+
+ {/* Header with search and sort */}
+
+
+ setSearchQuery(e.target.value)}
+ className="flex-1"
+ />
+
+
+
+ {filteredConversations.length} conversation
+ {filteredConversations.length !== 1 ? "s" : ""}
+
+
+
+ {/* Conversation list */}
+
+ {filteredConversations.length === 0 ? (
+
+ {searchQuery ? (
+ <>
+
No conversations found matching "{searchQuery}"
+
+ >
+ ) : (
+ <>
+
+
No conversations yet
+
+ Use llm{" "}
+ command to start
+
+ >
+ )}
+
+ ) : (
+
+ {filteredConversations.map((conversation) => (
+
handleOpen(conversation)}
+ >
+
+
+
+ {conversation.title}
+
+
+
+
+
+
+
+ {conversation.messages.length} message
+ {conversation.messages.length !== 1 ? "s" : ""} •{" "}
+ {conversation.totalTokens.total.toLocaleString()} tokens
+ {conversation.totalCost > 0 && (
+ <> • ${conversation.totalCost.toFixed(4)}>
+ )}
+
+
+
+ Updated
+
+
+ {/* Preview of last message */}
+ {conversation.messages.length > 0 && (
+
+ {conversation.messages[
+ conversation.messages.length - 1
+ ].content.slice(0, 100)}
+ ...
+
+ )}
+
+
+ ))}
+
+ )}
+
+
+ );
+}
diff --git a/src/components/WindowRenderer.tsx b/src/components/WindowRenderer.tsx
index 8968ab0..963e36b 100644
--- a/src/components/WindowRenderer.tsx
+++ b/src/components/WindowRenderer.tsx
@@ -33,6 +33,11 @@ const ChatViewer = lazy(() =>
const LLMChatViewer = lazy(() =>
import("./LLMChatViewer").then((m) => ({ default: m.LLMChatViewer })),
);
+const LLMConversationListViewer = lazy(() =>
+ import("./LLMConversationListViewer").then((m) => ({
+ default: m.LLMConversationListViewer,
+ })),
+);
const SpellsViewer = lazy(() =>
import("./SpellsViewer").then((m) => ({ default: m.SpellsViewer })),
);
@@ -192,6 +197,9 @@ export function WindowRenderer({ window, onClose }: WindowRendererProps) {
/>
);
break;
+    case "llm-list":
+      content = <LLMConversationListViewer />;
+      break;
case "spells":
content = ;
break;
diff --git a/src/components/llm/ConfigPanel.tsx b/src/components/llm/ConfigPanel.tsx
index 391a9da..85e61b1 100644
--- a/src/components/llm/ConfigPanel.tsx
+++ b/src/components/llm/ConfigPanel.tsx
@@ -4,7 +4,7 @@
*/
import { useState, useEffect } from "react";
-import { getProvider } from "@/lib/llm/providers/registry";
+import { getProvider, getAllProviders } from "@/lib/llm/providers/registry";
import type { LLMConfig, ModelInfo } from "@/types/llm";
import { Button } from "@/components/ui/button";
import { Input } from "@/components/ui/input";
@@ -68,6 +68,7 @@ export function ConfigPanel({ config, onChange, onClear }: ConfigPanelProps) {
}
const currentModel = availableModels.find((m) => m.id === config.model);
+ const availableProviders = getAllProviders();
return (
@@ -75,9 +76,34 @@ export function ConfigPanel({ config, onChange, onClear }: ConfigPanelProps) {
Configuration
+ {/* Provider Selection */}
+
+
+
+
+
{/* API Key */}
-
+
+ {/* Base URL (Optional) */}
+
+
+
+ onChange({
+ ...config,
+ provider: { ...config.provider, baseUrl: e.target.value },
+ })
+ }
+ placeholder="https://api.openai.com/v1"
+ className="mt-1 font-mono text-xs"
+ />
+
+ For custom endpoints (OpenRouter, local servers, proxies)
+
+
+
{/* Model Selection */}
diff --git a/src/lib/llm-parser.ts b/src/lib/llm-parser.ts
index 22c3880..728db05 100644
--- a/src/lib/llm-parser.ts
+++ b/src/lib/llm-parser.ts
@@ -1,6 +1,6 @@
/**
* Parser for the llm command
- * Syntax: llm [conversation-id]
+ * Supports: llm, llm list, llm open <id>, llm <id>
*/
import type { LLMCommandResult } from "@/types/llm";
@@ -11,7 +11,27 @@ export function parseLLMCommand(args: string[]): LLMCommandResult {
return {};
}
- // If first arg looks like a UUID, treat as conversation ID
+ // Handle subcommands
+ const subcommand = args[0].toLowerCase();
+
+ if (subcommand === "list") {
+ // Open conversation list viewer
+ return { showList: true };
+ }
+
+  if (subcommand === "open") {
+    // llm open <id>
+    if (args.length < 2) {
+      throw new Error("Usage: llm open <id>");
+    }
+    const conversationId = args[1];
+    if (!conversationId.match(/^[a-f0-9-]{36}$/i)) {
+      throw new Error(`Invalid conversation ID: ${conversationId}`);
+    }
+    return { conversationId };
+  }
+
+ // If first arg looks like a UUID, treat as conversation ID (shorthand for "open")
if (args[0].match(/^[a-f0-9-]{36}$/i)) {
return { conversationId: args[0] };
}
@@ -19,6 +39,8 @@ export function parseLLMCommand(args: string[]): LLMCommandResult {
throw new Error(
`Invalid LLM command. Usage:
llm # Start new conversation
- llm <id> # Resume existing conversation`,
+ llm list # Browse all conversations
+ llm open <id> # Open specific conversation
+ llm <id> # Open specific conversation (shorthand)`,
);
}
diff --git a/src/types/app.ts b/src/types/app.ts
index 2734999..56ab058 100644
--- a/src/types/app.ts
+++ b/src/types/app.ts
@@ -18,6 +18,7 @@ export type AppId =
| "conn"
| "chat"
| "llm-chat"
+ | "llm-list"
| "spells"
| "spellbooks"
| "win";
diff --git a/src/types/llm.ts b/src/types/llm.ts
index c536a3f..3b39f52 100644
--- a/src/types/llm.ts
+++ b/src/types/llm.ts
@@ -69,4 +69,5 @@ export interface LLMCommandResult {
conversationId?: string;
provider?: LLMProvider;
model?: string;
+ showList?: boolean; // Show conversation list viewer
}
diff --git a/src/types/man.ts b/src/types/man.ts
index 771ef84..b76f673 100644
--- a/src/types/man.ts
+++ b/src/types/man.ts
@@ -378,24 +378,39 @@ export const manPages: Record = {
llm: {
name: "llm",
section: "1",
- synopsis: "llm [conversation-id]",
+ synopsis: "llm [list|open <id>|<id>]",
description:
- "Chat with AI language models (OpenAI GPT, etc.). Start a new conversation or resume an existing one. Configure your API key and model settings via the configuration panel. Messages are streamed in real-time with token usage and cost tracking.",
+ "Chat with AI language models (OpenAI GPT, etc.). Start a new conversation, browse existing conversations, or resume a specific one. Configure your API key, model, and other settings via the configuration panel. Messages are streamed in real-time with token usage and cost tracking.",
options: [
{
- flag: "[conversation-id]",
- description: "Optional conversation ID to resume (UUID format)",
+ flag: "list",
+ description: "Browse all conversations with search and sort",
+ },
+ {
+ flag: "open <id>",
+ description: "Open specific conversation by ID",
+ },
+ {
+ flag: "<id>",
+ description: "Open conversation by ID (shorthand for 'open')",
},
],
examples: [
"llm Start a new conversation",
- "llm abc123-def456-... Resume existing conversation",
+ "llm list Browse all conversations",
+ "llm open abc123-... Open specific conversation",
+ "llm abc123-def456-... Open conversation (shorthand)",
],
seeAlso: ["chat"],
- appId: "llm-chat",
+ appId: "llm-chat", // Default, overridden by argParser
category: "System",
argParser: async (args: string[]) => {
- return parseLLMCommand(args);
+ const result = parseLLMCommand(args);
+ // Override appId if showing list
+ if (result.showList) {
+ return { appId: "llm-list" as const };
+ }
+ return result;
},
},
profile: {