feat(bot): Add test-assistant script for testing LLM in isolation

Adds an `npm run assistant "question"` command to exercise the LLM processing
pipeline in isolation, without needing a Nostr connection.

Also:
- Add proper API key validation with clear error message
- Add debug logging (DEBUG=1) for troubleshooting

Usage:
  export ANTHROPIC_API_KEY=sk-ant-...
  npm run assistant "how do I see what my contacts are zapping"

https://claude.ai/code/session_01X4HWkMGrghBv2RfY89L5Lz
This commit is contained in:
Claude
2026-02-03 12:33:19 +00:00
parent 5e806d08e8
commit 0584d669b8
3 changed files with 70 additions and 2 deletions

View File

@@ -7,7 +7,8 @@
"scripts": {
"build": "tsc",
"start": "node dist/index.js",
"dev": "tsx src/index.ts"
"dev": "tsx src/index.ts",
"assistant": "tsx src/test-assistant.ts"
},
"dependencies": {
"@mariozechner/pi-ai": "^0.51.2",

View File

@@ -25,6 +25,16 @@ import { getNipInfo, searchNips } from "./data/nips.js";
const PROVIDER = process.env.LLM_PROVIDER || "anthropic";
const MODEL_ID = process.env.LLM_MODEL || "claude-3-5-haiku-20241022";
// API key from environment
const API_KEY = process.env.ANTHROPIC_API_KEY;
if (!API_KEY) {
console.error("ERROR: ANTHROPIC_API_KEY environment variable is not set.");
console.error("Please set it before running the bot:");
console.error(" export ANTHROPIC_API_KEY=sk-ant-...");
process.exit(1);
}
// Use default model for simplicity (typed correctly)
const model = getModel("anthropic", "claude-3-5-haiku-20241022");
@@ -247,7 +257,14 @@ export async function processMessage(userMessage: string): Promise<string> {
while (iterations < maxIterations) {
iterations++;
const response = await complete(model, context);
const response = await complete(model, context, { apiKey: API_KEY });
// Debug: log full response if DEBUG is set
if (process.env.DEBUG) {
console.error(`DEBUG: Iteration ${iterations}`);
console.error("DEBUG: response =", JSON.stringify(response, null, 2));
}
context.messages.push(response);
// Check for tool calls
@@ -257,6 +274,16 @@ export async function processMessage(userMessage: string): Promise<string> {
// No tool calls, extract text response
const textBlocks = response.content.filter((b) => b.type === "text");
const textContent = textBlocks.map((b) => (b as any).text).join("\n");
// Debug: log response structure if empty
if (!textContent && process.env.DEBUG) {
console.error("DEBUG: Empty text response");
console.error(
"DEBUG: response.content =",
JSON.stringify(response.content, null, 2),
);
}
return (
textContent ||
"I couldn't generate a response. Please try rephrasing your question."

40
bot/src/test-assistant.ts Normal file
View File

@@ -0,0 +1,40 @@
/**
 * Test script for the Grimoire REQ Assistant
 *
 * Runs a single question through the LLM pipeline (`processMessage`)
 * without connecting to Nostr, so the assistant can be tested in isolation.
 *
 * Usage: npm run assistant "your question here"
 */
import { processMessage } from "./llm.js";

/**
 * CLI entry point.
 *
 * Joins all CLI arguments into one question string, sends it through
 * `processMessage`, and prints the assistant's response. Exits with
 * status 1 when no question is given or when processing fails.
 */
async function main(): Promise<void> {
  // Everything after the script name is the question; join so quoting
  // the whole question on the command line is optional.
  const question = process.argv.slice(2).join(" ");

  if (!question) {
    console.error('Usage: npm run assistant "your question here"');
    console.error("");
    console.error("Examples:");
    console.error(
      ' npm run assistant "how do I see what my contacts are zapping"',
    );
    console.error(' npm run assistant "find all articles about bitcoin"');
    console.error(' npm run assistant "what kind is used for reactions"');
    process.exit(1);
  }

  console.log("Question:", question);
  console.log("");
  console.log("Processing...");
  console.log("");

  try {
    const response = await processMessage(question);
    console.log("Response:");
    console.log("=========");
    console.log(response);
  } catch (error) {
    // processMessage may reject (e.g. API/network failure); surface the
    // error and signal failure to the shell.
    console.error("Error:", error);
    process.exit(1);
  }
}

// `void` marks this as an intentionally un-awaited top-level call
// (fixes the floating-promise lint violation); all rejections are
// already handled inside main().
void main();