add mcpbridge server

dbth 2025-02-09 21:05:47 +01:00
parent de58d17f73
commit 0c5f8da296
45 changed files with 2584 additions and 36 deletions

.idea/dataSources.xml generated (43 lines changed)

@@ -9,7 +9,7 @@
<working-dir>$ProjectFileDir$</working-dir>
</data-source>
<data-source source="LOCAL" name="bot" uuid="35aa282b-8394-415f-9bbb-b649db25cd4d">
- <driver-ref>038acd88-11f0-4c07-ae7d-36e6f74bad7c</driver-ref>
+ <driver-ref>sqlite.xerial</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
<jdbc-url>jdbc:sqlite:$PROJECT_DIR$/db/bot.db</jdbc-url>
@@ -21,7 +21,7 @@
</libraries>
</data-source>
<data-source source="LOCAL" name="subscriptions" uuid="7914fe2c-114f-4e86-8ddb-7883b17e9302">
- <driver-ref>038acd88-11f0-4c07-ae7d-36e6f74bad7c</driver-ref>
+ <driver-ref>sqlite.xerial</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
<jdbc-url>jdbc:sqlite:$PROJECT_DIR$/db/subscriptions.db</jdbc-url>
@@ -33,7 +33,7 @@
</libraries>
</data-source>
<data-source source="LOCAL" name="Translator" uuid="7e65ee79-fe23-4823-bae3-244dbefdd7f2">
- <driver-ref>038acd88-11f0-4c07-ae7d-36e6f74bad7c</driver-ref>
+ <driver-ref>sqlite.xerial</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
<jdbc-url>jdbc:sqlite:$PROJECT_DIR$/db/Translator.db</jdbc-url>
@@ -45,7 +45,7 @@
</libraries>
</data-source>
<data-source source="LOCAL" name="Unstable Diffusion" uuid="ef14cba4-1991-4706-8760-77dba3d8e8e4">
- <driver-ref>038acd88-11f0-4c07-ae7d-36e6f74bad7c</driver-ref>
+ <driver-ref>sqlite.xerial</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
<jdbc-url>jdbc:sqlite:$PROJECT_DIR$/db/Unstable Diffusion.db</jdbc-url>
@@ -57,7 +57,7 @@
</libraries>
</data-source>
<data-source source="LOCAL" name="Bot" uuid="80b7a7dc-a233-4d7b-bd0e-e1d0bc7226e8">
- <driver-ref>038acd88-11f0-4c07-ae7d-36e6f74bad7c</driver-ref>
+ <driver-ref>sqlite.xerial</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
<jdbc-url>jdbc:sqlite:$PROJECT_DIR$/db/nostr_mostr.db</jdbc-url>
@@ -69,7 +69,7 @@
</libraries>
</data-source>
<data-source source="LOCAL" name="Profiles" uuid="77eda71f-1c66-4b3d-bc34-dfe34fb45fc2">
- <driver-ref>038acd88-11f0-4c07-ae7d-36e6f74bad7c</driver-ref>
+ <driver-ref>sqlite.xerial</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
<jdbc-url>jdbc:sqlite:$PROJECT_DIR$/db/nostr_profiles.db</jdbc-url>
@@ -102,7 +102,7 @@
<working-dir>$ProjectFileDir$</working-dir>
</data-source>
<data-source source="LOCAL" name="subscriptions" uuid="ccd96349-b12f-47d5-8caf-c0c8c359d831">
- <driver-ref>038acd88-11f0-4c07-ae7d-36e6f74bad7c</driver-ref>
+ <driver-ref>sqlite.xerial</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
<jdbc-url>jdbc:sqlite:$PROJECT_DIR$/db/subscriptions</jdbc-url>
@@ -127,5 +127,34 @@
<jdbc-url>jdbc:sqlite:identifier.sqlite</jdbc-url>
<working-dir>$ProjectFileDir$</working-dir>
</data-source>
<data-source source="LOCAL" name="Popular from npubs you don't follow.db" uuid="ee98abb4-9b19-4cef-9cd9-ca495fb24db9">
<driver-ref>sqlite.xerial</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
<jdbc-url>jdbc:sqlite:$PROJECT_DIR$/db/Popular from npubs you don't follow.db</jdbc-url>
<working-dir>$ProjectFileDir$</working-dir>
</data-source>
<data-source source="LOCAL" name="Popular from npubs you don't follow.db [2]" uuid="d6be93f5-15d2-44f7-acc4-e3c443a4280a">
<driver-ref>sqlite.xerial</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
<jdbc-url>jdbc:sqlite:$PROJECT_DIR$/db/Popular from npubs you don't follow.db</jdbc-url>
<working-dir>$ProjectFileDir$</working-dir>
</data-source>
<data-source source="LOCAL" name="Popular from npubs you follow" uuid="48ca5252-5f38-4a1f-87fb-97adfc613215">
<driver-ref>c24e9046-eb72-47b7-bbb1-d45c84d4c415</driver-ref>
<synchronize>true</synchronize>
<jdbc-driver>org.sqlite.JDBC</jdbc-driver>
<jdbc-url>jdbc:sqlite:$PROJECT_DIR$/db/Popular from npubs you follow.db</jdbc-url>
<working-dir>$ProjectFileDir$</working-dir>
<libraries>
<library>
<url>file://$APPLICATION_CONFIG_DIR$/jdbc-drivers/Xerial SQLiteJDBC/3.45.1/org/xerial/sqlite-jdbc/3.45.1.0/sqlite-jdbc-3.45.1.0.jar</url>
</library>
<library>
<url>file://$APPLICATION_CONFIG_DIR$/jdbc-drivers/Xerial SQLiteJDBC/3.45.1/org/slf4j/slf4j-api/1.7.36/slf4j-api-1.7.36.jar</url>
</library>
</libraries>
</data-source>
</component>
</project>

.idea/dvm.iml generated (2 lines changed)

@@ -14,7 +14,7 @@
<excludeFolder url="file://$MODULE_DIR$/p1234" />
<excludeFolder url="file://$MODULE_DIR$/p23" />
</content>
<orderEntry type="jdk" jdkName="Python 3.12 (dvm) (2)" jdkType="Python SDK" />
<orderEntry type="jdk" jdkName="Python 3.12 virtualenv at ~/Documents/GitHub/nostrdvm/final" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

.idea/misc.xml generated (2 lines changed)

@@ -3,5 +3,5 @@
<component name="Black">
<option name="sdkName" value="Python 3.10 (dvm)" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12 (dvm) (2)" project-jdk-type="Python SDK" />
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.12 virtualenv at ~/Documents/GitHub/nostrdvm/final" project-jdk-type="Python SDK" />
</project>

@@ -0,0 +1,379 @@
# src/__main__.py
import argparse
import asyncio
import json
import logging
import os
import signal
import sys
from typing import List
import anyio
# Rich imports
from rich import print
from rich.markdown import Markdown
from rich.panel import Panel
from chat_handler import handle_chat_mode, get_input
from config import load_config
from messages.send_ping import send_ping
from messages.send_prompts import send_prompts_list
from messages.send_resources import send_resources_list
from messages.send_initialize_message import send_initialize
from messages.send_call_tool import send_call_tool
from messages.send_tools_list import send_tools_list
from transport.stdio.stdio_client import stdio_client
# Default path for the configuration file
DEFAULT_CONFIG_FILE = "server_config.json"
# Configure logging
logging.basicConfig(
level=logging.CRITICAL,
format="%(asctime)s - %(levelname)s - %(message)s",
stream=sys.stderr,
)
def signal_handler(sig, frame):
# Ignore subsequent SIGINT signals
signal.signal(signal.SIGINT, signal.SIG_IGN)
# pretty exit
print("\n[bold red]Goodbye![/bold red]")
# Immediately and forcibly kill the process
os.kill(os.getpid(), signal.SIGKILL)
# signal handler
signal.signal(signal.SIGINT, signal_handler)
async def handle_command(command: str, server_streams: List[tuple]) -> bool:
"""Handle specific commands dynamically with multiple servers."""
try:
if command == "ping":
print("[cyan]\nPinging Servers...[/cyan]")
for i, (read_stream, write_stream) in enumerate(server_streams):
result = await send_ping(read_stream, write_stream)
server_num = i + 1
if result:
ping_md = f"## Server {server_num} Ping Result\n\n✅ **Server is up and running**"
print(Panel(Markdown(ping_md), style="bold green"))
else:
ping_md = f"## Server {server_num} Ping Result\n\n❌ **Server ping failed**"
print(Panel(Markdown(ping_md), style="bold red"))
elif command == "list-tools":
print("[cyan]\nFetching Tools List from all servers...[/cyan]")
for i, (read_stream, write_stream) in enumerate(server_streams):
response = await send_tools_list(read_stream, write_stream)
tools_list = response.get("tools", [])
server_num = i + 1
if not tools_list:
tools_md = (
f"## Server {server_num} Tools List\n\nNo tools available."
)
else:
tools_md = f"## Server {server_num} Tools List\n\n" + "\n".join(
[
f"- **{t.get('name')}**: {t.get('description', 'No description')}"
for t in tools_list
]
)
print(
Panel(
Markdown(tools_md),
title=f"Server {server_num} Tools",
style="bold cyan",
)
)
elif command == "call-tool":
tool_name = await get_input("[bold magenta]Enter tool name[/bold magenta]")
if not tool_name:
print("[red]Tool name cannot be empty.[/red]")
return True
arguments_str = await get_input('[bold magenta]Enter tool arguments as JSON (e.g., {"key": "value"})[/bold magenta]')
try:
arguments = json.loads(arguments_str)
except json.JSONDecodeError as e:
print(f"[red]Invalid JSON arguments format:[/red] {e}")
return True
print(f"[cyan]\nCalling tool '{tool_name}' with arguments:\n[/cyan]")
print(
Panel(
Markdown(f"```json\n{json.dumps(arguments, indent=2)}\n```"),
style="dim",
)
)
for read_stream, write_stream in server_streams:
result = await send_call_tool(tool_name, arguments, read_stream, write_stream)
if result.get("isError"):
# print(f"[red]Error calling tool:[/red] {result.get('error')}")
continue
response_content = result.get("content", "No content")
try:
if response_content[0]['text'].startswith('Error:'):
continue
except (TypeError, KeyError, IndexError):
pass
print(
Panel(
Markdown(f"### Tool Response\n\n{response_content}"),
style="green",
)
)
elif command == "list-resources":
print("[cyan]\nFetching Resources List from all servers...[/cyan]")
for i, (read_stream, write_stream) in enumerate(server_streams):
response = await send_resources_list(read_stream, write_stream)
resources_list = response.get("resources", []) if response else None
server_num = i + 1
if not resources_list:
resources_md = f"## Server {server_num} Resources List\n\nNo resources available."
else:
resources_md = f"## Server {server_num} Resources List\n"
for r in resources_list:
if isinstance(r, dict):
json_str = json.dumps(r, indent=2)
resources_md += f"\n```json\n{json_str}\n```"
else:
resources_md += f"\n- {r}"
print(
Panel(
Markdown(resources_md),
title=f"Server {server_num} Resources",
style="bold cyan",
)
)
elif command == "list-prompts":
print("[cyan]\nFetching Prompts List from all servers...[/cyan]")
for i, (read_stream, write_stream) in enumerate(server_streams):
response = await send_prompts_list(read_stream, write_stream)
prompts_list = response.get("prompts", []) if response else None
server_num = i + 1
if not prompts_list:
prompts_md = (
f"## Server {server_num} Prompts List\n\nNo prompts available."
)
else:
prompts_md = f"## Server {server_num} Prompts List\n\n" + "\n".join(
[f"- {p}" for p in prompts_list]
)
print(
Panel(
Markdown(prompts_md),
title=f"Server {server_num} Prompts",
style="bold cyan",
)
)
elif command == "chat":
provider = os.getenv("LLM_PROVIDER", "openai")
model = os.getenv("LLM_MODEL", "gpt-4o-mini")
# Clear the screen first
if sys.platform == "win32":
os.system("cls")
else:
os.system("clear")
chat_info_text = (
"Welcome to the Chat!\n\n"
f"**Provider:** {provider} | **Model:** {model}\n\n"
"Type 'exit' to quit."
)
print(
Panel(
Markdown(chat_info_text),
style="bold cyan",
title="Chat Mode",
title_align="center",
)
)
await handle_chat_mode(server_streams, provider, model)
elif command in ["quit", "exit"]:
print("\n[bold red]Goodbye![/bold red]")
return False
elif command == "clear":
if sys.platform == "win32":
os.system("cls")
else:
os.system("clear")
elif command == "help":
help_md = """
# Available Commands
- **ping**: Check if server is responsive
- **list-tools**: Display available tools
- **call-tool**: Call a tool with JSON arguments
- **list-resources**: Display available resources
- **list-prompts**: Display available prompts
- **chat**: Enter chat mode
- **clear**: Clear the screen
- **help**: Show this help message
- **quit/exit**: Exit the program
**Note:** Commands use dashes (e.g., `list-tools` not `list tools`).
"""
print(Panel(Markdown(help_md), style="yellow"))
else:
print(f"[red]\nUnknown command: {command}[/red]")
print("[yellow]Type 'help' for available commands[/yellow]")
except Exception as e:
print(f"\n[red]Error executing command:[/red] {e}")
return True
async def interactive_mode(server_streams: List[tuple]):
"""Run the CLI in interactive mode with multiple servers."""
welcome_text = """
# Welcome to the Interactive MCP Command-Line Tool (Multi-Server Mode)
Type 'help' for available commands or 'quit' to exit.
"""
print(Panel(Markdown(welcome_text), style="bold cyan"))
while True:
try:
command = await get_input("[bold green]\n>[/bold green]")
command = command.lower()
if not command:
continue
should_continue = await handle_command(command, server_streams)
if not should_continue:
return
except EOFError:
break
except Exception as e:
print(f"\n[red]Error:[/red] {e}")
class GracefulExit(Exception):
"""Custom exception for handling graceful exits."""
pass
async def run(config_path: str, server_names: List[str], command: str = None) -> None:
"""Main function to manage server initialization, communication, and shutdown."""
# Clear screen before rendering anything
if sys.platform == "win32":
os.system("cls")
else:
os.system("clear")
# Load server configurations and establish connections for all servers
server_streams = []
context_managers = []
for server_name in server_names:
server_params = await load_config(config_path, server_name)
# Establish stdio communication for each server
cm = stdio_client(server_params)
(read_stream, write_stream) = await cm.__aenter__()
context_managers.append(cm)
server_streams.append((read_stream, write_stream))
init_result = await send_initialize(read_stream, write_stream)
if not init_result:
print(f"[red]Server initialization failed for {server_name}[/red]")
return
try:
if command:
# Single command mode
await handle_command(command, server_streams)
else:
# Interactive mode
await interactive_mode(server_streams)
finally:
# Clean up all streams
for cm in context_managers:
with anyio.move_on_after(1): # wait up to 1 second
await cm.__aexit__(None, None, None)
def cli_main():
# setup the parser
parser = argparse.ArgumentParser(description="MCP Command-Line Tool")
parser.add_argument(
"--config-file",
default=DEFAULT_CONFIG_FILE,
help="Path to the JSON configuration file containing server details.",
)
parser.add_argument(
"--server",
action="append",
dest="servers",
help="Server configuration(s) to use. Can be specified multiple times.",
default=[],
)
parser.add_argument(
"--all",
action="store_true",
dest="all",
default=False,
help="Use every server defined in the configuration file.",
)
parser.add_argument(
"command",
nargs="?",
choices=["ping", "list-tools", "list-resources", "list-prompts"],
help="Command to execute (optional - if not provided, enters interactive mode).",
)
parser.add_argument(
"--provider",
choices=["openai", "anthropic", "ollama"],
default="openai",
help="LLM provider to use. Defaults to 'openai'.",
)
parser.add_argument(
"--model",
help=("Model to use. Defaults to 'gpt-4o-mini' for openai, 'claude-3-5-haiku-latest' for anthropic and 'qwen2.5-coder' for ollama"),
)
args = parser.parse_args()
# Set default model based on provider
model = args.model or (
"gpt-4o-mini" if args.provider == "openai"
else "claude-3-5-haiku-latest" if args.provider == "anthropic"
else "qwen2.5-coder"
)
os.environ["LLM_PROVIDER"] = args.provider
os.environ["LLM_MODEL"] = model
try:
if args.all:
with open(args.config_file,'r') as f:
args.servers = list(json.load(f)['mcpServers'].keys())
result = anyio.run(run, args.config_file, args.servers, args.command)
sys.exit(result)
except Exception as e:
print(f"[red]Error occurred:[/red] {e}")
sys.exit(1)
if __name__ == "__main__":
cli_main()
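For orientation, a few invocation sketches (paths assumed: this file lives at src/__main__.py and server_config.json sits in the working directory):

    # one-shot mode: run a single command against one server, then exit
    python src/__main__.py --config-file server_config.json --server sqlite ping

    # interactive mode against every server in the config file
    python src/__main__.py --all

    # interactive mode with a non-default LLM for the 'chat' command
    python src/__main__.py --server sqlite --provider ollama --model qwen2.5-coder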

@@ -0,0 +1,180 @@
# chat_handler.py
import json
import asyncio
from rich import print
from rich.markdown import Markdown
from rich.panel import Panel
from rich.prompt import Prompt
from llm_client import LLMClient
from system_prompt_generator import SystemPromptGenerator
from tools_handler import convert_to_openai_tools, fetch_tools, handle_tool_call
async def get_input(prompt: str):
"""Get input asynchronously."""
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, lambda: Prompt.ask(prompt).strip())
async def handle_chat_mode(server_streams, provider="openai", model="gpt-4o-mini"):
"""Enter chat mode with multi-call support for autonomous tool chaining."""
try:
tools = []
for read_stream, write_stream in server_streams:
tools.extend(await fetch_tools(read_stream, write_stream))
# for (read_stream, write_stream) in server_streams:
# tools = await fetch_tools(read_stream, write_stream)
if not tools:
print("[red]No tools available. Exiting chat mode.[/red]")
return
system_prompt = generate_system_prompt(tools)
openai_tools = convert_to_openai_tools(tools)
client = LLMClient(provider=provider, model=model)
conversation_history = [{"role": "system", "content": system_prompt}]
while True:
try:
# Change prompt to yellow
user_message = await get_input("[bold yellow]>[/bold yellow]")
if user_message.lower() in ["exit", "quit"]:
print(Panel("Exiting chat mode.", style="bold red"))
break
# User panel in bold yellow
user_panel_text = user_message if user_message else "[No Message]"
print(Panel(user_panel_text, style="bold yellow", title="You"))
conversation_history.append({"role": "user", "content": user_message})
await process_conversation(
client, conversation_history, openai_tools, server_streams
)
except Exception as e:
print(f"[red]Error processing message:[/red] {e}")
continue
except Exception as e:
print(f"[red]Error in chat mode:[/red] {e}")
async def process_conversation(
client, conversation_history, openai_tools, server_streams
):
"""Process the conversation loop, handling tool calls and responses."""
while True:
completion = client.create_completion(
messages=conversation_history,
tools=openai_tools,
)
response_content = completion.get("response", "No response")
tool_calls = completion.get("tool_calls", [])
if tool_calls:
for tool_call in tool_calls:
# Extract tool_name and raw_arguments as before
if hasattr(tool_call, "function"):
tool_name = getattr(tool_call.function, "name", "unknown tool")
raw_arguments = getattr(tool_call.function, "arguments", {})
elif isinstance(tool_call, dict) and "function" in tool_call:
fn_info = tool_call["function"]
tool_name = fn_info.get("name", "unknown tool")
raw_arguments = fn_info.get("arguments", {})
else:
tool_name = "unknown tool"
raw_arguments = {}
# If raw_arguments is a string, try to parse it as JSON
if isinstance(raw_arguments, str):
try:
raw_arguments = json.loads(raw_arguments)
except json.JSONDecodeError:
# If it's not valid JSON, just display as is
pass
# Now raw_arguments should be a dict or something we can pretty-print as JSON
tool_args_str = json.dumps(raw_arguments, indent=2)
tool_md = f"**Tool Call:** {tool_name}\n\n```json\n{tool_args_str}\n```"
print(
Panel(
Markdown(tool_md), style="bold magenta", title="Tool Invocation"
)
)
await handle_tool_call(tool_call, conversation_history, server_streams)
continue
# Assistant panel with Markdown
assistant_panel_text = response_content if response_content else "[No Response]"
print(
Panel(Markdown(assistant_panel_text), style="bold blue", title="Assistant")
)
conversation_history.append({"role": "assistant", "content": response_content})
break
def generate_system_prompt(tools):
"""
Generate a concise system prompt for the assistant.
This prompt is internal and not displayed to the user.
"""
prompt_generator = SystemPromptGenerator()
tools_json = {"tools": tools}
system_prompt = prompt_generator.generate_prompt(tools_json)
system_prompt += """
**GENERAL GUIDELINES:**
1. Step-by-step reasoning:
- Analyze tasks systematically.
- Break down complex problems into smaller, manageable parts.
- Verify assumptions at each step to avoid errors.
- Reflect on results to improve subsequent actions.
2. Effective tool usage:
- Explore:
- Identify available information and verify its structure.
- Check assumptions and understand data relationships.
- Iterate:
- Start with simple queries or actions.
- Build upon successes, adjusting based on observations.
- Handle errors:
- Carefully analyze error messages.
- Use errors as a guide to refine your approach.
- Document what went wrong and suggest fixes.
3. Clear communication:
- Explain your reasoning and decisions at each step.
- Share discoveries transparently with the user.
- Outline next steps or ask clarifying questions as needed.
EXAMPLES OF BEST PRACTICES:
- Working with databases:
- Check schema before writing queries.
- Verify the existence of columns or tables.
- Start with basic queries and refine based on results.
- Processing data:
- Validate data formats and handle edge cases.
- Ensure integrity and correctness of results.
- Accessing resources:
- Confirm resource availability and permissions.
- Handle missing or incomplete data gracefully.
REMEMBER:
- Be thorough and systematic.
- Each tool call should have a clear and well-explained purpose.
- Make reasonable assumptions if ambiguous.
- Minimize unnecessary user interactions by providing actionable insights.
EXAMPLES OF ASSUMPTIONS:
- Default sorting (e.g., descending order) if not specified.
- Assume basic user intentions, such as fetching top results by a common metric.
"""
return system_prompt
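To make the tool-chaining loop concrete, here is a sketch of how conversation_history grows across one round trip of process_conversation (tool name and values illustrative; the two appended entries mirror what handle_tool_call in tools_handler.py writes):

conversation_history = [
    {"role": "system", "content": "<generated system prompt>"},
    {"role": "user", "content": "Which tables exist?"},
    # appended by handle_tool_call: the call the model requested ...
    {
        "role": "assistant",
        "content": None,
        "tool_calls": [{
            "id": "call_1",
            "type": "function",
            "function": {"name": "list_tables", "arguments": "{}"},
        }],
    },
    # ... and the tool's formatted result, linked back via tool_call_id
    {
        "role": "tool",
        "name": "list_tables",
        "content": "events\nprofiles",
        "tool_call_id": "call_1",
    },
]
# process_conversation then re-sends this history; the loop ends when a
# completion arrives with no tool_calls, which is printed and appended.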

@@ -0,0 +1,53 @@
# config.py
import json
import logging
from nostr_dvm.backends.mcp.transport.stdio.stdio_server_parameters import StdioServerParameters
async def load_config(config_path: str, server_name: str) -> StdioServerParameters:
"""Load the server configuration from a JSON file."""
try:
# debug
logging.debug(f"Loading config from {config_path}")
# Read the configuration file
with open(config_path, "r") as config_file:
config = json.load(config_file)
# Retrieve the server configuration
server_config = config.get("mcpServers", {}).get(server_name)
if not server_config:
error_msg = f"Server '{server_name}' not found in configuration file."
logging.error(error_msg)
raise ValueError(error_msg)
# Construct the server parameters
result = StdioServerParameters(
command=server_config["command"],
args=server_config.get("args", []),
env=server_config.get("env"),
)
# debug
logging.debug(
f"Loaded config: command='{result.command}', args={result.args}, env={result.env}"
)
# return result
return result
except FileNotFoundError:
# error
error_msg = f"Configuration file not found: {config_path}"
logging.error(error_msg)
raise FileNotFoundError(error_msg)
except json.JSONDecodeError as e:
# json error
error_msg = f"Invalid JSON in configuration file: {e.msg}"
logging.error(error_msg)
raise json.JSONDecodeError(error_msg, e.doc, e.pos)
except ValueError as e:
# error
logging.error(str(e))
raise

@@ -0,0 +1,49 @@
import anyio
import logging
import sys
from config import load_config
from messages.send_initialize_message import send_initialize
from messages.send_ping import send_ping
from messages.send_tools_list import send_tools_list
from transport.stdio.stdio_client import stdio_client
# Configure logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s',
stream=sys.stderr
)
async def main():
"""Stripped-down script to initialize the server and send a ping."""
# Configuration values
config_path = "server_config.json"
server_name = "sqlite"
# Load server configuration
server_params = await load_config(config_path, server_name)
# Establish stdio communication
async with stdio_client(server_params) as (read_stream, write_stream):
# Initialize the server
init_result = await send_initialize(read_stream, write_stream)
# check we got a result
if not init_result:
print("Server initialization failed")
return
# connected
print(f"We're connected!!!")
# Send a ping
result = await send_ping(read_stream, write_stream)
print("Ping successful" if result else "Ping failed")
# get tools
result = await send_tools_list(read_stream, write_stream)
print(result)
# Run the script
if __name__ == "__main__":
anyio.run(main)

@@ -0,0 +1,38 @@
# environment.py
import os
import sys
# Default environment variables to inherit
DEFAULT_INHERITED_ENV_VARS = (
["HOME", "LOGNAME", "PATH", "SHELL", "TERM", "USER"]
if sys.platform != "win32"
else [
"APPDATA",
"HOMEDRIVE",
"HOMEPATH",
"LOCALAPPDATA",
"PATH",
"PROCESSOR_ARCHITECTURE",
"SYSTEMDRIVE",
"SYSTEMROOT",
"TEMP",
"USERNAME",
"USERPROFILE",
]
)
def get_default_environment() -> dict[str, str]:
"""
Retrieve a dictionary of default environment variables to inherit.
"""
# get the current environment
env = {
key: value
for key in DEFAULT_INHERITED_ENV_VARS
if (value := os.environ.get(key)) and not value.startswith("()")
}
# return the dictionary
return env

@@ -0,0 +1,232 @@
import logging
import os
import uuid
from typing import Any, Dict, List
import json
import ollama
from dotenv import load_dotenv
from openai import OpenAI
from anthropic import Anthropic
# Load environment variables
load_dotenv()
class LLMClient:
def __init__(self, provider="openai", model="gpt-4o-mini", api_key=None):
# set the provider, model and api key
self.provider = provider
self.model = model
self.api_key = api_key
# ensure we have the api key for openai if set
if provider == "openai":
self.api_key = self.api_key or os.getenv("OPENAI_API_KEY")
if not self.api_key:
raise ValueError("The OPENAI_API_KEY environment variable is not set.")
# check anthropic api key
elif provider == "anthropic":
self.api_key = self.api_key or os.getenv("ANTHROPIC_API_KEY")
if not self.api_key:
raise ValueError("The ANTHROPIC_API_KEY environment variable is not set.")
# check ollama is good
elif provider == "ollama" and not hasattr(ollama, "chat"):
raise ValueError("Ollama is not properly configured in this environment.")
def create_completion(
self, messages: List[Dict], tools: List = None
) -> Dict[str, Any]:
"""Create a chat completion using the specified LLM provider."""
if self.provider == "openai":
# perform an openai completion
return self._openai_completion(messages, tools)
elif self.provider == "anthropic":
# perform an anthropic completion
return self._anthropic_completion(messages, tools)
elif self.provider == "ollama":
# perform an ollama completion
return self._ollama_completion(messages, tools)
else:
# unsupported providers
raise ValueError(f"Unsupported provider: {self.provider}")
def _openai_completion(self, messages: List[Dict], tools: List) -> Dict[str, Any]:
"""Handle OpenAI chat completions."""
# get the openai client
client = OpenAI(api_key=self.api_key)
try:
# make a request, passing in tools
response = client.chat.completions.create(
model=self.model,
messages=messages,
tools=tools or [],
)
# return the response
return {
"response": response.choices[0].message.content,
"tool_calls": getattr(response.choices[0].message, "tool_calls", []),
}
except Exception as e:
# error
logging.error(f"OpenAI API Error: {str(e)}")
raise ValueError(f"OpenAI API Error: {str(e)}")
def _anthropic_completion(self, messages: List[Dict], tools: List) -> Dict[str, Any]:
"""Handle Anthropic chat completions."""
# get the anthropic client
client = Anthropic(api_key=self.api_key)
try:
# format messages for anthropic api
anthropic_messages = []
system_messages = []
for msg in messages:
if msg["role"] == "system":
system_messages.append({
"type": "text",
"text": msg["content"]
})
elif msg["role"] == "tool":
anthropic_messages.append({
"role": "user",
"content": [{
"type": "tool_result",
"tool_use_id": msg["tool_call_id"],
"content": msg["content"]
}]
})
elif msg["role"] == "assistant" and "tool_calls" in msg:
content = []
if msg["content"]:
content.append({
"type": "text",
"text": msg["content"]
})
for tool_call in msg["tool_calls"]:
content.append({
"type": "tool_use",
"id": tool_call["id"],
"name": tool_call["function"]["name"],
"input":(
json.loads(tool_call["function"]["arguments"])
if isinstance(tool_call["function"]["arguments"], str)
else tool_call["function"]["arguments"]
)
})
anthropic_messages.append({
"role": msg["role"],
"content": content
})
else:
anthropic_messages.append({
"role": msg["role"],
"content": [{
"type": "text",
"text": msg["content"]
}]
})
# add prompt caching markers
if len(system_messages) > 0:
system_messages[-1]["cache_control"] = {"type": "ephemeral"}
if len(anthropic_messages) > 0:
anthropic_messages[-1]["content"][-1]["cache_control"] = {"type": "ephemeral"}
if len(anthropic_messages) > 2:
anthropic_messages[-3]["content"][-1]["cache_control"] = {"type": "ephemeral"}
# format tools for anthropic api
if tools:
anthropic_tools = []
for tool in tools:
anthropic_tools.append({
"name": tool["function"]["name"],
"description": tool["function"]["description"],
"input_schema": tool["function"]["parameters"]
})
# add prompt caching marker
if len(anthropic_tools) > 0:
anthropic_tools[-1]["cache_control"] = {"type": "ephemeral"}
else:
anthropic_tools = None
# make a request, passing in tools
response = client.messages.create(
model=self.model,
system=system_messages,
tools=anthropic_tools,
messages=anthropic_messages,
max_tokens=8192
)
# format tool calls
tool_calls = []
for block in response.content:
if block.type == "tool_use":
tool_calls.append({
"id": block.id,
"type": "function",
"function": {
"name": block.name,
"arguments": block.input
}
})
# return the response
return {
"response": response.content[0].text if response.content[0].type == "text" else "",
"tool_calls": tool_calls
}
except Exception as e:
# error
raise ValueError(f"Anthropic API Error: {repr(e)}")
def _ollama_completion(self, messages: List[Dict], tools: List) -> Dict[str, Any]:
"""Handle Ollama chat completions."""
# Format messages for Ollama
ollama_messages = [
{"role": msg["role"], "content": msg["content"]} for msg in messages
]
try:
# Make API call with tools
response = ollama.chat(
model=self.model,
messages=ollama_messages,
stream=False,
tools=tools or [],
)
logging.info(f"Ollama raw response: {response}")
# Extract the message and tool calls
message = response.message
tool_calls = []
# Convert Ollama tool calls to OpenAI format
if hasattr(message, "tool_calls") and message.tool_calls:
for tool in message.tool_calls:
tool_calls.append(
{
"id": str(uuid.uuid4()), # Generate unique ID
"type": "function",
"function": {
"name": tool.function.name,
"arguments": tool.function.arguments,
},
}
)
return {
"response": message.content if message else "No response",
"tool_calls": tool_calls,
}
except Exception as e:
# error
logging.error(f"Ollama API Error: {str(e)}")
raise ValueError(f"Ollama API Error: {str(e)}")

@@ -0,0 +1,23 @@
# messages/message_types/incrementing_id_message.py
from typing import ClassVar
from nostr_dvm.backends.mcp.messages.message_types.json_rpc_message import JSONRPCMessage
class IncrementingIDMessage(JSONRPCMessage):
counter: ClassVar[int] = 0
@classmethod
def load_counter(cls, value: int):
cls.counter = value
@classmethod
def save_counter(cls) -> int:
return cls.counter
def __init__(self, prefix: str, method: str, start_id: int = None, **kwargs):
if start_id is not None:
type(self).counter = start_id
else:
type(self).counter += 1
message_id = f"{prefix}-{type(self).counter}"
super().__init__(method=method, id=message_id, **kwargs)
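Because the counter lives on the class, consecutive instances get sequential ids; a quick sketch (import path as used by the sibling message modules in this commit):

from nostr_dvm.backends.mcp.messages.message_types.incrementing_id_message import IncrementingIDMessage

m1 = IncrementingIDMessage(prefix="ping", method="ping")  # id == "ping-1"
m2 = IncrementingIDMessage(prefix="ping", method="ping")  # id == "ping-2"
m3 = IncrementingIDMessage(prefix="ping", method="ping", start_id=10)  # id == "ping-10"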

@@ -0,0 +1,62 @@
from typing import Optional
from pydantic import BaseModel, Field
from nostr_dvm.backends.mcp.messages.message_types.json_rpc_message import JSONRPCMessage
class MCPClientCapabilities(BaseModel):
roots: dict = Field(default_factory=lambda: {"listChanged": True})
sampling: dict = Field(default_factory=dict)
class MCPClientInfo(BaseModel):
name: str = "PythonMCPClient"
version: str = "1.0.0"
class InitializeParams(BaseModel):
protocolVersion: str
capabilities: MCPClientCapabilities
clientInfo: MCPClientInfo
class ServerInfo(BaseModel):
name: str
version: str
class ServerCapabilities(BaseModel):
logging: dict = Field(default_factory=dict)
prompts: Optional[dict] = None
resources: Optional[dict] = None
tools: Optional[dict] = None
class InitializeResult(BaseModel):
protocolVersion: str
capabilities: ServerCapabilities
serverInfo: ServerInfo
class InitializeMessage(JSONRPCMessage):
"""
A JSON-RPC 'initialize' message with default id and method.
"""
def __init__(self, init_params: InitializeParams, **kwargs):
super().__init__(
id="init-1",
method="initialize",
params=init_params.model_dump(),
**kwargs
)
class InitializedNotificationMessage(JSONRPCMessage):
"""
A JSON-RPC notification message to notify the server that the client is initialized.
"""
def __init__(self, **kwargs):
super().__init__(
method="notifications/initialized",
params={},
**kwargs
)
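For reference, the initialize request these models produce serializes roughly as below (a sketch; the values all come from the defaults above):

from nostr_dvm.backends.mcp.messages.message_types.initialize_message import (
    InitializeMessage, InitializeParams, MCPClientCapabilities, MCPClientInfo,
)

msg = InitializeMessage(InitializeParams(
    protocolVersion="2024-11-05",
    capabilities=MCPClientCapabilities(),
    clientInfo=MCPClientInfo(),
))
print(msg.model_dump_json(exclude_none=True))
# {"jsonrpc":"2.0","id":"init-1","method":"initialize","params":{"protocolVersion":"2024-11-05",
#  "capabilities":{"roots":{"listChanged":true},"sampling":{}},
#  "clientInfo":{"name":"PythonMCPClient","version":"1.0.0"}}}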

@@ -0,0 +1,13 @@
# messages/message_types/json_rpc_message.py
from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict
class JSONRPCMessage(BaseModel):
jsonrpc: str = "2.0"
id: Optional[str] = None
method: Optional[str] = None
params: Optional[Dict[str, Any]] = None
result: Optional[Dict[str, Any]] = None
error: Optional[Dict[str, Any]] = None
model_config = ConfigDict(extra="allow")

@@ -0,0 +1,6 @@
# messages/message_types/ping_message.py
from nostr_dvm.backends.mcp.messages.message_types.incrementing_id_message import IncrementingIDMessage
class PingMessage(IncrementingIDMessage):
def __init__(self, start_id: int = None, **kwargs):
super().__init__(prefix="ping", method="ping", start_id=start_id, **kwargs)

@@ -0,0 +1,27 @@
# messages/message_types/prompts_messages.py
from typing import Any, Dict, Optional
from nostr_dvm.backends.mcp.messages.message_types.incrementing_id_message import IncrementingIDMessage
from nostr_dvm.backends.mcp.messages.message_types.json_rpc_message import JSONRPCMessage
from nostr_dvm.backends.mcp.messages.message_types.prompts_models import PromptsGetParams
class PromptsListMessage(IncrementingIDMessage):
def __init__(self, start_id: int = None, **kwargs):
super().__init__(prefix="prompts-list", method="prompts/list", start_id=start_id, **kwargs)
class PromptsGetMessage(IncrementingIDMessage):
def __init__(self, name: str, arguments: Optional[Dict[str, Any]] = None, start_id: int = None, **kwargs):
# Validate params using PromptsGetParams
params_model = PromptsGetParams(name=name, arguments=arguments or {})
super().__init__(
prefix="prompts-get",
method="prompts/get",
start_id=start_id,
params=params_model.model_dump(),
**kwargs
)
class PromptsListChangedMessage(JSONRPCMessage):
def __init__(self, **kwargs):
super().__init__(method="notifications/prompts/list_changed", id=None, **kwargs)

@@ -0,0 +1,43 @@
from typing import Optional, List, Dict, Any, Union, Literal
from pydantic import BaseModel, Field
# Content Types
class TextContent(BaseModel):
type: Literal["text"] = "text"
text: str
class ImageContent(BaseModel):
type: Literal["image"] = "image"
data: str # base64-encoded image data
mimeType: str
class ResourceData(BaseModel):
uri: str
mimeType: str
text: Optional[str] = None
blob: Optional[str] = None # if binary data is included, base64-encoded
class ResourceContent(BaseModel):
type: Literal["resource"] = "resource"
resource: ResourceData
# Union of all content types
MessageContent = Union[TextContent, ImageContent, ResourceContent]
class PromptMessage(BaseModel):
role: str
content: MessageContent
# Prompt Definition
class Prompt(BaseModel):
name: str
description: Optional[str] = None
arguments: Optional[List[str]] = None
class PromptsGetResult(BaseModel):
description: Optional[str] = None
messages: List[PromptMessage]
class PromptsGetParams(BaseModel):
name: str
arguments: Dict[str, Any] = {}
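The MessageContent union lets pydantic pick the matching content model from the "type" field; a sketch (import path assumed to mirror the other message_types modules):

from nostr_dvm.backends.mcp.messages.message_types.prompts_models import PromptMessage

msg = PromptMessage.model_validate({
    "role": "user",
    "content": {"type": "text", "text": "Summarize the schema."},
})
assert msg.content.text == "Summarize the schema."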

@@ -0,0 +1,7 @@
# messages/message_types/resources_messages.py
from nostr_dvm.backends.mcp.messages.message_types.incrementing_id_message import IncrementingIDMessage
class ResourcesListMessage(IncrementingIDMessage):
def __init__(self, start_id: int = None, **kwargs):
super().__init__(prefix="resources-list", method="resources/list", start_id=start_id, **kwargs)

@@ -0,0 +1,16 @@
# messages/message_types/tools_messages.py
from nostr_dvm.backends.mcp.messages.message_types.incrementing_id_message import IncrementingIDMessage
from nostr_dvm.backends.mcp.messages.message_types.json_rpc_message import JSONRPCMessage
class ToolsListMessage(IncrementingIDMessage):
def __init__(self, start_id: int = None, **kwargs):
super().__init__(prefix="tools-list", method="tools/list", start_id=start_id, **kwargs)
class CallToolMessage(IncrementingIDMessage):
def __init__(self, tool_name: str, arguments: dict, start_id: int = None, **kwargs):
super().__init__(prefix="tools-call", method="tools/call", start_id=start_id, params={"name": tool_name, "arguments": arguments}, **kwargs)
class ToolsListChangedMessage(JSONRPCMessage):
def __init__(self, **kwargs):
# A notification has no 'id' field.
super().__init__(method="notifications/tools/list_changed", id=None, **kwargs)

@@ -0,0 +1,26 @@
# messages/send_call_tool.py
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from nostr_dvm.backends.mcp.messages.send_message import send_message
from nostr_dvm.backends.mcp.messages.message_types.tools_messages import CallToolMessage
async def send_call_tool(
tool_name: str,
arguments: dict,
read_stream: MemoryObjectReceiveStream,
write_stream: MemoryObjectSendStream,
) -> dict:
# create the message
message = CallToolMessage(tool_name=tool_name, arguments=arguments)
try:
# send the message
response = await send_message(
read_stream=read_stream,
write_stream=write_stream,
message=message,
)
# get the result
return response.get("result", {})
except Exception as e:
return {"isError": True, "error": str(e)}

@@ -0,0 +1,80 @@
# messages/send_initialize_message.py
import logging
import anyio
from typing import Optional
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from nostr_dvm.backends.mcp.messages.message_types.initialize_message import (
InitializeMessage,
InitializedNotificationMessage,
InitializeParams,
MCPClientCapabilities,
MCPClientInfo,
InitializeResult,
)
async def send_initialize(
read_stream: MemoryObjectReceiveStream,
write_stream: MemoryObjectSendStream,
) -> Optional[InitializeResult]:
"""Send an initialization request to the server and process its response."""
# Set initialize params
init_params = InitializeParams(
protocolVersion="2024-11-05",
capabilities=MCPClientCapabilities(),
clientInfo=MCPClientInfo(),
)
# Create the initialize message
init_message = InitializeMessage(init_params)
# Sending
logging.debug("Sending initialize request")
await write_stream.send(init_message)
try:
# 5-second timeout for response
with anyio.fail_after(5):
# Get the response from the server
async for response in read_stream:
# If the response is an exception, log it and continue
if isinstance(response, Exception):
logging.error(f"Error from server: {response}")
continue
# Debug log the received message
logging.debug(f"Received: {response.model_dump()}")
# Check for error
if response.error:
logging.error(f"Server initialization error: {response.error}")
return None
# Check for result
if response.result:
try:
# Validate the result
init_result = InitializeResult.model_validate(response.result)
logging.debug("Server initialized successfully")
# Notify the server of successful initialization
initialized_notify = InitializedNotificationMessage()
await write_stream.send(initialized_notify)
return init_result
except Exception as e:
logging.error(f"Error processing init result: {e}")
return None
except TimeoutError:
logging.error("Timeout waiting for server initialization response")
return None
except Exception as e:
logging.error(f"Unexpected error during server initialization: {e}")
raise
# Timeout
logging.error("Initialization response timeout")
return None

@@ -0,0 +1,58 @@
# messages/send_message.py
import logging
import anyio
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from nostr_dvm.backends.mcp.messages.message_types.json_rpc_message import JSONRPCMessage
async def send_message(
read_stream: MemoryObjectReceiveStream,
write_stream: MemoryObjectSendStream,
message: JSONRPCMessage,
timeout: float = 5,
retries: int = 3,
) -> dict:
"""
Send a JSON-RPC message to the server and return the response.
Args:
read_stream (MemoryObjectReceiveStream): The stream to read responses.
write_stream (MemoryObjectSendStream): The stream to send requests.
message (JSONRPCMessage): The JSON-RPC message to send.
timeout (float): Timeout in seconds to wait for a response.
retries (int): Number of retry attempts.
Returns:
dict: The server's response as a dictionary.
Raises:
TimeoutError: If no response is received within the timeout.
Exception: If an unexpected error occurs.
"""
for attempt in range(1, retries + 1):
try:
logging.debug(f"Attempt {attempt}/{retries}: Sending message: {message}")
await write_stream.send(message)
with anyio.fail_after(timeout):
async for response in read_stream:
if not isinstance(response, Exception):
logging.debug(f"Received response: {response.model_dump()}")
return response.model_dump()
else:
logging.error(f"Server error: {response}")
raise response
except TimeoutError:
logging.error(
f"Timeout waiting for response to message '{message.method}' (Attempt {attempt}/{retries})"
)
if attempt == retries:
raise
except Exception as e:
logging.error(
f"Unexpected error during '{message.method}' request: {e} (Attempt {attempt}/{retries})"
)
if attempt == retries:
raise
await anyio.sleep(2)

@@ -0,0 +1,21 @@
# messages/send_ping.py
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from nostr_dvm.backends.mcp.messages.send_message import send_message
from nostr_dvm.backends.mcp.messages.message_types.ping_message import PingMessage
async def send_ping(
read_stream: MemoryObjectReceiveStream,
write_stream: MemoryObjectSendStream,
) -> bool:
# create a ping message
ping_msg = PingMessage()
# send the message
response = await send_message(
read_stream=read_stream,
write_stream=write_stream,
message=ping_msg
)
# return the response
return response is not None

@@ -0,0 +1,21 @@
# messages/send_prompts.py
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from nostr_dvm.backends.mcp.messages.send_message import send_message
from nostr_dvm.backends.mcp.messages.message_types.prompts_messages import PromptsListMessage
async def send_prompts_list(
read_stream: MemoryObjectReceiveStream,
write_stream: MemoryObjectSendStream,
) -> list:
"""Send a 'prompts/list' message and return the list of prompts."""
message = PromptsListMessage()
# send the message
response = await send_message(
read_stream=read_stream,
write_stream=write_stream,
message=message,
)
# return the result
return response.get("result", [])

@@ -0,0 +1,22 @@
# messages/send_resources.py
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from nostr_dvm.backends.mcp.messages.send_message import send_message
from nostr_dvm.backends.mcp.messages.message_types.resources_messages import ResourcesListMessage
async def send_resources_list(
read_stream: MemoryObjectReceiveStream,
write_stream: MemoryObjectSendStream,
) -> list:
"""Send a 'resources/list' message and return the list of resources."""
# create the message
message = ResourcesListMessage()
# send the message
response = await send_message(
read_stream=read_stream,
write_stream=write_stream,
message=message,
)
# return the result
return response.get("result", [])

@@ -0,0 +1,21 @@
# messages/send_tools_list.py
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
from nostr_dvm.backends.mcp.messages.send_message import send_message
from nostr_dvm.backends.mcp.messages.message_types.tools_messages import ToolsListMessage
async def send_tools_list(
read_stream: MemoryObjectReceiveStream,
write_stream: MemoryObjectSendStream,
) -> list:
# create the tools list message
message = ToolsListMessage()
# send the message
response = await send_message(
read_stream=read_stream,
write_stream=write_stream,
message=message,
)
# get the response
return response.get("result", [])

@@ -0,0 +1,8 @@
{
"mcpServers": {
"sqlite": {
"command": "uvx",
"args": ["mcp-server-sqlite", "--db-path", "test.db"]
}
}
}
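Tying this file back to config.py: load_config turns an entry like the one above into the StdioServerParameters the stdio transport consumes. A sketch (local import style as in src/__main__.py):

import anyio
from config import load_config

async def main():
    params = await load_config("server_config.json", "sqlite")
    # params.command == "uvx"; params.args == ["mcp-server-sqlite", "--db-path", "test.db"]
    print(params.command, params.args)

anyio.run(main)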

@@ -0,0 +1,58 @@
import json
class SystemPromptGenerator:
"""
A class for generating system prompts dynamically based on tools JSON and user inputs.
"""
def __init__(self):
"""
Initialize the SystemPromptGenerator with a default system prompt template.
"""
self.template = """
In this environment you have access to a set of tools you can use to answer the user's question.
{{ FORMATTING INSTRUCTIONS }}
String and scalar parameters should be specified as is, while lists and objects should use JSON format. Note that spaces for string values are not stripped. The output is not expected to be valid XML and is parsed with regular expressions.
Here are the functions available in JSONSchema format:
{{ TOOL DEFINITIONS IN JSON SCHEMA }}
{{ USER SYSTEM PROMPT }}
{{ TOOL CONFIGURATION }}
"""
self.default_user_system_prompt = "You are an intelligent assistant capable of using tools to solve user queries effectively."
self.default_tool_config = "No additional configuration is required."
def generate_prompt(
self, tools: dict, user_system_prompt: str = None, tool_config: str = None
) -> str:
"""
Generate a system prompt based on the provided tools JSON, user prompt, and tool configuration.
Args:
tools (dict): The tools JSON containing definitions of the available tools.
user_system_prompt (str): A user-provided description or instruction for the assistant (optional).
tool_config (str): Additional tool configuration information (optional).
Returns:
str: The dynamically generated system prompt.
"""
# set the user system prompt
user_system_prompt = user_system_prompt or self.default_user_system_prompt
# set the tools config
tool_config = tool_config or self.default_tool_config
# get the tools schema
tools_json_schema = json.dumps(tools, indent=2)
# perform replacements
prompt = self.template.replace(
"{{ TOOL DEFINITIONS IN JSON SCHEMA }}", tools_json_schema
)
prompt = prompt.replace("{{ FORMATTING INSTRUCTIONS }}", "")
prompt = prompt.replace("{{ USER SYSTEM PROMPT }}", user_system_prompt)
prompt = prompt.replace("{{ TOOL CONFIGURATION }}", tool_config)
# return the prompt
return prompt

@@ -0,0 +1,165 @@
import json
import logging
import re
from typing import Any, Dict, Optional
from messages.send_call_tool import send_call_tool
from messages.send_tools_list import send_tools_list
def parse_tool_response(response: str) -> Optional[Dict[str, Any]]:
"""Parse tool call from Llama's XML-style format."""
function_regex = r"<function=(\w+)>(.*?)</function>"
match = re.search(function_regex, response)
if match:
function_name, args_string = match.groups()
try:
args = json.loads(args_string)
return {
"id": f"call_{function_name}",
"function": function_name,
"arguments": args,
}
except json.JSONDecodeError as error:
logging.debug(f"Error parsing function arguments: {error}")
return None
async def handle_tool_call(tool_call, conversation_history, server_streams):
"""
Handle a single tool call for both OpenAI and Llama formats.
This function no longer prints directly to stdout. It updates the conversation_history
with the tool call and its response. The calling function can then display the results.
"""
tool_call_id = None
tool_name = "unknown_tool"
raw_arguments = {}
try:
# Handle object-style tool calls from both OpenAI and Ollama
if hasattr(tool_call, "function") or (
isinstance(tool_call, dict) and "function" in tool_call
):
# Get tool name and arguments based on format
if hasattr(tool_call, "function"):
tool_call_id = tool_call.id
tool_name = tool_call.function.name
raw_arguments = tool_call.function.arguments
else:
tool_call_id = tool_call["id"]
tool_name = tool_call["function"]["name"]
raw_arguments = tool_call["function"]["arguments"]
else:
# Parse Llama's XML format from the last message
last_message = conversation_history[-1]["content"]
parsed_tool = parse_tool_response(last_message)
if not parsed_tool:
logging.debug("Unable to parse tool call from message")
return
tool_call_id = parsed_tool["id"]
tool_name = parsed_tool["function"]
raw_arguments = parsed_tool["arguments"]
# Parse the tool arguments
tool_args = (
json.loads(raw_arguments)
if isinstance(raw_arguments, str)
else raw_arguments
)
# Call the tool (no direct print here)
for read_stream, write_stream in server_streams:
tool_response = await send_call_tool(
tool_name, tool_args, read_stream, write_stream
)
if not tool_response.get("isError"):
break
if tool_response.get("isError"):
logging.debug(
f"Error calling tool '{tool_name}': {tool_response.get('content')}"
)
# Format the tool response
formatted_response = format_tool_response(tool_response.get("content", []))
logging.debug(f"Tool '{tool_name}' Response: {formatted_response}")
# Update the conversation history with the tool call
# Add the tool call itself (for OpenAI tracking)
conversation_history.append(
{
"role": "assistant",
"content": None,
"tool_calls": [
{
"id": tool_call_id,
"type": "function",
"function": {
"name": tool_name,
"arguments": json.dumps(tool_args)
if isinstance(tool_args, dict)
else tool_args,
},
}
],
}
)
# Add the tool response to conversation history
conversation_history.append(
{
"role": "tool",
"name": tool_name,
"content": formatted_response,
"tool_call_id": tool_call_id,
}
)
except json.JSONDecodeError:
logging.debug(
f"Error decoding arguments for tool '{tool_name}': {raw_arguments}"
)
except Exception as e:
logging.debug(f"Error handling tool call '{tool_name}': {str(e)}")
def format_tool_response(response_content):
"""Format the response content from a tool."""
if isinstance(response_content, list):
return "\n".join(
item.get("text", "No content")
for item in response_content
if item.get("type") == "text"
)
return str(response_content)
async def fetch_tools(read_stream, write_stream):
"""Fetch tools from the server."""
logging.debug("\nFetching tools for chat mode...")
# get the tools list
tools_response = await send_tools_list(read_stream, write_stream)
tools = tools_response.get("tools", [])
# check if tools are valid
if not isinstance(tools, list) or not all(isinstance(tool, dict) for tool in tools):
logging.debug("Invalid tools format received.")
return None
return tools
def convert_to_openai_tools(tools):
"""Convert tools into OpenAI-compatible function definitions."""
return [
{
"type": "function",
"function": {
"name": tool["name"],
"description": tool.get("description", ""),
"parameters": tool.get("inputSchema", {}),
},
}
for tool in tools
]
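As a concrete before/after for convert_to_openai_tools (the input tool is hypothetical, shaped like an entry from an MCP tools/list result; local import style as above):

from tools_handler import convert_to_openai_tools

mcp_tool = {
    "name": "read_query",
    "description": "Run a read-only SQL query",
    "inputSchema": {"type": "object", "properties": {"query": {"type": "string"}}},
}
openai_tool = convert_to_openai_tools([mcp_tool])[0]
# {"type": "function", "function": {"name": "read_query",
#   "description": "Run a read-only SQL query",
#   "parameters": {"type": "object", "properties": {"query": {"type": "string"}}}}}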

@@ -0,0 +1,149 @@
# transport/stdio/stdio_client.py
import json
import logging
import sys
import traceback
from asyncio import CancelledError
from contextlib import asynccontextmanager
import anyio
from anyio.streams.text import TextReceiveStream
from nostr_dvm.backends.mcp.environment import get_default_environment
from nostr_dvm.backends.mcp.messages.message_types.json_rpc_message import JSONRPCMessage
from nostr_dvm.backends.mcp.transport.stdio.stdio_server_parameters import StdioServerParameters
@asynccontextmanager
async def stdio_client(server: StdioServerParameters):
# ensure we have a server command
if not server.command:
raise ValueError("Server command must not be empty.")
# ensure we have server arguments as a list or tuple
if not isinstance(server.args, (list, tuple)):
raise ValueError("Server arguments must be a list or tuple.")
# create the read and write streams
read_stream_writer, read_stream = anyio.create_memory_object_stream(0)
write_stream, write_stream_reader = anyio.create_memory_object_stream(0)
# start the subprocess
process = await anyio.open_process(
[server.command, *server.args],
env={**get_default_environment(), **(server.env or {})},
stderr=sys.stderr,
)
# started server
logging.debug(
f"Subprocess started with PID {process.pid}, command: {server.command}"
)
# create a task to read from the subprocess' stdout
async def process_json_line(line: str, writer):
try:
logging.debug(f"Processing line: {line.strip()}")
data = json.loads(line)
# parse the json
logging.debug(f"Parsed JSON data: {data}")
# validate the jsonrpc message
message = JSONRPCMessage.model_validate(data)
logging.debug(f"Validated JSONRPCMessage: {message}")
# send the message
await writer.send(message)
except json.JSONDecodeError as exc:
# not valid json
logging.error(f"JSON decode error: {exc}. Line: {line.strip()}")
except Exception as exc:
# other exception
logging.error(f"Error processing message: {exc}. Line: {line.strip()}")
logging.debug(f"Traceback:\n{traceback.format_exc()}")
async def stdout_reader():
"""Read JSON-RPC messages from the server's stdout."""
assert process.stdout, "Opened process is missing stdout"
buffer = ""
logging.debug("Starting stdout_reader")
try:
async with read_stream_writer:
async for chunk in TextReceiveStream(process.stdout):
lines = (buffer + chunk).split("\n")
buffer = lines.pop()
for line in lines:
if line.strip():
await process_json_line(line, read_stream_writer)
if buffer.strip():
await process_json_line(buffer, read_stream_writer)
except anyio.ClosedResourceError:
logging.debug("Read stream closed.")
except Exception as exc:
logging.error(f"Unexpected error in stdout_reader: {exc}")
logging.debug(f"Traceback:\n{traceback.format_exc()}")
raise
finally:
logging.debug("Exiting stdout_reader")
async def stdin_writer():
"""Send JSON-RPC messages from the write stream to the server's stdin."""
assert process.stdin, "Opened process is missing stdin"
logging.debug("Starting stdin_writer")
try:
async with write_stream_reader:
async for message in write_stream_reader:
json_str = message.model_dump_json(exclude_none=True)
logging.debug(f"Sending: {json_str}")
await process.stdin.send((json_str + "\n").encode())
except anyio.ClosedResourceError:
logging.debug("Write stream closed.")
except Exception as exc:
logging.error(f"Unexpected error in stdin_writer: {exc}")
logging.debug(f"Traceback:\n{traceback.format_exc()}")
raise
finally:
logging.debug("Exiting stdin_writer")
async def terminate_process():
"""Gracefully terminate the subprocess."""
try:
if process.returncode is None: # Process is still running
logging.debug("Terminating subprocess...")
process.terminate()
with anyio.fail_after(5):
await process.wait()
else:
logging.info("Process already terminated.")
except TimeoutError:
logging.warning(
"Process did not terminate gracefully. Forcefully killing it."
)
try:
process.kill()
except Exception as kill_exc:
logging.error(f"Error killing process: {kill_exc}")
except Exception as exc:
logging.error(f"Error during process termination: {exc}")
try:
async with anyio.create_task_group() as tg, process:
tg.start_soon(stdout_reader)
tg.start_soon(stdin_writer)
yield read_stream, write_stream
# exit the task group
exit_code = await process.wait()
logging.info(f"Process exited with code {exit_code}")
except CancelledError as er:
print(er)
except Exception as exc:
# other exception
logging.error(f"Unhandled error in TaskGroup: {exc}")
logging.debug(f"Traceback:\n{traceback.format_exc()}")
if hasattr(exc, "__cause__") and exc.__cause__:
logging.debug(f"TaskGroup exception cause: {exc.__cause__}")
raise
finally:
await terminate_process()

@@ -0,0 +1,8 @@
# transport/stdio/stdio_server_parameters.py
from pydantic import BaseModel, Field
from typing import Any, Dict, Optional
class StdioServerParameters(BaseModel):
command: str
args: list[str] = Field(default_factory=list)
env: Optional[Dict[str, str]] = None

@@ -0,0 +1,89 @@
# transport/stdio/stdio_server_shutdown.py
import logging
from typing import Optional
import anyio
from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream
async def shutdown_stdio_server(
read_stream: Optional[MemoryObjectReceiveStream],
write_stream: Optional[MemoryObjectSendStream],
process: anyio.abc.Process,
timeout: float = 5.0,
) -> None:
"""
Gracefully shutdown a stdio-based server.
This function performs the following steps:
1. Closes the stdin stream of the process.
2. Waits for the process to terminate gracefully.
3. Sends SIGTERM if the process does not terminate within the timeout.
4. Sends SIGKILL if the process does not terminate after SIGTERM.
5. Logs each step and ensures cleanup in case of errors.
Args:
read_stream (Optional[MemoryObjectReceiveStream]): Stream to receive responses.
write_stream (Optional[MemoryObjectSendStream]): Stream to send requests.
process (anyio.abc.Process): The server process.
timeout (float): Time to wait for graceful shutdown and SIGTERM before escalation.
"""
logging.info("Initiating stdio server shutdown")
try:
# ensure we have a process
if process:
# Step 1: Close the write stream (stdin for the server)
if process.stdin:
# close
await process.stdin.aclose()
logging.info("Closed stdin stream")
# Step 2: Wait for the process to terminate gracefully
with anyio.fail_after(timeout):
await process.wait()
logging.info("Process exited normally")
return
except TimeoutError:
logging.warning(
f"Server did not exit within {timeout} seconds, sending SIGTERM"
)
# ensure we have a process
if process:
# terminate
process.terminate()
try:
# Step 3: Wait for the process to terminate after SIGTERM
with anyio.fail_after(timeout):
await process.wait()
logging.info("Process exited after SIGTERM")
return
except TimeoutError:
logging.warning("Server did not respond to SIGTERM, sending SIGKILL")
# ensure we have a process
if process:
                process.kill()
# Step 4: Wait for the process to terminate after SIGKILL
await process.wait()
logging.info("Process exited after SIGKILL")
except Exception as e:
# Catch unexpected errors during shutdown
logging.error(f"Unexpected error during stdio server shutdown: {e}")
if process:
            process.kill()
            await process.wait()
logging.info("Process forcibly terminated")
finally:
        logging.info("Stdio server shutdown complete")

View File

@ -0,0 +1,276 @@
import asyncio
import json
import os
from datetime import timedelta
from pathlib import Path
from nostr_sdk import Kind
from nostr_dvm.backends.mcp import config
from nostr_dvm.backends.mcp.config import load_config
from nostr_dvm.backends.mcp.messages.send_call_tool import send_call_tool
from nostr_dvm.backends.mcp.messages.send_initialize_message import send_initialize
from nostr_dvm.backends.mcp.messages.send_ping import send_ping
from nostr_dvm.backends.mcp.messages.send_tools_list import send_tools_list
from nostr_dvm.backends.mcp.transport.stdio.stdio_client import stdio_client
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface, process_venv
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.definitions import EventDefinitions, relay_timeout
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip88_utils import NIP88Config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
from nostr_dvm.utils.output_utils import post_process_list_to_events
"""
This File contains a Module to search for notes
Accepted Inputs: a search query
Outputs: A list of events
Params: None
"""
class MCPBridge(DVMTaskInterface):
KIND: Kind = EventDefinitions.KIND_NIP90_MCP
TASK: str = "mcp-bridge"
FIX_COST: float = 0
dependencies = [("nostr-dvm", "nostr-dvm"),
("mcp", "mcp")]
dvm_config: DVMConfig
async def init_dvm(self, name, dvm_config: DVMConfig, nip89config: NIP89Config, nip88config: NIP88Config = None,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
async def is_input_supported(self, tags, client=None, dvm_config=None):
for tag in tags:
if tag.as_vec()[0] == 'i':
input_value = tag.as_vec()[1]
input_type = tag.as_vec()[2]
if input_type != "text":
return False
return True
async def create_request_from_nostr_event(self, event, client=None, dvm_config=None):
self.dvm_config = dvm_config
request_form = {"jobID": event.id().to_hex()}
self.config_path = Path.absolute(Path(__file__).parent / "server_config.json")
self.server_names = ["mcp-crypto-price"]
if self.options.get("config_path"):
self.config_path = self.options.get("config_path")
if self.options.get("server_names"):
self.server_names = (self.options.get("server_names"))
c = "list-tools"
for tag in event.tags().to_vec():
if tag.as_vec()[0] == 'c':
c = tag.as_vec()[1]
content = event.content()
options = {
"command" : c,
"config_path" : self.config_path,
"server_names" : self.server_names,
"payload": content
}
request_form['options'] = json.dumps(options)
return request_form
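    # Request shape consumed above (sketch, matching the test client in this commit):
    #   tag ["c", "list-tools"]   -> return the configured servers' tool lists
    #   tag ["c", "execute-tool"] -> event content is JSON like {"name": ..., "parameters": ...}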
async def process(self, request_form):
options = self.set_options(request_form)
config_path = options["config_path"]
server_names = options["server_names"]
if options["command"] == "list-tools":
tools = await self.list_tools(config_path, server_names)
            # Just return the first server's tool list for now.
for tool in tools:
print(tool[1])
return json.dumps(tool[1])
elif options["command"] == "execute-tool":
            # the payload carries the tool call as JSON: {"name": ..., "parameters": ...}
            payload = json.loads(options["payload"])
            tool_name = payload["name"]
            tool_args = payload["parameters"]
tool_response = await self.call_tool(config_path, server_names, tool_name, tool_args)
return json.dumps(tool_response)
async def post_process(self, result, event):
"""Overwrite the interface function to return a social client readable format, if requested"""
for tag in event.tags().to_vec():
if tag.as_vec()[0] == 'output':
                output_format = tag.as_vec()[1]
                if output_format == "text/plain":  # check for output type
result = post_process_list_to_events(result)
# if not text/plain, don't post-process
return result
@classmethod
async def list_tools(cls, config_path, server_names):
alltools = []
for server_name in server_names:
server_params = await config.load_config(config_path, server_name)
            try:
                async with stdio_client(server_params) as (read_stream, write_stream):
                    # ask the server for its tool list
                    tools = await send_tools_list(read_stream, write_stream)
                    if tools is not None:
                        alltools.append((server_name, tools))
                        # deliberately raise to break out of the async with context
                        raise Exception("I'm gonna leave you.")
                    else:
                        print("nada")
            except Exception:
                # swallow the deliberate escape; a bare except would also eat CancelledError
                pass
        print("Ignore the error. We're good.")
return alltools
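    # Return shape of list_tools (sketch): a list of (server_name, tools) pairs, e.g.
    #   [("mcp-crypto-price", {"tools": [{"name": "get-crypto-price", ...}]})]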
@classmethod
async def call_tool(cls, config_path, server_names, tool_name, tool_args):
print("starting to call the tool")
tool_response = None
        try:
            for server_name in server_names:
                server_params = await config.load_config(config_path, server_name)
                try:
                    async with stdio_client(server_params) as (read_stream, write_stream):
                        # check whether the current server exposes the requested tool
                        tools = await send_tools_list(read_stream, write_stream)
                        # round-trip through JSON to normalize the tools object into plain dicts
                        toolsobject = json.loads(json.dumps(tools))["tools"]
                        print(toolsobject)
                        server_has_tool = False
                        for tool in toolsobject:
                            if tool["name"] == tool_name:
                                print(f"Found tool {tool_name}.")
                                server_has_tool = True
                        if server_has_tool is False:
                            continue
                        tool_response = await send_call_tool(
                            tool_name, tool_args, read_stream, write_stream)
                        # deliberately raise to leave the async with, until we find a better way
                        raise Exception("I'm gonna leave you.")
                except Exception:
                    # swallow the deliberate escape and fall through
                    pass
                return tool_response
            raise Exception("I'm gonna leave you.")
        except Exception:
            pass
        return "not_found"
# We build an example here that we can call by either calling this file directly from the main directory,
# or by adding it to our playground. You can call the example and adjust it to your needs or redefine it in the
# playground or elsewhere
def build_example(name, identifier, admin_config):
dvm_config = build_default_config(identifier)
dvm_config.USE_OWN_VENV = False
# Add NIP89
nip89info = {
"name": name,
"picture": "https://nostr.band/android-chrome-192x192.png",
"about": "I search notes on Nostr.band.",
"supportsEncryption": True,
"acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
"nip90Params": {
"users": {
"required": False,
"values": [],
"description": "Search for content from specific users"
},
"since": {
"required": False,
"values": [],
"description": "A unix timestamp in the past from where the search should start"
},
"until": {
"required": False,
"values": [],
"description": "A unix timestamp that tells until the search should include results"
},
"max_results": {
"required": False,
"values": [],
"description": "The number of maximum results to return (default currently 20)"
}
}
}
nip89config = NIP89Config()
nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["picture"])
nip89config.CONTENT = json.dumps(nip89info)
options = {"relay": "wss://relay.nostr.band"}
return MCPBridge(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config, options=options)
if __name__ == '__main__':
process_venv(MCPBridge)

View File

@ -46,6 +46,11 @@ class EventDefinitions:
KIND_NIP90_RESULTS_USER_SEARCH = Kind(6303)
KIND_NIP90_VISUAL_DISCOVERY = Kind(5304)
KIND_NIP90_RESULT_VISUAL_DISCOVERY = Kind(6304)
KIND_NIP90_MCP = Kind(5910)
KIND_NIP90_MCP_RESULT = Kind(6910)
KIND_NIP90_DVM_SUBSCRIPTION = Kind(5906)
KIND_NIP90_RESULT_DVM_SUBSCRIPTION = Kind(6906)
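    # MCP bridge pairing: kind 5910 requests yield kind 6910 results; DVM subscriptions pair 5906 with 6906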

View File

@ -17,6 +17,7 @@ class NIP89Config:
KIND: Kind = None
PK: str = ""
CONTENT: str = ""
    EXTRA_TAGS: list = []
def nip89_create_d_tag(name, pubkey, image):
@ -28,9 +29,14 @@ def nip89_create_d_tag(name, pubkey, image):
async def nip89_announce_tasks(dvm_config, client):
k_tag = Tag.parse(["k", str(dvm_config.NIP89.KIND.as_u16())])
d_tag = Tag.parse(["d", dvm_config.NIP89.DTAG])
tags = [k_tag, d_tag]
keys = Keys.parse(dvm_config.NIP89.PK)
content = dvm_config.NIP89.CONTENT
event = EventBuilder(EventDefinitions.KIND_ANNOUNCEMENT, content).tags([k_tag, d_tag]).sign_with_keys(keys)
    tags += dvm_config.NIP89.EXTRA_TAGS
event = EventBuilder(EventDefinitions.KIND_ANNOUNCEMENT, content).tags(tags).sign_with_keys(keys)
response_status = await send_event(event, client=client, dvm_config=dvm_config, broadcast=True)

View File

@ -181,7 +181,6 @@ async def send_event_outbox(event: Event, client, dvm_config) -> SendEventOutput
relays = dvm_config.RELAY_LIST
for tag in event.tags().to_vec():
if tag.as_vec()[0] == 'relays':
print(tag.as_vec())
for index, param in enumerate(tag.as_vec()):
if index != 0:
if tag.as_vec()[index].rstrip("/") not in dvm_config.AVOID_OUTBOX_RELAY_LIST:

View File

@ -20,7 +20,7 @@ from nostr_dvm.utils.dvmconfig import build_default_config
from nostr_dvm.utils.nip89_utils import create_amount_tag, NIP89Config, check_and_set_d_tag, delete_nip_89
from nostr_dvm.utils.outbox_utils import AVOID_OUTBOX_RELAY_LIST
rebroadcast_NIP89 = False # Announce NIP89 on startup Only do this if you know what you're doing.
rebroadcast_NIP89 = True # Announce NIP89 on startup Only do this if you know what you're doing.
rebroadcast_NIP65_Relay_List = True
update_profile = True
delete_nip_89_on_shutdown = False

131
tests/discovery_pareto.py Normal file
View File

@ -0,0 +1,131 @@
import json
from pathlib import Path
import dotenv
from nostr_sdk import init_logger, LogLevel
from nostr_dvm.framework import DVMFramework
from nostr_dvm.tasks.content_discovery_currently_latest_longform import DicoverContentLatestLongForm
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.dvmconfig import build_default_config
from nostr_dvm.utils.nip89_utils import create_amount_tag, NIP89Config, check_and_set_d_tag
from nostr_dvm.utils.outbox_utils import AVOID_OUTBOX_RELAY_LIST
rebroadcast_NIP89 = True  # Announce NIP89 on startup. Only do this if you know what you're doing.
rebroadcast_NIP65_Relay_List = True
update_profile = False
global_update_rate = 180  # set this high on first sync so the db can fully sync before another process tries to
use_logger = True
log_level = LogLevel.INFO
RELAY_LIST = ["wss://nostr.mom",
"wss://relay.primal.net",
"wss://nostr.oxtr.dev",
"wss://relay.nostrdvm.com",
]
SYNC_DB_RELAY_LIST = [
"wss://pareto.nostr1.com",
"wss://nostr.pareto.space"
]
if use_logger:
init_logger(log_level)
def build_longform_alt(name, identifier, admin_config, options, image, description, cost=0, update_rate=180, processing_msg=None,
update_db=True):
dvm_config = build_default_config(identifier)
dvm_config.USE_OWN_VENV = False
dvm_config.SHOWLOG = True
    dvm_config.SCHEDULE_UPDATES_SECONDS = update_rate  # update interval in seconds
    dvm_config.UPDATE_DATABASE = update_db
dvm_config.AVOID_OUTBOX_RELAY_LIST = AVOID_OUTBOX_RELAY_LIST
dvm_config.SYNC_DB_RELAY_LIST = SYNC_DB_RELAY_LIST
dvm_config.RELAY_LIST = RELAY_LIST
dvm_config.SEND_FEEDBACK_EVENTS = False
dvm_config.LOGLEVEL = LogLevel.INFO
dvm_config.FIX_COST = cost
dvm_config.CUSTOM_PROCESSING_MESSAGE = processing_msg
dvm_config.DELETE_ANNOUNCEMENT_ON_SHUTDOWN = True
admin_config.LUD16 = dvm_config.LN_ADDRESS
# Add NIP89
nip89info = {
"name": name,
"picture": image,
"about": description,
"lud16": dvm_config.LN_ADDRESS,
"supportsEncryption": True,
"acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
"personalized": False,
"amount": create_amount_tag(cost),
"nip90Params": {
"max_results": {
"required": False,
"values": [],
"description": "The number of maximum results to return (default currently 100)"
}
}
}
nip89config = NIP89Config()
nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["picture"])
nip89config.CONTENT = json.dumps(nip89info)
return DicoverContentLatestLongForm(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config, options=options)
def playground():
framework = DVMFramework()
# Popular Animals (Fluffy frens)
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = rebroadcast_NIP89
admin_config.REBROADCAST_NIP65_RELAY_LIST = rebroadcast_NIP65_Relay_List
admin_config.UPDATE_PROFILE = update_profile
options = {
"db_name": "db/nostr_pareto.db",
"db_since": 24 * 60 * 60 * 365, # 48h since gmt,
"personalized": False,
"logger": False}
image = "https://route96.pareto.space/c96d2bff659509249cd7f8ce39e0e63f1c13d0c2e1da427b60dd991acc9984a5.webp"
description = "I show Notes on Pareto"
custom_processing_msg = ["Checking new Posts on Pareto."]
cost = 0
update_db = True # we use the DB scheduler above for a shared database. Or don't use it and let the DVM manage it
pareto = build_longform_alt("Pareto",
"pareto_test",
admin_config, options,
image=image,
description=description,
update_rate=global_update_rate,
cost=cost,
processing_msg=custom_processing_msg,
update_db=update_db,
)
framework.add(pareto)
framework.run()
if __name__ == '__main__':
env_path = Path('.env')
if not env_path.is_file():
with open('.env', 'w') as f:
print("Writing new .env file")
f.write('')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
playground()

119
tests/mcp_dvm_client.py Normal file
View File

@ -0,0 +1,119 @@
import asyncio
import json
from pathlib import Path
import dotenv
from nostr_sdk import Keys, Client, Tag, EventBuilder, Filter, HandleNotification, Timestamp, nip04_decrypt, \
nip44_encrypt, Nip44Version, NostrSigner, Event, Kind, init_logger, LogLevel
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.nostr_utils import send_event, check_and_set_private_key
relay_list = ["wss://nostr.oxtr.dev", "wss://relay.nostrdvm.com"]
async def nostr_client_test_mcp_get_tools():
keys = Keys.parse(check_and_set_private_key("test_client"))
outTag = Tag.parse(["output", "application/json"])
cTag = Tag.parse(["c", "list-tools"])
alttag = Tag.parse(["alt", "This is a NIP90 Request to contact MCP server"])
relaysTag = Tag.parse(['relays'] + relay_list)
event = EventBuilder(EventDefinitions.KIND_NIP90_MCP, str("MCP request")).tags(
[outTag, alttag, cTag, relaysTag]).sign_with_keys(keys)
client = Client(NostrSigner.keys(keys))
for relay in relay_list:
await client.add_relay(relay)
await client.connect()
result = await client.send_event(event)
print(result)
return result
async def nostr_client_test_mcp_execute_tool(tool_name, tool_parameters):
keys = Keys.parse(check_and_set_private_key("test_client"))
outTag = Tag.parse(["output", "application/json"])
cTag = Tag.parse(["c", "execute-tool"])
alttag = Tag.parse(["alt", "This is a NIP90 Request to contact MCP server"])
relaysTag = Tag.parse(['relays'] + relay_list)
payload = {"name": tool_name,
"parameters": tool_parameters
}
event = EventBuilder(EventDefinitions.KIND_NIP90_MCP, json.dumps(payload)).tags(
[outTag, alttag, cTag, relaysTag]).sign_with_keys(keys)
client = Client(NostrSigner.keys(keys))
for relay in relay_list:
await client.add_relay(relay)
await client.connect()
result = await client.send_event(event)
print(result)
return result
async def nostr_client():
init_logger(LogLevel.INFO)
keys = Keys.parse(check_and_set_private_key("test_client"))
pk = keys.public_key()
print(f"Bot public key: {pk.to_bech32()}")
signer = NostrSigner.keys(keys)
client = Client(signer)
await client.add_relay("wss://relay.damus.io")
await client.add_relay("wss://nostr.mom")
await client.add_relay("wss://nostr.oxtr.dev")
await client.add_relay("wss://relay.nostrdvm.com")
await client.connect()
    mcp_filter = Filter().pubkey(pk).kind(Kind(6910)).limit(0)
await client.subscribe(mcp_filter, None)
#await nostr_client_test_mcp_get_tools()
await nostr_client_test_mcp_execute_tool(tool_name="get-crypto-price", tool_parameters={"symbol": "BTC"})
class NotificationHandler(HandleNotification):
async def handle(self, relay_url, subscription_id, event: Event):
print(f"Received new event from {relay_url}: {event.as_json()}")
if event.kind().as_u16() == 6910:
print(event.content())
async def handle_msg(self, relay_url, msg):
_var = None
await client.handle_notifications(NotificationHandler())
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
asyncio.run(nostr_client())

View File

@ -0,0 +1,14 @@
{
"mcpServers": {
"sqlite": {
"command": "uvx",
"args": ["mcp-server-sqlite", "--db-path", "test.db"]
},
"mcp-crypto-price": {
"command": "node",
"args": [
"/Volumes/Home/~/Documents/mcp-crypto-price/build/index.js"
]
}
}
}
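Each entry under mcpServers supplies the command, args, and optional env consumed by StdioServerParameters. A minimal, standalone loading sketch (the repo's real helper is load_config in nostr_dvm.backends.mcp.config; this version is illustrative only):

import json
from pathlib import Path

def read_server_entry(config_path: str, server_name: str) -> dict:
    # parse the JSON config and pull one server's launch description
    cfg = json.loads(Path(config_path).read_text())
    entry = cfg["mcpServers"][server_name]  # raises KeyError for unknown names
    return {
        "command": entry["command"],
        "args": entry.get("args", []),
        "env": entry.get("env"),
    }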

95
tests/mcp_test.py Normal file
View File

@ -0,0 +1,95 @@
import asyncio
import json
from pathlib import Path
import dotenv
from nostr_dvm.framework import DVMFramework
from nostr_dvm.tasks.mcpbridge import MCPBridge
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.dvmconfig import build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
from nostr_sdk import Tag
async def get_tools(config_path, server_names):
tools = await MCPBridge.list_tools(config_path, server_names)
return tools
def playground(announce=False):
framework = DVMFramework()
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = announce
admin_config.REBROADCAST_NIP65_RELAY_LIST = announce
admin_config.UPDATE_PROFILE = announce
name = "MCP Test DVM"
identifier = "mcp_test" # Chose a unique identifier in order to get a lnaddress
dvm_config = build_default_config(identifier)
# MCP CONFIG
config_path = str(Path.absolute(Path(__file__).parent / "mcp_server_config.json"))
server_names = ["mcp-crypto-price"]
tools = asyncio.run(get_tools(config_path, server_names))
# for now get the first connected server only
#print(tools)
j = json.loads(json.dumps(tools[0][1]))
# Add NIP89
nip89info = {
"name": name,
"picture": "https://i.nostr.build/er2Vu8DccjfanFLo.png",
"about": "I'm a MCP Test DVM'",
"supportsEncryption": True,
"acceptsNutZaps": dvm_config.ENABLE_NUTZAP,
"nip90Params": {
},
"tools": j["tools"]
}
nip89config = NIP89Config()
nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["picture"])
nip89config.CONTENT = json.dumps(nip89info)
capabilities_tag = Tag.parse(["capabilities", "mcp-1.0"])
t1_tag = Tag.parse(["t","mcp"])
t2_tag = Tag.parse(["t", "bitcoin price"])
nip89config.EXTRA_TAGS =[capabilities_tag, t1_tag, t2_tag]
options = {
"config_path": config_path,
"server_names": server_names
}
dvm = MCPBridge(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config, options=options)
framework.add(dvm)
framework.run()
if __name__ == '__main__':
env_path = Path('.env')
if not env_path.is_file():
with open('.env', 'w') as f:
print("Writing new .env file")
f.write('')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
playground(announce=False)

View File

@ -63,6 +63,27 @@ async def nostr_client_test_search_profile(input):
return event.as_json()
async def nostr_client_test_mcp(command="list-tools"):
keys = Keys.parse(check_and_set_private_key("test_client"))
outTag = Tag.parse(["output", "application/json"])
cTag = Tag.parse(["c", command])
alttag = Tag.parse(["alt", "This is a NIP90 Request to contact MCP server"])
event = EventBuilder(EventDefinitions.KIND_NIP90_MCP, str("MCP request")).tags(
[outTag, alttag, cTag]).sign_with_keys(keys)
relay_list = ["wss://relay.nostr.oxtr.dev", "wss://relay.nostrdvm.com"]
client = Client(NostrSigner.keys(keys))
for relay in relay_list:
await client.add_relay(relay)
await client.connect()
    config = DVMConfig()
await send_event(event, client=client, dvm_config=config)
return event.as_json()
async def nostr_client_test_image(prompt):
keys = Keys.parse(check_and_set_private_key("test_client"))
@ -429,7 +450,7 @@ async def nostr_client():
# await nostr_client_test_search_profile("dontbelieve")
#wot = ["99bb5591c9116600f845107d31f9b59e2f7c7e09a1ff802e84f1d43da557ca64"]
await nostr_client_test_discovery("99bb5591c9116600f845107d31f9b59e2f7c7e09a1ff802e84f1d43da557ca64", "9e09a914f41db178ba442b7372944b021135c08439516464a9bd436588af0b58")
#await nostr_client_test_discovery("99bb5591c9116600f845107d31f9b59e2f7c7e09a1ff802e84f1d43da557ca64", "9e09a914f41db178ba442b7372944b021135c08439516464a9bd436588af0b58")
#await nostr_client_test_discovery_gallery("99bb5591c9116600f845107d31f9b59e2f7c7e09a1ff802e84f1d43da557ca64", "4add3944eb596a27a650f9b954f5ed8dfefeec6ca50473605b0fbb058dd11306")
#await nostr_client_test_discovery("99bb5591c9116600f845107d31f9b59e2f7c7e09a1ff802e84f1d43da557ca64",
@ -442,31 +463,30 @@ async def nostr_client():
# cashutoken = "cashuAeyJ0b2tlbiI6W3sicHJvb2ZzIjpbeyJpZCI6InZxc1VRSVorb0sxOSIsImFtb3VudCI6MSwiQyI6IjAyNWU3ODZhOGFkMmExYTg0N2YxMzNiNGRhM2VhMGIyYWRhZGFkOTRiYzA4M2E2NWJjYjFlOTgwYTE1NGIyMDA2NCIsInNlY3JldCI6InQ1WnphMTZKMGY4UElQZ2FKTEg4V3pPck5rUjhESWhGa291LzVzZFd4S0U9In0seyJpZCI6InZxc1VRSVorb0sxOSIsImFtb3VudCI6NCwiQyI6IjAyOTQxNmZmMTY2MzU5ZWY5ZDc3MDc2MGNjZmY0YzliNTMzMzVmZTA2ZGI5YjBiZDg2Njg5Y2ZiZTIzMjVhYWUwYiIsInNlY3JldCI6IlRPNHB5WE43WlZqaFRQbnBkQ1BldWhncm44UHdUdE5WRUNYWk9MTzZtQXM9In0seyJpZCI6InZxc1VRSVorb0sxOSIsImFtb3VudCI6MTYsIkMiOiIwMmRiZTA3ZjgwYmMzNzE0N2YyMDJkNTZiMGI3ZTIzZTdiNWNkYTBhNmI3Yjg3NDExZWYyOGRiZDg2NjAzNzBlMWIiLCJzZWNyZXQiOiJHYUNIdHhzeG9HM3J2WWNCc0N3V0YxbU1NVXczK0dDN1RKRnVwOHg1cURzPSJ9XSwibWludCI6Imh0dHBzOi8vbG5iaXRzLmJpdGNvaW5maXhlc3RoaXMub3JnL2Nhc2h1L2FwaS92MS9ScDlXZGdKZjlxck51a3M1eVQ2SG5rIn1dfQ=="
# await nostr_client_test_image_private("a beautiful ostrich watching the sunset")
nutzap_wallet = NutZapWallet()
await nostr_client_test_mcp()
#nutzap_wallet = NutZapWallet()
nut_wallet = await nutzap_wallet.get_nut_wallet(client, keys)
#nut_wallet = await nutzap_wallet.get_nut_wallet(client, keys)
#dangerous, dont use this, except your wallet is messed up.
delete = False
if delete:
for mint in nut_wallet.nutmints:
await nutzap_wallet.update_spend_mint_proof_event(nut_wallet, mint.proofs, mint.mint_url, "", None,
None, client, keys)
nut_wallet.balance = 0
await nutzap_wallet.update_nut_wallet(nut_wallet, [], client, keys)
nut_wallet = await nutzap_wallet.get_nut_wallet(client, keys)
else:
nut_wallet = await nutzap_wallet.get_nut_wallet(client, keys)
if nut_wallet is None:
await nutzap_wallet.create_new_nut_wallet(dvmconfig.NUZAP_MINTS, dvmconfig.NUTZAP_RELAYS, client, keys,
"Test", "My Nutsack")
nut_wallet = await nutzap_wallet.get_nut_wallet(client, keys)
if nut_wallet is not None:
await nutzap_wallet.announce_nutzap_info_event(nut_wallet, client, keys)
else:
print("Couldn't fetch wallet, please restart and see if it is there")
#delete = False
#if delete:
# for mint in nut_wallet.nutmints:
# await nutzap_wallet.update_spend_mint_proof_event(nut_wallet, mint.proofs, mint.mint_url, "", None,
#
# nut_wallet.balance = 0
# await nutzap_wallet.update_nut_wallet(nut_wallet, [], client, keys)
# nut_wallet = await nutzap_wallet.get_nut_wallet(client, keys)
#else:
# nut_wallet = await nutzap_wallet.get_nut_wallet(client, keys)
# if nut_wallet is None:
# await nutzap_wallet.create_new_nut_wallet(dvmconfig.NUZAP_MINTS, dvmconfig.NUTZAP_RELAYS, client, keys,
# "Test", "My Nutsack")
# nut_wallet = await nutzap_wallet.get_nut_wallet(client, keys)
# if nut_wallet is not None:
# await nutzap_wallet.announce_nutzap_info_event(nut_wallet, client, keys)
# else:
# print("Couldn't fetch wallet, please restart and see if it is there")

View File

@ -39,7 +39,7 @@ async def test():
print(nostruri)
await test_search_by_user_since_days(client,
PublicKey.from_bech32(
PublicKey.parse(
"npub1nxa4tywfz9nqp7z9zp7nr7d4nchhclsf58lcqt5y782rmf2hefjquaa6q8"), 60,
"Bitcoin")