Merge branch 'main' into backends-nserver

This commit is contained in:
Believethehype
2023-12-20 16:48:34 +01:00
48 changed files with 2395 additions and 444 deletions

1
.gitignore vendored

@@ -173,3 +173,4 @@ db/*
backends/nserver/venv
backends/nserver/cache
backends/nserver/modules/image_upscale/weights
cache/


@@ -11,9 +11,10 @@ This means the project is in alpha status, interfaces might still change/break a
- Create a new venv by running `python -m venv venv`
- Place .env file (based on .env_example) in main folder.
- the framework will automatically create keys for your dvms in this file.
- Install requirements.txt
- Run python main.py.
- Create a `LNbits` account on an accessible instance of your choice and enter one account's id and admin key (this account will create other accounts for the dvms)
- the framework will then automatically create keys, nip89 tags and zappable NIP57 `lightning addresses` for your dvms in the .env file.
- pip install nostr-dvm
- Run python3 main.py.
DVM examples are already prepared in each task component; feel free to play along with the existing ones.
You can also add new tasks by using the interface, just like the existing tasks in the `tasks` folder (a minimal sketch follows below).
@@ -21,8 +22,6 @@ You can also add new tasks by using the interface, just like the existing tasks
A `bot` is running by default that lists and communicates with the `DVMs` added to it,
so your DVMs can be controlled via any regular social client as well.
The Framework optionally supports `LNbits` to create invoices instead of using a `lightning address`. If LNBits is not used,
make sure your nostr accounts have a valid lightning address.
If LNBits is not used, make sure your DVM's nostr accounts have a valid lightning address.
A tutorial on how to add additional tasks, as well as the larger server backend will be added at a later stage.
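Until then, here is a minimal sketch of what a custom task built on `DVMTaskInterface` can look like. The structure follows the existing tasks in the `tasks` folder; the `TASK` name, the echo logic, and the `prompt` option are hypothetical placeholders rather than framework APIs:

from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface

class EchoTask(DVMTaskInterface):
    TASK: str = "echo"   # hypothetical task name
    FIX_COST: float = 0  # free to use

    def is_input_supported(self, tags):
        # A real task would inspect the request's 'i' tags here.
        return True

    def process(self, request_form):
        # set_options() parses the JSON options attached to the request form;
        # this toy task simply echoes a (hypothetical) prompt option back.
        options = self.set_options(request_form)
        return options.get("prompt", "")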

0
backends/__init__.py Normal file

0
backends/mlx/__init__.py Normal file


@@ -0,0 +1,70 @@
# Copyright © 2023 Apple Inc.
import mlx.core as mx
import mlx.nn as nn
from .config import CLIPTextModelConfig
class CLIPEncoderLayer(nn.Module):
"""The transformer encoder layer from CLIP."""
def __init__(self, model_dims: int, num_heads: int):
super().__init__()
self.layer_norm1 = nn.LayerNorm(model_dims)
self.layer_norm2 = nn.LayerNorm(model_dims)
self.attention = nn.MultiHeadAttention(model_dims, num_heads)
# Add biases to the attention projections to match CLIP
self.attention.query_proj.bias = mx.zeros(model_dims)
self.attention.key_proj.bias = mx.zeros(model_dims)
self.attention.value_proj.bias = mx.zeros(model_dims)
self.attention.out_proj.bias = mx.zeros(model_dims)
self.linear1 = nn.Linear(model_dims, 4 * model_dims)
self.linear2 = nn.Linear(4 * model_dims, model_dims)
def __call__(self, x, attn_mask=None):
y = self.layer_norm1(x)
y = self.attention(y, y, y, attn_mask)
x = y + x
y = self.layer_norm2(x)
y = self.linear1(y)
y = nn.gelu_approx(y)
y = self.linear2(y)
x = y + x
return x
class CLIPTextModel(nn.Module):
"""Implements the text encoder transformer from CLIP."""
def __init__(self, config: CLIPTextModelConfig):
super().__init__()
self.token_embedding = nn.Embedding(config.vocab_size, config.model_dims)
self.position_embedding = nn.Embedding(config.max_length, config.model_dims)
self.layers = [
CLIPEncoderLayer(config.model_dims, config.num_heads)
for i in range(config.num_layers)
]
self.final_layer_norm = nn.LayerNorm(config.model_dims)
def __call__(self, x):
# Extract some shapes
B, N = x.shape
# Compute the embeddings
x = self.token_embedding(x)
x = x + self.position_embedding.weight[:N]
# Compute the features from the transformer
mask = nn.MultiHeadAttention.create_additive_causal_mask(N, x.dtype)
for l in self.layers:
x = l(x, mask)
# Apply the final layernorm and return
return self.final_layer_norm(x)


@@ -0,0 +1,48 @@
# Copyright © 2023 Apple Inc.
from dataclasses import dataclass
from typing import Optional, Tuple
@dataclass
class AutoencoderConfig:
in_channels: int = 3
out_channels: int = 3
latent_channels_out: int = 8
latent_channels_in: int = 4
block_out_channels: Tuple[int] = (128, 256, 512, 512)
layers_per_block: int = 2
norm_num_groups: int = 32
scaling_factor: float = 0.18215
@dataclass
class CLIPTextModelConfig:
num_layers: int = 23
model_dims: int = 1024
num_heads: int = 16
max_length: int = 77
vocab_size: int = 49408
@dataclass
class UNetConfig:
in_channels: int = 4
out_channels: int = 4
conv_in_kernel: int = 3
conv_out_kernel: int = 3
block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
layers_per_block: Tuple[int] = (2, 2, 2, 2)
mid_block_layers: int = 2
transformer_layers_per_block: Tuple[int] = (1, 1, 1, 1)
num_attention_heads: Tuple[int] = (5, 10, 20, 20)
cross_attention_dim: Tuple[int] = (1024,) * 4
norm_num_groups: int = 32
@dataclass
class DiffusionConfig:
beta_schedule: str = "scaled_linear"
beta_start: float = 0.00085
beta_end: float = 0.012
num_train_steps: int = 1000


@@ -0,0 +1,292 @@
# Copyright © 2023 Apple Inc.
import json
from functools import partial
import numpy as np
from huggingface_hub import hf_hub_download
from safetensors import safe_open as safetensor_open
import mlx.core as mx
from mlx.utils import tree_unflatten
from .clip import CLIPTextModel
from .config import UNetConfig, CLIPTextModelConfig, AutoencoderConfig, DiffusionConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
_DEFAULT_MODEL = "stabilityai/stable-diffusion-2-1-base"
_MODELS = {
# See https://huggingface.co/stabilityai/stable-diffusion-2-1-base for the model details and license
"stabilityai/stable-diffusion-2-1-base": {
"unet_config": "unet/config.json",
"unet": "unet/diffusion_pytorch_model.safetensors",
"text_encoder_config": "text_encoder/config.json",
"text_encoder": "text_encoder/model.safetensors",
"vae_config": "vae/config.json",
"vae": "vae/diffusion_pytorch_model.safetensors",
"diffusion_config": "scheduler/scheduler_config.json",
"tokenizer_vocab": "tokenizer/vocab.json",
"tokenizer_merges": "tokenizer/merges.txt",
}
}
def _from_numpy(x):
return mx.array(np.ascontiguousarray(x))
def map_unet_weights(key, value):
# Map up/downsampling
if "downsamplers" in key:
key = key.replace("downsamplers.0.conv", "downsample")
if "upsamplers" in key:
key = key.replace("upsamplers.0.conv", "upsample")
# Map the mid block
if "mid_block.resnets.0" in key:
key = key.replace("mid_block.resnets.0", "mid_blocks.0")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
# Map attention layers
if "to_k" in key:
key = key.replace("to_k", "key_proj")
if "to_out.0" in key:
key = key.replace("to_out.0", "out_proj")
if "to_q" in key:
key = key.replace("to_q", "query_proj")
if "to_v" in key:
key = key.replace("to_v", "value_proj")
# Map transformer ffn
if "ff.net.2" in key:
key = key.replace("ff.net.2", "linear3")
if "ff.net.0" in key:
k1 = key.replace("ff.net.0.proj", "linear1")
k2 = key.replace("ff.net.0.proj", "linear2")
v1, v2 = np.split(value, 2)
return [(k1, _from_numpy(v1)), (k2, _from_numpy(v2))]
if "conv_shortcut.weight" in key:
value = value.squeeze()
# Transform the weights from 1x1 convs to linear
if len(value.shape) == 4 and ("proj_in" in key or "proj_out" in key):
value = value.squeeze()
if len(value.shape) == 4:
value = value.transpose(0, 2, 3, 1)
return [(key, _from_numpy(value))]
def map_clip_text_encoder_weights(key, value):
# Remove prefixes
if key.startswith("text_model."):
key = key[11:]
if key.startswith("embeddings."):
key = key[11:]
if key.startswith("encoder."):
key = key[8:]
# Map attention layers
if "self_attn." in key:
key = key.replace("self_attn.", "attention.")
if "q_proj." in key:
key = key.replace("q_proj.", "query_proj.")
if "k_proj." in key:
key = key.replace("k_proj.", "key_proj.")
if "v_proj." in key:
key = key.replace("v_proj.", "value_proj.")
# Map ffn layers
if "mlp.fc1" in key:
key = key.replace("mlp.fc1", "linear1")
if "mlp.fc2" in key:
key = key.replace("mlp.fc2", "linear2")
return [(key, _from_numpy(value))]
def map_vae_weights(key, value):
# Map up/downsampling
if "downsamplers" in key:
key = key.replace("downsamplers.0.conv", "downsample")
if "upsamplers" in key:
key = key.replace("upsamplers.0.conv", "upsample")
# Map attention layers
if "to_k" in key:
key = key.replace("to_k", "key_proj")
if "to_out.0" in key:
key = key.replace("to_out.0", "out_proj")
if "to_q" in key:
key = key.replace("to_q", "query_proj")
if "to_v" in key:
key = key.replace("to_v", "value_proj")
# Map the mid block
if "mid_block.resnets.0" in key:
key = key.replace("mid_block.resnets.0", "mid_blocks.0")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
# Map the quant/post_quant layers
if "quant_conv" in key:
key = key.replace("quant_conv", "quant_proj")
value = value.squeeze()
# Map the conv_shortcut to linear
if "conv_shortcut.weight" in key:
value = value.squeeze()
if len(value.shape) == 4:
value = value.transpose(0, 2, 3, 1)
return [(key, _from_numpy(value))]
def _flatten(params):
return [(k, v) for p in params for (k, v) in p]
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
dtype = np.float16 if float16 else np.float32
with safetensor_open(weight_file, framework="numpy") as f:
weights = _flatten([mapper(k, f.get_tensor(k).astype(dtype)) for k in f.keys()])
model.update(tree_unflatten(weights))
def _check_key(key: str, part: str):
if key not in _MODELS:
raise ValueError(
f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
)
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion UNet from Hugging Face Hub."""
_check_key(key, "load_unet")
# Download the config and create the model
unet_config = _MODELS[key]["unet_config"]
with open(hf_hub_download(key, unet_config)) as f:
config = json.load(f)
n_blocks = len(config["block_out_channels"])
model = UNetModel(
UNetConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=[config["layers_per_block"]] * n_blocks,
num_attention_heads=[config["attention_head_dim"]] * n_blocks
if isinstance(config["attention_head_dim"], int)
else config["attention_head_dim"],
cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
norm_num_groups=config["norm_num_groups"],
)
)
# Download the weights and map them into the model
unet_weights = _MODELS[key]["unet"]
weight_file = hf_hub_download(key, unet_weights)
_load_safetensor_weights(map_unet_weights, model, weight_file, float16)
return model
def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion text encoder from Hugging Face Hub."""
_check_key(key, "load_text_encoder")
# Download the config and create the model
text_encoder_config = _MODELS[key]["text_encoder_config"]
with open(hf_hub_download(key, text_encoder_config)) as f:
config = json.load(f)
model = CLIPTextModel(
CLIPTextModelConfig(
num_layers=config["num_hidden_layers"],
model_dims=config["hidden_size"],
num_heads=config["num_attention_heads"],
max_length=config["max_position_embeddings"],
vocab_size=config["vocab_size"],
)
)
# Download the weights and map them into the model
text_encoder_weights = _MODELS[key]["text_encoder"]
weight_file = hf_hub_download(key, text_encoder_weights)
_load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16)
return model
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion autoencoder from Hugging Face Hub."""
_check_key(key, "load_autoencoder")
# Download the config and create the model
vae_config = _MODELS[key]["vae_config"]
with open(hf_hub_download(key, vae_config)) as f:
config = json.load(f)
model = Autoencoder(
AutoencoderConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
latent_channels_out=2 * config["latent_channels"],
latent_channels_in=config["latent_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=config["layers_per_block"],
norm_num_groups=config["norm_num_groups"],
)
)
# Download the weights and map them into the model
vae_weights = _MODELS[key]["vae"]
weight_file = hf_hub_download(key, vae_weights)
_load_safetensor_weights(map_vae_weights, model, weight_file, float16)
return model
def load_diffusion_config(key: str = _DEFAULT_MODEL):
"""Load the stable diffusion config from Hugging Face Hub."""
_check_key(key, "load_diffusion_config")
diffusion_config = _MODELS[key]["diffusion_config"]
with open(hf_hub_download(key, diffusion_config)) as f:
config = json.load(f)
return DiffusionConfig(
beta_start=config["beta_start"],
beta_end=config["beta_end"],
beta_schedule=config["beta_schedule"],
num_train_steps=config["num_train_timesteps"],
)
def load_tokenizer(key: str = _DEFAULT_MODEL):
_check_key(key, "load_tokenizer")
vocab_file = hf_hub_download(key, _MODELS[key]["tokenizer_vocab"])
with open(vocab_file, encoding="utf-8") as f:
vocab = json.load(f)
merges_file = hf_hub_download(key, _MODELS[key]["tokenizer_merges"])
with open(merges_file, encoding="utf-8") as f:
bpe_merges = f.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1]
bpe_merges = [tuple(m.split()) for m in bpe_merges]
bpe_ranks = dict(map(reversed, enumerate(bpe_merges)))
return Tokenizer(bpe_ranks, vocab)
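Taken together, these loaders assemble every piece of the pipeline. A minimal sketch of their intended use (float16 halves the memory footprint; the prompt is an arbitrary example):

unet = load_unet(float16=True)
text_encoder = load_text_encoder(float16=True)
autoencoder = load_autoencoder(float16=True)
diffusion_config = load_diffusion_config()
tokenizer = load_tokenizer()

# Tokenize a prompt and encode it for cross attention in the UNet
tokens = tokenizer.tokenize("a photo of an astronaut riding a horse")
conditioning = text_encoder(mx.array([tokens]))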


@@ -0,0 +1,74 @@
# Copyright © 2023 Apple Inc.
from .config import DiffusionConfig
import mlx.core as mx
def _linspace(a, b, num):
x = mx.arange(0, num) / (num - 1)
return (b - a) * x + a
def _interp(y, x_new):
"""Interpolate the function defined by (arange(0, len(y)), y) at positions x_new."""
x_low = x_new.astype(mx.int32)
x_high = mx.minimum(x_low + 1, len(y) - 1)
y_low = y[x_low]
y_high = y[x_high]
delta_x = x_new - x_low
y_new = y_low * (1 - delta_x) + delta_x * y_high
return y_new
class SimpleEulerSampler:
"""A simple Euler integrator that can be used to sample from our diffusion models.
The method ``step()`` performs one Euler step from x_t to x_t_prev.
"""
def __init__(self, config: DiffusionConfig):
# Compute the noise schedule
if config.beta_schedule == "linear":
betas = _linspace(
config.beta_start, config.beta_end, config.num_train_steps
)
elif config.beta_schedule == "scaled_linear":
betas = _linspace(
config.beta_start**0.5, config.beta_end**0.5, config.num_train_steps
).square()
else:
raise NotImplementedError(f"{config.beta_schedule} is not implemented.")
alphas = 1 - betas
alphas_cumprod = mx.cumprod(alphas)
self._sigmas = mx.concatenate(
[mx.zeros(1), ((1 - alphas_cumprod) / alphas_cumprod).sqrt()]
)
def sample_prior(self, shape, dtype=mx.float32, key=None):
noise = mx.random.normal(shape, key=key)
return (
noise * self._sigmas[-1] * (self._sigmas[-1].square() + 1).rsqrt()
).astype(dtype)
def sigmas(self, t):
return _interp(self._sigmas, t)
def timesteps(self, num_steps: int, dtype=mx.float32):
steps = _linspace(len(self._sigmas) - 1, 0, num_steps + 1).astype(dtype)
return list(zip(steps, steps[1:]))
def step(self, eps_pred, x_t, t, t_prev):
sigma = self.sigmas(t).astype(eps_pred.dtype)
sigma_prev = self.sigmas(t_prev).astype(eps_pred.dtype)
dt = sigma_prev - sigma
x_t_prev = (sigma.square() + 1).sqrt() * x_t + eps_pred * dt
x_t_prev = x_t_prev * (sigma_prev.square() + 1).rsqrt()
return x_t_prev
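A sketch of how this API composes: `timesteps()` yields (t, t_prev) pairs running from full noise down to zero, `sample_prior()` draws the starting latent, and `step()` performs one Euler update. Here `eps_model` is a stand-in for the UNet's noise prediction, and the latent shape is a typical 512x512 Stable Diffusion latent; both are assumptions for illustration:

sampler = SimpleEulerSampler(DiffusionConfig())

x_t = sampler.sample_prior((1, 64, 64, 4))  # starting latent, NHWC layout
for t, t_prev in sampler.timesteps(50):
    eps_pred = eps_model(x_t, t)  # hypothetical noise-prediction call
    x_t = sampler.step(eps_pred, x_t, t, t_prev)
# x_t now approximates a clean latent for the VAE decoder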


@@ -0,0 +1,100 @@
# Copyright © 2023 Apple Inc.
import regex
class Tokenizer:
"""A simple port of CLIPTokenizer from https://github.com/huggingface/transformers/ ."""
def __init__(self, bpe_ranks, vocab):
self.bpe_ranks = bpe_ranks
self.vocab = vocab
self.pat = regex.compile(
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
regex.IGNORECASE,
)
self._cache = {self.bos: self.bos, self.eos: self.eos}
@property
def bos(self):
return "<|startoftext|>"
@property
def bos_token(self):
return self.vocab[self.bos]
@property
def eos(self):
return "<|endoftext|>"
@property
def eos_token(self):
return self.vocab[self.eos]
def bpe(self, text):
if text in self._cache:
return self._cache[text]
unigrams = list(text[:-1]) + [text[-1] + "</w>"]
unique_bigrams = set(zip(unigrams, unigrams[1:]))
if not unique_bigrams:
return unigrams
# In every iteration try to merge the two most likely bigrams. If none
# was merged we are done.
#
# Ported from https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/tokenization_clip.py
while unique_bigrams:
bigram = min(
unique_bigrams, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))
)
if bigram not in self.bpe_ranks:
break
new_unigrams = []
skip = False
for a, b in zip(unigrams, unigrams[1:]):
if skip:
skip = False
continue
if (a, b) == bigram:
new_unigrams.append(a + b)
skip = True
else:
new_unigrams.append(a)
if not skip:
new_unigrams.append(b)
unigrams = new_unigrams
unique_bigrams = set(zip(unigrams, unigrams[1:]))
self._cache[text] = unigrams
return unigrams
def tokenize(self, text, prepend_bos=True, append_eos=True):
if isinstance(text, list):
return [self.tokenize(t, prepend_bos, append_eos) for t in text]
# Lower case cleanup and split according to self.pat. Hugging Face does
# a much more thorough job here but this should suffice for 95% of
# cases.
clean_text = regex.sub(r"\s+", " ", text.lower())
tokens = regex.findall(self.pat, clean_text)
# Split the tokens according to the byte-pair merge file
bpe_tokens = [ti for t in tokens for ti in self.bpe(t)]
# Map to token ids and return
tokens = [self.vocab[t] for t in bpe_tokens]
if prepend_bos:
tokens = [self.bos_token] + tokens
if append_eos:
tokens.append(self.eos_token)
return tokens
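A usage sketch, assuming `bpe_ranks` and `vocab` were built from the CLIP vocab/merges files as in `load_tokenizer` above:

tokenizer = Tokenizer(bpe_ranks, vocab)
ids = tokenizer.tokenize("a purple ostrich watching the sunset")
# ids is [bos_token, <bpe ids...>, eos_token]; a list of strings is tokenized recursively
batch = tokenizer.tokenize(["first prompt", "second prompt"])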


@@ -0,0 +1,425 @@
# Copyright © 2023 Apple Inc.
import math
from typing import Optional
import mlx.core as mx
import mlx.nn as nn
from .config import UNetConfig
def upsample_nearest(x, scale: int = 2):
B, H, W, C = x.shape
x = mx.broadcast_to(x[:, :, None, :, None, :], (B, H, scale, W, scale, C))
x = x.reshape(B, H * scale, W * scale, C)
return x
class TimestepEmbedding(nn.Module):
def __init__(self, in_channels: int, time_embed_dim: int):
super().__init__()
self.linear_1 = nn.Linear(in_channels, time_embed_dim)
self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim)
def __call__(self, x):
x = self.linear_1(x)
x = nn.silu(x)
x = self.linear_2(x)
return x
class TransformerBlock(nn.Module):
def __init__(
self,
model_dims: int,
num_heads: int,
hidden_dims: Optional[int] = None,
memory_dims: Optional[int] = None,
):
super().__init__()
self.norm1 = nn.LayerNorm(model_dims)
self.attn1 = nn.MultiHeadAttention(model_dims, num_heads)
self.attn1.out_proj.bias = mx.zeros(model_dims)
memory_dims = memory_dims or model_dims
self.norm2 = nn.LayerNorm(model_dims)
self.attn2 = nn.MultiHeadAttention(
model_dims, num_heads, key_input_dims=memory_dims
)
self.attn2.out_proj.bias = mx.zeros(model_dims)
hidden_dims = hidden_dims or 4 * model_dims
self.norm3 = nn.LayerNorm(model_dims)
self.linear1 = nn.Linear(model_dims, hidden_dims)
self.linear2 = nn.Linear(model_dims, hidden_dims)
self.linear3 = nn.Linear(hidden_dims, model_dims)
def __call__(self, x, memory, attn_mask, memory_mask):
# Self attention
y = self.norm1(x)
y = self.attn1(y, y, y, attn_mask)
x = x + y
# Cross attention
y = self.norm2(x)
y = self.attn2(y, memory, memory, memory_mask)
x = x + y
# FFN
y = self.norm3(x)
y_a = self.linear1(y)
y_b = self.linear2(y)
y = y_a * nn.gelu_approx(y_b)  # GEGLU: gate with an approximate GELU
y = self.linear3(y)
x = x + y
return x
class Transformer2D(nn.Module):
"""A transformer model for inputs with 2 spatial dimensions."""
def __init__(
self,
in_channels: int,
model_dims: int,
encoder_dims: int,
num_heads: int,
num_layers: int = 1,
norm_num_groups: int = 32,
):
super().__init__()
self.norm = nn.GroupNorm(norm_num_groups, in_channels, pytorch_compatible=True)
self.proj_in = nn.Linear(in_channels, model_dims)
self.transformer_blocks = [
TransformerBlock(model_dims, num_heads, memory_dims=encoder_dims)
for i in range(num_layers)
]
self.proj_out = nn.Linear(model_dims, in_channels)
def __call__(self, x, encoder_x, attn_mask, encoder_attn_mask):
# Save the input to add to the output
input_x = x
# Perform the input norm and projection
B, H, W, C = x.shape
x = self.norm(x).reshape(B, -1, C)
x = self.proj_in(x)
# Apply the transformer
for block in self.transformer_blocks:
x = block(x, encoder_x, attn_mask, encoder_attn_mask)
# Apply the output projection and reshape
x = self.proj_out(x)
x = x.reshape(B, H, W, C)
return x + input_x
class ResnetBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: Optional[int] = None,
groups: int = 32,
temb_channels: Optional[int] = None,
):
super().__init__()
out_channels = out_channels or in_channels
self.norm1 = nn.GroupNorm(groups, in_channels, pytorch_compatible=True)
self.conv1 = nn.Conv2d(
in_channels, out_channels, kernel_size=3, stride=1, padding=1
)
if temb_channels is not None:
self.time_emb_proj = nn.Linear(temb_channels, out_channels)
self.norm2 = nn.GroupNorm(groups, out_channels, pytorch_compatible=True)
self.conv2 = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=1, padding=1
)
if in_channels != out_channels:
self.conv_shortcut = nn.Linear(in_channels, out_channels)
def __call__(self, x, temb=None):
if temb is not None:
temb = self.time_emb_proj(nn.silu(temb))
y = self.norm1(x)
y = nn.silu(y)
y = self.conv1(y)
if temb is not None:
y = y + temb[:, None, None, :]
y = self.norm2(y)
y = nn.silu(y)
y = self.conv2(y)
x = y + (x if "conv_shortcut" not in self else self.conv_shortcut(x))
return x
class UNetBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
temb_channels: int,
prev_out_channels: Optional[int] = None,
num_layers: int = 1,
transformer_layers_per_block: int = 1,
num_attention_heads: int = 8,
cross_attention_dim=1280,
resnet_groups: int = 32,
add_downsample=True,
add_upsample=True,
add_cross_attention=True,
):
super().__init__()
# Prepare the in channels list for the resnets
if prev_out_channels is None:
in_channels_list = [in_channels] + [out_channels] * (num_layers - 1)
else:
in_channels_list = [prev_out_channels] + [out_channels] * (num_layers - 1)
res_channels_list = [out_channels] * (num_layers - 1) + [in_channels]
in_channels_list = [
a + b for a, b in zip(in_channels_list, res_channels_list)
]
# Add resnet blocks that also process the time embedding
self.resnets = [
ResnetBlock2D(
in_channels=ic,
out_channels=out_channels,
temb_channels=temb_channels,
groups=resnet_groups,
)
for ic in in_channels_list
]
# Add optional cross attention layers
if add_cross_attention:
self.attentions = [
Transformer2D(
in_channels=out_channels,
model_dims=out_channels,
num_heads=num_attention_heads,
num_layers=transformer_layers_per_block,
encoder_dims=cross_attention_dim,
)
for i in range(num_layers)
]
# Add an optional downsampling layer
if add_downsample:
self.downsample = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=2, padding=1
)
# or upsampling layer
if add_upsample:
self.upsample = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=1, padding=1
)
def __call__(
self,
x,
encoder_x=None,
temb=None,
attn_mask=None,
encoder_attn_mask=None,
residual_hidden_states=None,
):
output_states = []
for i in range(len(self.resnets)):
if residual_hidden_states is not None:
x = mx.concatenate([x, residual_hidden_states.pop()], axis=-1)
x = self.resnets[i](x, temb)
if "attentions" in self:
x = self.attentions[i](x, encoder_x, attn_mask, encoder_attn_mask)
output_states.append(x)
if "downsample" in self:
x = self.downsample(x)
output_states.append(x)
if "upsample" in self:
x = self.upsample(upsample_nearest(x))
output_states.append(x)
return x, output_states
class UNetModel(nn.Module):
"""The conditional 2D UNet model that actually performs the denoising."""
def __init__(self, config: UNetConfig):
super().__init__()
self.conv_in = nn.Conv2d(
config.in_channels,
config.block_out_channels[0],
config.conv_in_kernel,
padding=(config.conv_in_kernel - 1) // 2,
)
self.timesteps = nn.SinusoidalPositionalEncoding(
config.block_out_channels[0],
max_freq=1,
min_freq=math.exp(
-math.log(10000) + 2 * math.log(10000) / config.block_out_channels[0]
),
scale=1.0,
cos_first=True,
full_turns=False,
)
self.time_embedding = TimestepEmbedding(
config.block_out_channels[0],
config.block_out_channels[0] * 4,
)
# Make the downsampling blocks
block_channels = [config.block_out_channels[0]] + list(
config.block_out_channels
)
self.down_blocks = [
UNetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=config.block_out_channels[0] * 4,
num_layers=config.layers_per_block[i],
transformer_layers_per_block=config.transformer_layers_per_block[i],
num_attention_heads=config.num_attention_heads[i],
cross_attention_dim=config.cross_attention_dim[i],
resnet_groups=config.norm_num_groups,
add_downsample=(i < len(config.block_out_channels) - 1),
add_upsample=False,
add_cross_attention=(i < len(config.block_out_channels) - 1),
)
for i, (in_channels, out_channels) in enumerate(
zip(block_channels, block_channels[1:])
)
]
# Make the middle block
self.mid_blocks = [
ResnetBlock2D(
in_channels=config.block_out_channels[-1],
out_channels=config.block_out_channels[-1],
temb_channels=config.block_out_channels[0] * 4,
groups=config.norm_num_groups,
),
Transformer2D(
in_channels=config.block_out_channels[-1],
model_dims=config.block_out_channels[-1],
num_heads=config.num_attention_heads[-1],
num_layers=config.transformer_layers_per_block[-1],
encoder_dims=config.cross_attention_dim[-1],
),
ResnetBlock2D(
in_channels=config.block_out_channels[-1],
out_channels=config.block_out_channels[-1],
temb_channels=config.block_out_channels[0] * 4,
groups=config.norm_num_groups,
),
]
# Make the upsampling blocks
block_channels = (
[config.block_out_channels[0]]
+ list(config.block_out_channels)
+ [config.block_out_channels[-1]]
)
self.up_blocks = [
UNetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=config.block_out_channels[0] * 4,
prev_out_channels=prev_out_channels,
num_layers=config.layers_per_block[i] + 1,
transformer_layers_per_block=config.transformer_layers_per_block[i],
num_attention_heads=config.num_attention_heads[i],
cross_attention_dim=config.cross_attention_dim[i],
resnet_groups=config.norm_num_groups,
add_downsample=False,
add_upsample=(i > 0),
add_cross_attention=(i < len(config.block_out_channels) - 1),
)
for i, (in_channels, out_channels, prev_out_channels) in reversed(
list(
enumerate(
zip(block_channels, block_channels[1:], block_channels[2:])
)
)
)
]
self.conv_norm_out = nn.GroupNorm(
config.norm_num_groups,
config.block_out_channels[0],
pytorch_compatible=True,
)
self.conv_out = nn.Conv2d(
config.block_out_channels[0],
config.out_channels,
config.conv_out_kernel,
padding=(config.conv_out_kernel - 1) // 2,
)
def __call__(self, x, timestep, encoder_x, attn_mask=None, encoder_attn_mask=None):
# Compute the time embeddings
temb = self.timesteps(timestep).astype(x.dtype)
temb = self.time_embedding(temb)
# Preprocess the input
x = self.conv_in(x)
# Run the downsampling part of the unet
residuals = [x]
for block in self.down_blocks:
x, res = block(
x,
encoder_x=encoder_x,
temb=temb,
attn_mask=attn_mask,
encoder_attn_mask=encoder_attn_mask,
)
residuals.extend(res)
# Run the middle part of the unet
x = self.mid_blocks[0](x, temb)
x = self.mid_blocks[1](x, encoder_x, attn_mask, encoder_attn_mask)
x = self.mid_blocks[2](x, temb)
# Run the upsampling part of the unet
for block in self.up_blocks:
x, _ = block(
x,
encoder_x=encoder_x,
temb=temb,
attn_mask=attn_mask,
encoder_attn_mask=encoder_attn_mask,
residual_hidden_states=residuals,
)
# Postprocess the output
x = self.conv_norm_out(x)
x = nn.silu(x)
x = self.conv_out(x)
return x


@@ -0,0 +1,268 @@
# Copyright © 2023 Apple Inc.
import math
from typing import List
import mlx.core as mx
import mlx.nn as nn
from .config import AutoencoderConfig
from .unet import ResnetBlock2D, upsample_nearest
class Attention(nn.Module):
"""A single head unmasked attention for use with the VAE."""
def __init__(self, dims: int, norm_groups: int = 32):
super().__init__()
self.group_norm = nn.GroupNorm(norm_groups, dims, pytorch_compatible=True)
self.query_proj = nn.Linear(dims, dims)
self.key_proj = nn.Linear(dims, dims)
self.value_proj = nn.Linear(dims, dims)
self.out_proj = nn.Linear(dims, dims)
def __call__(self, x):
B, H, W, C = x.shape
y = self.group_norm(x)
queries = self.query_proj(y).reshape(B, H * W, C)
keys = self.key_proj(y).reshape(B, H * W, C)
values = self.value_proj(y).reshape(B, H * W, C)
scale = 1 / math.sqrt(queries.shape[-1])
scores = (queries * scale) @ keys.transpose(0, 2, 1)
attn = mx.softmax(scores, axis=-1)
y = (attn @ values).reshape(B, H, W, C)
y = self.out_proj(y)
x = x + y
return x
class EncoderDecoderBlock2D(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
num_layers: int = 1,
resnet_groups: int = 32,
add_downsample=True,
add_upsample=True,
):
super().__init__()
# Add the resnet blocks
self.resnets = [
ResnetBlock2D(
in_channels=in_channels if i == 0 else out_channels,
out_channels=out_channels,
groups=resnet_groups,
)
for i in range(num_layers)
]
# Add an optional downsampling layer
if add_downsample:
self.downsample = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=2, padding=1
)
# or upsampling layer
if add_upsample:
self.upsample = nn.Conv2d(
out_channels, out_channels, kernel_size=3, stride=1, padding=1
)
def __call__(self, x):
for resnet in self.resnets:
x = resnet(x)
if "downsample" in self:
x = self.downsample(x)
if "upsample" in self:
x = self.upsample(upsample_nearest(x))
return x
class Encoder(nn.Module):
"""Implements the encoder side of the Autoencoder."""
def __init__(
self,
in_channels: int,
out_channels: int,
block_out_channels: List[int] = [64],
layers_per_block: int = 2,
resnet_groups: int = 32,
):
super().__init__()
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1
)
channels = [block_out_channels[0]] + list(block_out_channels)
self.down_blocks = [
EncoderDecoderBlock2D(
in_channels,
out_channels,
num_layers=layers_per_block,
resnet_groups=resnet_groups,
add_downsample=i < len(block_out_channels) - 1,
add_upsample=False,
)
for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:]))
]
self.mid_blocks = [
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
Attention(block_out_channels[-1], resnet_groups),
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
]
self.conv_norm_out = nn.GroupNorm(
resnet_groups, block_out_channels[-1], pytorch_compatible=True
)
self.conv_out = nn.Conv2d(block_out_channels[-1], out_channels, 3, padding=1)
def __call__(self, x):
x = self.conv_in(x)
for l in self.down_blocks:
x = l(x)
x = self.mid_blocks[0](x)
x = self.mid_blocks[1](x)
x = self.mid_blocks[2](x)
x = self.conv_norm_out(x)
x = nn.silu(x)
x = self.conv_out(x)
return x
class Decoder(nn.Module):
"""Implements the decoder side of the Autoencoder."""
def __init__(
self,
in_channels: int,
out_channels: int,
block_out_channels: List[int] = [64],
layers_per_block: int = 2,
resnet_groups: int = 32,
):
super().__init__()
self.conv_in = nn.Conv2d(
in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1
)
self.mid_blocks = [
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
Attention(block_out_channels[-1], resnet_groups),
ResnetBlock2D(
in_channels=block_out_channels[-1],
out_channels=block_out_channels[-1],
groups=resnet_groups,
),
]
channels = list(reversed(block_out_channels))
channels = [channels[0]] + channels
self.up_blocks = [
EncoderDecoderBlock2D(
in_channels,
out_channels,
num_layers=layers_per_block,
resnet_groups=resnet_groups,
add_downsample=False,
add_upsample=i < len(block_out_channels) - 1,
)
for i, (in_channels, out_channels) in enumerate(zip(channels, channels[1:]))
]
self.conv_norm_out = nn.GroupNorm(
resnet_groups, block_out_channels[0], pytorch_compatible=True
)
self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
def __call__(self, x):
x = self.conv_in(x)
x = self.mid_blocks[0](x)
x = self.mid_blocks[1](x)
x = self.mid_blocks[2](x)
for l in self.up_blocks:
x = l(x)
x = self.conv_norm_out(x)
x = nn.silu(x)
x = self.conv_out(x)
return x
class Autoencoder(nn.Module):
"""The autoencoder that allows us to perform diffusion in the latent space."""
def __init__(self, config: AutoencoderConfig):
super().__init__()
self.latent_channels = config.latent_channels_in
self.scaling_factor = config.scaling_factor
self.encoder = Encoder(
config.in_channels,
config.latent_channels_out,
config.block_out_channels,
config.layers_per_block,
resnet_groups=config.norm_num_groups,
)
self.decoder = Decoder(
config.latent_channels_in,
config.out_channels,
config.block_out_channels,
config.layers_per_block + 1,
resnet_groups=config.norm_num_groups,
)
self.quant_proj = nn.Linear(
config.latent_channels_out, config.latent_channels_out
)
self.post_quant_proj = nn.Linear(
config.latent_channels_in, config.latent_channels_in
)
def decode(self, z):
return self.decoder(self.post_quant_proj(z))
def __call__(self, x, key=None):
x = self.encoder(x)
x = self.quant_proj(x)
mean, logvar = x.split(2, axis=-1)
std = mx.exp(0.5 * logvar)
z = mx.random.normal(mean.shape, key=key) * std + mean
x_hat = self.decode(z)
return dict(x_hat=x_hat, z=z, mean=mean, logvar=logvar)
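A sketch of a full round trip: `__call__` encodes the image, samples a latent via the reparameterization above, and decodes it again. The 256x256 NHWC input is an arbitrary example size; with the default four `block_out_channels` levels it maps to a 32x32 latent:

vae = Autoencoder(AutoencoderConfig())
x = mx.random.normal((1, 256, 256, 3))  # stand-in image batch, NHWC
out = vae(x)
x_hat, z = out["x_hat"], out["z"]  # reconstruction and sampled latent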


@@ -0,0 +1,17 @@
# Create an account with an lnbits instance of your choice and add the admin key and id here. This account will be used to create a new lnbits wallet for each dvm/bot.
LNBITS_ADMIN_KEY = ""
LNBITS_ADMIN_ID = ""
LNBITS_HOST = "https://lnbits.com" #Use your own/a trusted instance ideally.
# In order to create a zappable lightning address, host nostdress on your own domain or use this preinstalled one.
# We will use the API to create and manage zappable lightning addresses.
NOSTDRESS_DOMAIN = "nostrdvm.com"
# Backend-specific options for tasks that require them. A DVM needing these should only be started if they are set.
OPENAI_API_KEY = "" # Enter your OpenAI API Key to use DVMs with OpenAI services
# We will automatically create dtags and private keys based on the identifier variable in main.
# If your DVM already has a dtag and private key, you can enter them here before publishing the DTAG, so a new one is not created.
# The name and NIP90 info of the DVM can be changed, but the identifier must stay the same in order not to create a different dtag.
# We will also create new wallets on the given lnbits instance for each dvm. If you want to use an existing wallet, you can replace the parameters here as well.
# Make sure you back up this file to keep access to your wallets.
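For illustration, this is roughly how a script picks these values up at startup (a sketch using python-dotenv, mirroring the `main.py` examples below):

import os
import dotenv

dotenv.load_dotenv(".env", override=True)
lnbits_host = os.getenv("LNBITS_HOST", "https://lnbits.com")
openai_key = os.getenv("OPENAI_API_KEY", "")
if openai_key == "":
    print("OPENAI_API_KEY not set; do not start OpenAI-backed DVMs.")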


@@ -0,0 +1,26 @@
# NostrAI: Nostr NIP90 Data Vending Machine Framework Example
Projects in this folder contain ready-to-use DVMs. To run a DVM, follow these steps:
## To get started:
- Install Python 3.10
- Create a new venv in this directory by opening a terminal here, or navigating to this directory, and typing `python -m venv venv`
- Place .env file (based on .env_example) in this folder.
- Recommended but optional:
- Create a `LNbits` account on an accessible instance of your choice and enter one account's id and admin key (this account will create other accounts for the dvms). Open the .env file and enter this info in `LNBITS_ADMIN_KEY`, `LNBITS_ADMIN_ID`, `LNBITS_HOST`.
- If you are running your own instance of `Nostdress`, enter it in `NOSTDRESS_DOMAIN`, or use the default one.
- Activate the venv with
- MacOS/Linux: `source ./venv/bin/activate`
- Windows: `.\venv\Scripts\activate`
- Type: `pip install nostr-dvm`
- Run `python3 main.py` (or `python main.py`)
- The framework will then automatically create keys, nip89 tags and zappable NIP57 `lightning addresses` for your dvms in the .env file.
- Check the .env file to verify that these values look correct.
- Check the `main.py` file. You can update the image/description/name of your DVM before announcing it.
- You can then set `admin_config.REBROADCAST_NIP89` and `admin_config.UPDATE_PROFILE` to `True` in main.py to announce the NIP89 info and update the npub's profile automatically (see the sketch below).
- After this was successful, you can set these back to `False` until the next time you want to update the NIP89 info or profile.
You are now running your own DVM.
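Flipping the two announce flags mentioned above is a one-line change each in `main.py`; a sketch based on the `AdminConfig` usage shown in the examples below:

from nostr_dvm.utils.admin_utils import AdminConfig

admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = True  # announce/refresh the NIP89 info event
admin_config.UPDATE_PROFILE = True     # update the DVM npub's profile
# pass admin_config to the task as in main.py, then set both back to False afterwards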


@@ -0,0 +1,54 @@
import json
from pathlib import Path
import dotenv
from nostr_dvm.tasks.textgeneration_llmlite import TextGenerationLLMLite
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.dvmconfig import build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
def main():
identifier = "llama2"
name = "Ollama"
dvm_config = build_default_config(identifier)
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
admin_config.LUD16 = dvm_config.LN_ADDRESS
options = {'default_model': "ollama/llama2", 'server': "http://localhost:11434"}
nip89info = {
"name": name,
"image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg",
"about": "I use a LLM connected via OLLAMA",
"encryptionSupported": True,
"cashuAccepted": True,
"nip90Params": {
}
}
nip89config = NIP89Config()
nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"])
nip89config.CONTENT = json.dumps(nip89info)
ollama = TextGenerationLLMLite(name=name, dvm_config=dvm_config, nip89config=nip89config, admin_config=admin_config,
options=options)
ollama.run()
if __name__ == '__main__':
env_path = Path('.env')
if not env_path.is_file():
with open('.env', 'w') as f:
print("Writing new .env file")
f.write('')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
main()


@@ -0,0 +1,94 @@
import json
import time
from pathlib import Path
from threading import Thread
import dotenv
from nostr_sdk import Keys, Client, Tag, EventBuilder, Filter, HandleNotification, Timestamp, nip04_decrypt
from nostr_dvm.utils.dvmconfig import DVMConfig
from nostr_dvm.utils.nostr_utils import send_event, check_and_set_private_key
from nostr_dvm.utils.definitions import EventDefinitions
def nostr_client_test_llm(prompt):
keys = Keys.from_sk_str(check_and_set_private_key("test_client"))
iTag = Tag.parse(["i", prompt, "text"])
relaysTag = Tag.parse(['relays', "wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
"wss://nostr-pub.wellorder.net"])
alttag = Tag.parse(["alt", "This is a NIP90 DVM AI task to generate text"])
event = EventBuilder(EventDefinitions.KIND_NIP90_GENERATE_TEXT, str("Generate an Audio File."),
[iTag, relaysTag, alttag]).to_event(keys)
relay_list = ["wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
"wss://nostr-pub.wellorder.net"]
client = Client(keys)
for relay in relay_list:
client.add_relay(relay)
client.connect()
config = DVMConfig()
send_event(event, client=client, dvm_config=config)
return event.as_json()
def nostr_client():
keys = Keys.from_sk_str(check_and_set_private_key("test_client"))
sk = keys.secret_key()
pk = keys.public_key()
print(f"Nostr Test Client public key: {pk.to_bech32()}, Hex: {pk.to_hex()} ")
client = Client(keys)
dvmconfig = DVMConfig()
for relay in dvmconfig.RELAY_LIST:
client.add_relay(relay)
client.connect()
dm_zap_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_DM,
EventDefinitions.KIND_ZAP]).since(
Timestamp.now()) # events to us specific
dvm_filter = (Filter().kinds([EventDefinitions.KIND_NIP90_RESULT_GENERATE_TEXT,
EventDefinitions.KIND_FEEDBACK]).since(Timestamp.now())) # public events
client.subscribe([dm_zap_filter, dvm_filter])
nostr_client_test_llm("Tell me a joke about a purple Ostrich!")
print("Sending Job Request")
#nostr_client_test_image_private("a beautiful ostrich watching the sunset")
class NotificationHandler(HandleNotification):
def handle(self, relay_url, event):
print(f"Received new event from {relay_url}: {event.as_json()}")
if event.kind() == 7000:
print("[Nostr Client]: " + event.as_json())
elif 6000 < event.kind() < 6999:
print("[Nostr Client]: " + event.as_json())
print("[Nostr Client]: " + event.content())
elif event.kind() == 4:
dec_text = nip04_decrypt(sk, event.pubkey(), event.content())
print("[Nostr Client]: " + f"Received new msg: {dec_text}")
elif event.kind() == 9735:
print("[Nostr Client]: " + f"Received new zap:")
print(event.as_json())
def handle_msg(self, relay_url, msg):
return
client.handle_notifications(NotificationHandler())
while True:
time.sleep(5.0)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
nostr_dvm_thread = Thread(target=nostr_client)
nostr_dvm_thread.start()


@@ -0,0 +1,15 @@
# Create an account with an lnbits instance of your choice and add the admin key and id here. This account will be used to create a new lnbits wallet for each dvm/bot.
LNBITS_ADMIN_KEY = ""
LNBITS_ADMIN_ID = ""
LNBITS_HOST = "https://lnbits.com" #Use your own/a trusted instance ideally.
# In order to create a zappable lightning address, host nostdress on your own domain or use this preinstalled one.
# We will use the API to create and manage zappable lightning addresses.
NOSTDRESS_DOMAIN = "nostrdvm.com"
# We will automatically create dtags and private keys based on the identifier variable in main.
# If your DVM already has a dtag and private key, you can enter them here before publishing the DTAG, so a new one is not created.
# The name and NIP90 info of the DVM can be changed, but the identifier must stay the same in order not to create a different dtag.
# We will also create new wallets on the given lnbits instance for each dvm. If you want to use an existing wallet, you can replace the parameters here as well.
# Make sure you back up this file to keep access to your wallets.


@@ -0,0 +1,26 @@
# NostrAI: Nostr NIP90 Data Vending Machine Framework Example
Projects in this folder contain ready-to-use DVMs. To run a DVM, follow these steps:
## To get started:
- Install Python 3.10
- Create a new venv in this directory by opening a terminal here, or navigating to this directory, and typing `python -m venv venv`
- Place .env file (based on .env_example) in this folder.
- Recommended but optional:
- Create a `LNbits` account on an accessible instance of your choice and enter one account's id and admin key (this account will create other accounts for the dvms). Open the .env file and enter this info in `LNBITS_ADMIN_KEY`, `LNBITS_ADMIN_ID`, `LNBITS_HOST`.
- If you are running your own instance of `Nostdress`, enter it in `NOSTDRESS_DOMAIN`, or use the default one.
- Activate the venv with
- MacOS/Linux: `source ./venv/bin/activate`
- Windows: `.\venv\Scripts\activate`
- Type: `pip install nostr-dvm`
- Run `python3 main.py` (or `python main.py`)
- The framework will then automatically create keys, nip89 tags and zappable NIP57 `lightning addresses` for your dvms in the .env file.
- Check the .env file to verify that these values look correct.
- Check the `main.py` file. You can update the image/description/name of your DVM before announcing it.
- You can then set `admin_config.REBROADCAST_NIP89` and `admin_config.UPDATE_PROFILE` to `True` in main.py to announce the NIP89 info and update the npub's profile automatically.
- After this was successful, you can set these back to `False` until the next time you want to update the NIP89 info or profile.
You are now running your own DVM.

60
examples/tts_dvm/main.py Normal file

@@ -0,0 +1,60 @@
import json
from pathlib import Path
import dotenv
from nostr_dvm.tasks.texttospeech import TextToSpeech
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.dvmconfig import build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
def main():
identifier = "tts"
name = "Guy Swann Clone"
dvm_config = build_default_config(identifier)
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
admin_config.LUD16 = dvm_config.LN_ADDRESS
# Use the default file if the parameter is empty, else override it with any local wav file
options = {'input_file': ""}
nip89info = {
"name": name,
"image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg",
"about": "I Generate Speech from Text",
"encryptionSupported": True,
"cashuAccepted": True,
"nip90Params": {
"language": {
"required": False,
"values": []
}
}
}
nip89config = NIP89Config()
nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"])
nip89config.CONTENT = json.dumps(nip89info)
tts = TextToSpeech(name=name,
dvm_config=dvm_config,
nip89config=nip89config,
admin_config=admin_config,
options=options)
tts.run()
if __name__ == '__main__':
env_path = Path('.env')
if not env_path.is_file():
with open('.env', 'w') as f:
print("Writing new .env file")
f.write('')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
main()


@@ -0,0 +1,98 @@
import json
import time
from pathlib import Path
from threading import Thread
import dotenv
from nostr_sdk import Keys, Client, Tag, EventBuilder, Filter, HandleNotification, Timestamp, nip04_decrypt
from nostr_dvm.utils.dvmconfig import DVMConfig
from nostr_dvm.utils.nostr_utils import send_event, check_and_set_private_key
from nostr_dvm.utils.definitions import EventDefinitions
def nostr_client_test_tts(prompt):
keys = Keys.from_sk_str(check_and_set_private_key("test_client"))
iTag = Tag.parse(["i", prompt, "text"])
paramTag1 = Tag.parse(["param", "language", "en"])
bidTag = Tag.parse(['bid', str(1000 * 1000), str(1000 * 1000)])
relaysTag = Tag.parse(['relays', "wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
"wss://nostr-pub.wellorder.net"])
alttag = Tag.parse(["alt", "This is a NIP90 DVM AI task to generate TTS"])
event = EventBuilder(EventDefinitions.KIND_NIP90_TEXT_TO_SPEECH, str("Generate an Audio File."),
[iTag, paramTag1, bidTag, relaysTag, alttag]).to_event(keys)
relay_list = ["wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
"wss://nostr-pub.wellorder.net"]
client = Client(keys)
for relay in relay_list:
client.add_relay(relay)
client.connect()
config = DVMConfig()
send_event(event, client=client, dvm_config=config)
return event.as_json()
def nostr_client():
keys = Keys.from_sk_str(check_and_set_private_key("test_client"))
sk = keys.secret_key()
pk = keys.public_key()
print(f"Nostr Test Client public key: {pk.to_bech32()}, Hex: {pk.to_hex()} ")
client = Client(keys)
dvmconfig = DVMConfig()
for relay in dvmconfig.RELAY_LIST:
client.add_relay(relay)
client.connect()
dm_zap_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_DM,
EventDefinitions.KIND_ZAP]).since(
Timestamp.now()) # events to us specific
dvm_filter = (Filter().kinds([EventDefinitions.KIND_NIP90_RESULT_TEXT_TO_SPEECH,
EventDefinitions.KIND_FEEDBACK]).since(Timestamp.now())) # public events
client.subscribe([dm_zap_filter, dvm_filter])
nostr_client_test_tts("Hello, this is a test. Mic check one, two.")
print("Sending Job Request")
#nostr_client_test_image_private("a beautiful ostrich watching the sunset")
class NotificationHandler(HandleNotification):
def handle(self, relay_url, event):
print(f"Received new event from {relay_url}: {event.as_json()}")
if event.kind() == 7000:
print("[Nostr Client]: " + event.as_json())
elif 6000 < event.kind() < 6999:
print("[Nostr Client]: " + event.as_json())
print("[Nostr Client]: " + event.content())
elif event.kind() == 4:
dec_text = nip04_decrypt(sk, event.pubkey(), event.content())
print("[Nostr Client]: " + f"Received new msg: {dec_text}")
elif event.kind() == 9735:
print("[Nostr Client]: " + f"Received new zap:")
print(event.as_json())
def handle_msg(self, relay_url, msg):
return
client.handle_notifications(NotificationHandler())
while True:
time.sleep(5.0)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
nostr_dvm_thread = Thread(target=nostr_client)
nostr_dvm_thread.start()

20
main.py

@@ -5,7 +5,8 @@ import dotenv
from nostr_dvm.bot import Bot
from nostr_dvm.tasks import videogeneration_replicate_svd, imagegeneration_replicate_sdxl, textgeneration_llmlite, \
trending_notes_nostrband, discovery_inactive_follows, translation_google, textextraction_pdf, \
translation_libretranslate, textextraction_google, convert_media, imagegeneration_openai_dalle
translation_libretranslate, textextraction_google, convert_media, imagegeneration_openai_dalle, texttospeech, \
imagegeneration_mlx, advanced_search, textextraction_whisper_mlx
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
@@ -133,6 +134,19 @@ def playground():
ollama = textgeneration_llmlite.build_example("LLM", "llmlite", admin_config)
bot_config.SUPPORTED_DVMS.append(ollama)
ollama.run()
tts = texttospeech.build_example("Text To Speech Test", "tts", admin_config)
bot_config.SUPPORTED_DVMS.append(tts)
tts.run()
from sys import platform
if platform == "darwin":
# Test with MLX for OSX M1/M2/M3 chips
mlx = imagegeneration_mlx.build_example("SD with MLX", "mlx_sd", admin_config)
bot_config.SUPPORTED_DVMS.append(mlx)
mlx.run()
# Run the bot
Bot(bot_config)
# Keep the main function alive for libraries that require it, like openai
@@ -141,6 +155,10 @@ def playground():
if __name__ == '__main__':
env_path = Path('.env')
if not env_path.is_file():
with open('.env', 'w') as f:
print("Writing new .env file")
f.write('')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)


@@ -43,9 +43,6 @@ def send_file_to_server(filepath, address):
return result
# headers = {'Content-type': 'application/x-www-form-urlencoded'}
"""
check_n_server_status(request_form, address)
Function that requests the status of the current process with the jobID (we use the Nostr event as jobID).


@@ -1,5 +1,9 @@
import importlib
import json
import os
import subprocess
from datetime import timedelta
from pathlib import Path
from nostr_sdk import PublicKey, Keys, Client, Tag, Event, EventBuilder, Filter, HandleNotification, Timestamp, \
init_logger, LogLevel, Options, nip04_encrypt
@@ -323,7 +327,8 @@ class DVM:
except Exception as e:
# Zapping back on error in post-processing is a risk for the DVM because work has already been done,
# but maybe something with parsing/uploading failed. Try to avoid errors here as much as possible
send_job_status_reaction(original_event, "error", content="Error in Post-processing: " + str(e),
send_job_status_reaction(original_event, "error",
content="Error in Post-processing: " + str(e),
dvm_config=self.dvm_config,
)
if amount > 0 and self.dvm_config.LNBITS_ADMIN_KEY != "":
@@ -466,11 +471,30 @@ class DVM:
for dvm in self.dvm_config.SUPPORTED_DVMS:
try:
if task == dvm.TASK:
request_form = dvm.create_request_from_nostr_event(job_event, self.client,
self.dvm_config)
request_form = dvm.create_request_from_nostr_event(job_event, self.client, self.dvm_config)
if dvm_config.USE_OWN_VENV:
python_bin = (r'cache/venvs/' + os.path.basename(dvm_config.SCRIPT).split(".py")[0]
+ "/bin/python")
retcode = subprocess.call([python_bin, dvm_config.SCRIPT,
'--request', json.dumps(request_form),
'--identifier', dvm_config.IDENTIFIER,
'--output', 'output.txt'])
print("Finished processing, loading data..")
with open(os.path.abspath('output.txt')) as f:
resultall = f.readlines()
result = ""
for line in resultall:
if line != '\n':
result += line
os.remove(os.path.abspath('output.txt'))
else:  # Some components might have issues running code in an outside venv.
# We install locally in these cases for now
result = dvm.process(request_form)
try:
post_processed = dvm.post_process(result, job_event)
post_processed = dvm.post_process(str(result), job_event)
send_nostr_reply_event(post_processed, job_event.as_json())
except Exception as e:
send_job_status_reaction(job_event, "error", content=str(e),
@@ -495,7 +519,6 @@ class DVM:
except Exception as e:
print(e)
return
self.client.handle_notifications(NotificationHandler())


@@ -1,18 +1,16 @@
import json
import os
import subprocess
from subprocess import run
import sys
from threading import Thread
from venv import create
from nostr_sdk import Keys
from nostr_dvm.dvm import DVM
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.dvmconfig import DVMConfig
from nostr_dvm.utils.nip89_utils import NIP89Config
from nostr_dvm.utils.nostr_utils import check_and_set_private_key
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
from nostr_dvm.utils.output_utils import post_process_result
from nostr_dvm.utils.zap_utils import check_and_set_ln_bits_keys
class DVMTaskInterface:
@@ -34,7 +32,7 @@ class DVMTaskInterface:
options=None, task=None):
self.init(name, dvm_config, admin_config, nip89config, task)
self.options = options
self.install_dependencies(self.dependencies)
self.install_dependencies(dvm_config)
def init(self, name, dvm_config, admin_config=None, nip89config=None, task=None):
self.NAME = name
@@ -58,6 +56,27 @@ class DVMTaskInterface:
self.dvm_config = dvm_config
self.admin_config = admin_config
def install_dependencies(self, dvm_config):
if dvm_config.SCRIPT != "":
if self.dvm_config.USE_OWN_VENV:
dir = r'cache/venvs/' + os.path.basename(dvm_config.SCRIPT).split(".py")[0]
if not os.path.isdir(dir):
print(dir)
create(dir, with_pip=True, upgrade_deps=True)
self.dependencies.append(("nostr-dvm", "nostr-dvm"))
for (module, package) in self.dependencies:
print("Installing Venv Module: " + module)
run(["bin/pip", "install", "--force-reinstall", package], cwd=dir)
else:
for module, package in self.dependencies:
if module != "nostr-dvm":
try:
__import__(module)
except ImportError:
print("Installing global Module: " + module)
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
def run(self):
nostr_dvm_thread = Thread(target=self.DVM, args=[self.dvm_config, self.admin_config])
nostr_dvm_thread.start()
@@ -87,16 +106,6 @@ class DVMTaskInterface:
"""Post-process the data and return the result Use default function, if not overwritten"""
return post_process_result(result, event)
def install_dependencies(self, packages):
import pip
for module, package in packages:
try:
__import__(module)
except ImportError:
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
@staticmethod
def set_options(request_form):
print("Setting options...")
@@ -105,3 +114,19 @@ class DVMTaskInterface:
opts = json.loads(request_form["options"])
print(opts)
return dict(opts)
@staticmethod
def process_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--request', dest='request')
parser.add_argument('--identifier', dest='identifier')
parser.add_argument('--output', dest='output')
args = parser.parse_args()
return args
@staticmethod
def write_output(result, output):
with open(os.path.abspath(output), 'w') as f:
f.write(result)
# f.close()


@@ -1,14 +1,10 @@
import json
import os
from datetime import timedelta
from pathlib import Path
import dotenv
from nostr_sdk import Client, Timestamp, PublicKey, Tag, Keys, Options, SecretKey
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
@@ -27,9 +23,11 @@ class AdvancedSearch(DVMTaskInterface):
TASK: str = "search-content"
FIX_COST: float = 0
dvm_config: DVMConfig
dependencies = [("nostr-dvm", "nostr-dvm")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
def is_input_supported(self, tags):
@@ -177,19 +175,12 @@ def build_example(name, identifier, admin_config):
admin_config=admin_config)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = AdvancedSearch(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("Advanced Nostr Search", "discovery_content_search", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -1,14 +1,10 @@
import json
from pathlib import Path
import dotenv
import os
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
from nostr_dvm.utils.mediasource_utils import organize_input_media_data
from nostr_dvm.utils.output_utils import upload_media_to_hoster
@@ -30,6 +26,7 @@ class MediaConverter(DVMTaskInterface):
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
def is_input_supported(self, tags):
@@ -92,7 +89,7 @@ def build_example(name, identifier, admin_config):
"encryptionSupported": True,
"cashuAccepted": True,
"nip90Params": {
"media_format": {
"format": {
"required": False,
"values": ["video/mp4", "audio/mp3"]
}
@@ -100,26 +97,18 @@ def build_example(name, identifier, admin_config):
}
nip89config = NIP89Config()
nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"])
nip89config.CONTENT = json.dumps(nip89info)
return MediaConverter(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = MediaConverter(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("Media Bringer", "media_converter", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -1,15 +1,12 @@
import json
import os
from datetime import timedelta
from pathlib import Path
from threading import Thread
import dotenv
from nostr_sdk import Client, Timestamp, PublicKey, Tag, Keys, Options
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
@@ -33,6 +30,7 @@ class DiscoverInactiveFollows(DVMTaskInterface):
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
def is_input_supported(self, tags):
@@ -198,19 +196,12 @@ def build_example(name, identifier, admin_config):
return DiscoverInactiveFollows(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = DiscoverInactiveFollows(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("Bygones", "discovery_inactive_follows", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -0,0 +1,186 @@
import json
import os
from PIL import Image
from tqdm import tqdm
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
from nostr_dvm.utils.output_utils import upload_media_to_hoster
from nostr_dvm.utils.zap_utils import get_price_per_sat
"""
This File contains a Module to generate an Image locally with MLX (Stable Diffusion on Apple Silicon) and receive results back.
Accepted Inputs: Prompt (text)
Outputs: A URL to an Image
Params:
"""
class ImageGenerationMLX(DVMTaskInterface):
KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE
TASK: str = "text-to-image"
FIX_COST: float = 120
dependencies = [("nostr-dvm", "nostr-dvm"),
("mlx", "mlx"),
("safetensors", "safetensors"),
("huggingface-hub", "huggingface-hub"),
("regex", "regex"),
("tqdm", "tqdm"),
]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
def is_input_supported(self, tags):
for tag in tags:
if tag.as_vec()[0] == 'i':
input_value = tag.as_vec()[1]
input_type = tag.as_vec()[2]
if input_type != "text":
return False
elif tag.as_vec()[0] == 'output':
output = tag.as_vec()[1]
if (output == "" or
not (output == "image/png" or "image/jpg"
or output == "image/png;format=url" or output == "image/jpg;format=url")):
print("Output format not supported, skipping..")
return False
return True
def create_request_from_nostr_event(self, event, client=None, dvm_config=None):
request_form = {"jobID": event.id().to_hex() + "_" + self.NAME.replace(" ", "")}
prompt = ""
width = "1024"
height = "1024"
for tag in event.tags():
if tag.as_vec()[0] == 'i':
input_type = tag.as_vec()[2]
if input_type == "text":
prompt = tag.as_vec()[1]
elif tag.as_vec()[0] == 'param':
print("Param: " + tag.as_vec()[1] + ": " + tag.as_vec()[2])
if tag.as_vec()[1] == "size":
if len(tag.as_vec()) > 3:
width = (tag.as_vec()[2])
height = (tag.as_vec()[3])
elif len(tag.as_vec()) == 3:
split = tag.as_vec()[2].split("x")
if len(split) > 1:
width = split[0]
height = split[1]
elif tag.as_vec()[1] == "model":
model = tag.as_vec()[2]
elif tag.as_vec()[1] == "quality":
quality = tag.as_vec()[2]
options = {
"prompt": prompt,
"size": width + "x" + height,
"number": 1
}
request_form['options'] = json.dumps(options)
return request_form
def process(self, request_form):
try:
import mlx.core as mx
from backends.mlx.stable_diffusion import StableDiffusion
options = DVMTaskInterface.set_options(request_form)
sd = StableDiffusion()
cfg_weight = 7.5
batchsize = 1
n_rows = 1
steps = 50
n_images = options["number"]
# Generate the latent vectors using diffusion
latents = sd.generate_latents(
options["prompt"],
n_images=n_images,
cfg_weight=cfg_weight,
num_steps=steps,
negative_text="",
)
for x_t in tqdm(latents, total=steps):
mx.simplify(x_t)
mx.eval(x_t)
# Decode them into images
decoded = []
for i in tqdm(range(0, n_images, batchsize)):
decoded.append(sd.decode(x_t[i: i + batchsize]))
mx.eval(decoded[-1])
# Arrange them on a grid
x = mx.concatenate(decoded, axis=0)
x = mx.pad(x, [(0, 0), (8, 8), (8, 8), (0, 0)])
B, H, W, C = x.shape
x = x.reshape(n_rows, B // n_rows, H, W, C).transpose(0, 2, 1, 3, 4)
x = x.reshape(n_rows * H, B // n_rows * W, C)
x = (x * 255).astype(mx.uint8)
# Save them to disc
image = Image.fromarray(x.__array__())
image.save("./outputs/image.jpg")
result = upload_media_to_hoster("./outputs/image.jpg")
return result
except Exception as e:
print("Error in Module")
raise Exception(e)
# We build an example here that we can either call by running this file directly from the main directory,
# or by adding it to our playground. You can use the example as-is, adjust it to your needs, or redefine it
# in the playground or elsewhere
def build_example(name, identifier, admin_config):
dvm_config = build_default_config(identifier)
admin_config.LUD16 = dvm_config.LN_ADDRESS
profit_in_sats = 10
dvm_config.FIX_COST = int(((4.0 / (get_price_per_sat("USD") * 100)) + profit_in_sats))
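# Worked example of the line above (illustrative numbers, assuming get_price_per_sat
# returns the USD price of one sat): at $0.00043 per sat (~$43k/BTC),
# 4.0 / (0.00043 * 100) is roughly 93 sats for the 4-cent job; plus profit_in_sats = 10 -> FIX_COST of about 103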
nip89info = {
"name": name,
"image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg",
"about": "I use Replicate to run StableDiffusion XL",
"encryptionSupported": True,
"cashuAccepted": True,
"nip90Params": {
"size": {
"required": False,
"values": ["1024:1024", "1024x1792", "1792x1024"]
}
}
}
nip89config = NIP89Config()
nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"])
nip89config.CONTENT = json.dumps(nip89info)
return ImageGenerationMLX(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = ImageGenerationMLX(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
process_venv()

View File

@@ -1,15 +1,13 @@
import json
import os
import time
from io import BytesIO
from pathlib import Path
import dotenv
import requests
from PIL import Image
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
@@ -28,10 +26,12 @@ class ImageGenerationDALLE(DVMTaskInterface):
KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE
TASK: str = "text-to-image"
FIX_COST: float = 120
dependencies = [("openai", "openai==1.3.5")]
dependencies = [("nostr-dvm", "nostr-dvm"),
("openai", "openai==1.3.5")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
def is_input_supported(self, tags):
@@ -107,6 +107,7 @@ class ImageGenerationDALLE(DVMTaskInterface):
n=int(options['number']),
)
image_url = response.data[0].url
# rehost the result instead of relying on the openai link
response = requests.get(image_url)
@@ -125,6 +126,7 @@ class ImageGenerationDALLE(DVMTaskInterface):
# playground or elsewhere
def build_example(name, identifier, admin_config):
dvm_config = build_default_config(identifier)
dvm_config.USE_OWN_VENV = True
admin_config.LUD16 = dvm_config.LN_ADDRESS
profit_in_sats = 10
cost_in_cent = 4.0
@@ -151,18 +153,17 @@ def build_example(name, identifier, admin_config):
return ImageGenerationDALLE(name=name, dvm_config=dvm_config, nip89config=nip89config, admin_config=admin_config)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = ImageGenerationDALLE(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = ""
while result == "":
result = dvm.process(json.loads(args.request))
time.sleep(10)
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("Dall-E 3", "dalle3", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -1,9 +1,6 @@
import json
import os
from io import BytesIO
from pathlib import Path
import dotenv
import requests
from PIL import Image
@@ -29,10 +26,12 @@ class ImageGenerationReplicateSDXL(DVMTaskInterface):
KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE
TASK: str = "text-to-image"
FIX_COST: float = 120
dependencies = [("replicate", "replicate==0.21.1")]
dependencies = [("nostr-dvm", "nostr-dvm"),
("replicate", "replicate==0.21.1")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
def is_input_supported(self, tags):
@@ -147,18 +146,12 @@ def build_example(name, identifier, admin_config):
admin_config=admin_config)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = ImageGenerationReplicateSDXL(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("Stable Diffusion XL", "replicate_sdxl", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -27,10 +27,13 @@ class SpeechToTextGoogle(DVMTaskInterface):
TASK: str = "speech-to-text"
FIX_COST: float = 10
PER_UNIT_COST: float = 0.1
dependencies = [("speech_recognition", "SpeechRecognition==3.10.0")]
dependencies = [("nostr-dvm", "nostr-dvm"),
("speech_recognition", "SpeechRecognition==3.10.0")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
if options is None:
options = {}
@@ -155,20 +158,13 @@ def build_example(name, identifier, admin_config):
return SpeechToTextGoogle(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config, options=options)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = SpeechToTextGoogle(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("Transcriptor", "speech_recognition", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -26,11 +26,13 @@ class TextExtractionPDF(DVMTaskInterface):
KIND: int = EventDefinitions.KIND_NIP90_EXTRACT_TEXT
TASK: str = "pdf-to-text"
FIX_COST: float = 0
dependencies = [("pypdf", "pypdf==3.17.1")]
dependencies = [("nostr-dvm", "nostr-dvm"),
("pypdf", "pypdf==3.17.1")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
@@ -116,19 +118,13 @@ def build_example(name, identifier, admin_config):
return TextExtractionPDF(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = TextExtractionPDF(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("PDF Extractor", "pdf_extractor", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -20,14 +20,16 @@ Outputs: Generated text
"""
class TextGenerationOLLAMA(DVMTaskInterface):
class TextGenerationLLMLite(DVMTaskInterface):
KIND: int = EventDefinitions.KIND_NIP90_GENERATE_TEXT
TASK: str = "text-to-text"
FIX_COST: float = 0
dependencies = [("litellm", "litellm==1.12.3")]
dependencies = [("nostr-dvm", "nostr-dvm"),
("litellm", "litellm==1.12.3")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
@@ -78,7 +80,8 @@ class TextGenerationOLLAMA(DVMTaskInterface):
response = completion(
model=options["model"],
messages=[{"content": options["prompt"], "role": "user"}],
api_base=options["server"]
api_base=options["server"],
stream=False
)
print(response.choices[0].message.content)
return response.choices[0].message.content
@@ -110,34 +113,23 @@ def build_example(name, identifier, admin_config):
"about": "I use a LLM connected via OLLAMA",
"encryptionSupported": True,
"cashuAccepted": True,
"nip90Params": {
"size": {
"required": False,
"values": ["1024:1024", "1024x1792", "1792x1024"]
}
}
"nip90Params": {}
}
nip89config = NIP89Config()
nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"])
nip89config.CONTENT = json.dumps(nip89info)
return TextGenerationOLLAMA(name=name, dvm_config=dvm_config, nip89config=nip89config, admin_config=admin_config, options=options)
return TextGenerationLLMLite(name=name, dvm_config=dvm_config, nip89config=nip89config, admin_config=admin_config, options=options)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = TextGenerationLLMLite(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("LLM", "llmlite", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -0,0 +1,138 @@
import json
import os
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
import urllib.request
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
from nostr_dvm.utils.output_utils import upload_media_to_hoster
"""
This File contains a Module to generate Audio based on an input and a voice
Accepted Inputs: Text
Outputs: Generated Audiofile
"""
class TextToSpeech(DVMTaskInterface):
KIND: int = EventDefinitions.KIND_NIP90_TEXT_TO_SPEECH
TASK: str = "text-to-speech"
FIX_COST: float = 200
dependencies = [("nostr-dvm", "nostr-dvm"),
("TTS", "TTS==0.22.0")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
def is_input_supported(self, tags):
for tag in tags:
if tag.as_vec()[0] == 'i':
input_value = tag.as_vec()[1]
input_type = tag.as_vec()[2]
if input_type != "text":
return False
return True
def create_request_from_nostr_event(self, event, client=None, dvm_config=None):
request_form = {"jobID": event.id().to_hex() + "_" + self.NAME.replace(" ", "")}
prompt = "test"
if self.options.get("input_file") and self.options.get("input_file") != "":
input_file = self.options['input_file']
else:
if not Path.exists(Path('cache/input.wav')):
input_file_url = "https://media.nostr.build/av/de104e3260be636533a56fd4468b905c1eb22b226143a997aa936b011122af8a.wav"
urllib.request.urlretrieve(input_file_url, "cache/input.wav")
input_file = "cache/input.wav"
language = "en"
for tag in event.tags():
if tag.as_vec()[0] == 'i':
input_type = tag.as_vec()[2]
if input_type == "text":
prompt = tag.as_vec()[1]
if input_type == "url":
input_file = tag.as_vec()[1]
elif tag.as_vec()[0] == 'param':
param = tag.as_vec()[1]
if param == "language": # check for param type
language = tag.as_vec()[2]
options = {
"prompt": prompt,
"input_wav": input_file,
"language": language
}
request_form['options'] = json.dumps(options)
return request_form
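# Illustrative request_form produced above (hypothetical values):
# {"jobID": "<event id hex>_<dvm name>",
#  "options": '{"prompt": "hello world", "input_wav": "cache/input.wav", "language": "en"}'}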
def process(self, request_form):
import torch
from TTS.api import TTS
options = DVMTaskInterface.set_options(request_form)
device = "cuda" if torch.cuda.is_available() else "cpu"
#else "mps" if torch.backends.mps.is_available() \
print(TTS().list_models())
try:
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to(device)
tts.tts_to_file(
text=options["prompt"],
speaker_wav=options["input_wav"], language=options["language"], file_path="outputs/output.wav")
result = upload_media_to_hoster("outputs/output.wav")
return result
except Exception as e:
print("Error in Module: " + str(e))
raise Exception(e)
# We build an example here that we can either call by running this file directly from the main directory,
# or by adding it to our playground. You can use the example as-is, adjust it to your needs, or redefine it
# in the playground or elsewhere
def build_example(name, identifier, admin_config):
dvm_config = build_default_config(identifier)
admin_config.LUD16 = dvm_config.LN_ADDRESS
#use an alternative local wav file you want to use for cloning
options = {'input_file': ""}
nip89info = {
"name": name,
"image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg",
"about": "I Generate Speech from Text",
"encryptionSupported": True,
"cashuAccepted": True,
"nip90Params": {
"language": {
"required": False,
"values": []
}
}
}
nip89config = NIP89Config()
nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"])
nip89config.CONTENT = json.dumps(nip89info)
return TextToSpeech(name=name, dvm_config=dvm_config, nip89config=nip89config, admin_config=admin_config,
options=options)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = TextToSpeech(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
process_venv()

View File

@@ -1,4 +1,5 @@
import json
import os
from pathlib import Path
import dotenv
@@ -25,10 +26,12 @@ class TranslationGoogle(DVMTaskInterface):
KIND: int = EventDefinitions.KIND_NIP90_TRANSLATE_TEXT
TASK: str = "translation"
FIX_COST: float = 0
dependencies = [("translatepy", "translatepy==2.3")]
dependencies = [("nostr-dvm", "nostr-dvm"),
("translatepy", "translatepy==2.3")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
def is_input_supported(self, tags):
@@ -141,18 +144,15 @@ def build_example(name, identifier, admin_config):
return TranslationGoogle(name=name, dvm_config=dvm_config, nip89config=nip89config, admin_config=admin_config)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = TranslationGoogle(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("Google Translator", "google_translator", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -13,7 +13,6 @@ from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
from nostr_dvm.utils.nostr_utils import get_referenced_event_by_id, get_event_by_id
"""
This File contains a Module to call Libre Translate Services
@@ -32,6 +31,7 @@ class TranslationLibre(DVMTaskInterface):
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None, task=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options, task)
def is_input_supported(self, tags):
@@ -125,12 +125,18 @@ def build_example(name, identifier, admin_config):
"nip90Params": {
"language": {
"required": False,
"values": ["en", "az", "be", "bg", "bn", "bs", "ca", "ceb", "co", "cs", "cy", "da", "de", "el", "eo", "es",
"et", "eu", "fa", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "haw", "hi", "hmn", "hr", "ht",
"hu", "hy", "id", "ig", "is", "it", "he", "ja", "jv", "ka", "kk", "km", "kn", "ko", "ku", "ky",
"la", "lb", "lo", "lt", "lv", "mg", "mi", "mk", "ml", "mn", "mr", "ms", "mt", "my", "ne", "nl",
"no", "ny", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "sm", "sn", "so",
"sq", "sr", "st", "su", "sv", "sw", "ta", "te", "tg", "th", "tl", "tr", "ug", "uk", "ur", "uz",
"values": ["en", "az", "be", "bg", "bn", "bs", "ca", "ceb", "co", "cs", "cy", "da", "de", "el", "eo",
"es",
"et", "eu", "fa", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "haw", "hi", "hmn", "hr",
"ht",
"hu", "hy", "id", "ig", "is", "it", "he", "ja", "jv", "ka", "kk", "km", "kn", "ko", "ku",
"ky",
"la", "lb", "lo", "lt", "lv", "mg", "mi", "mk", "ml", "mn", "mr", "ms", "mt", "my", "ne",
"nl",
"no", "ny", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "sm", "sn",
"so",
"sq", "sr", "st", "su", "sv", "sw", "ta", "te", "tg", "th", "tl", "tr", "ug", "uk", "ur",
"uz",
"vi", "xh", "yi", "yo", "zh", "zu"]
}
}
@@ -143,18 +149,12 @@ def build_example(name, identifier, admin_config):
admin_config=admin_config, options=options)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = TranslationLibre(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("Libre Translator", "libre_translator", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -29,6 +29,7 @@ class TrendingNotesNostrBand(DVMTaskInterface):
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
def is_input_supported(self, tags):
@@ -117,20 +118,12 @@ def build_example(name, identifier, admin_config):
return TrendingNotesNostrBand(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = TrendingNotesNostrBand(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("Trending Notes on Nostr.band", "trending_notes_nostrband", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -1,5 +1,6 @@
import json
import os
import subprocess
from io import BytesIO
from pathlib import Path
@@ -30,12 +31,17 @@ class VideoGenerationReplicateSVD(DVMTaskInterface):
KIND: int = EventDefinitions.KIND_NIP90_GENERATE_VIDEO
TASK: str = "image-to-video"
FIX_COST: float = 120
dependencies = [("replicate", "replicate==0.21.1")]
dependencies = [("nostr-dvm", "nostr-dvm"),
("replicate", "replicate==0.21.1")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
def is_input_supported(self, tags):
for tag in tags:
if tag.as_vec()[0] == 'i':
@@ -134,20 +140,13 @@ def build_example(name, identifier, admin_config):
return VideoGenerationReplicateSVD(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = VideoGenerationReplicateSVD(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
env_path = Path('.env')
if env_path.is_file():
print(f'loading environment from {env_path.resolve()}')
dotenv.load_dotenv(env_path, verbose=True, override=True)
else:
raise FileNotFoundError(f'.env file not found at {env_path} ')
admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False
dvm = build_example("Stable Video Diffusion", "replicate_svd", admin_config)
dvm.run()
keep_alive()
process_venv()

View File

@@ -26,8 +26,10 @@ class User:
def create_sql_table(db):
try:
import os
if not os.path.exists(r'.\db'):
os.makedirs(r'.\db')
if not os.path.exists(r'db'):
os.makedirs(r'db')
if not os.path.exists(r'outputs'):
os.makedirs(r'outputs')
con = sqlite3.connect(db)
cur = con.cursor()
cur.execute(""" CREATE TABLE IF NOT EXISTS users (

View File

@@ -9,27 +9,29 @@ class EventDefinitions:
KIND_NIP94_METADATA = 1063
KIND_FEEDBACK = 7000
KIND_NIP90_EXTRACT_TEXT = 5000
KIND_NIP90_RESULT_EXTRACT_TEXT = 6000
KIND_NIP90_RESULT_EXTRACT_TEXT = KIND_NIP90_EXTRACT_TEXT + 1000
KIND_NIP90_SUMMARIZE_TEXT = 5001
KIND_NIP90_RESULT_SUMMARIZE_TEXT = 6001
KIND_NIP90_RESULT_SUMMARIZE_TEXT = KIND_NIP90_SUMMARIZE_TEXT + 1000
KIND_NIP90_TRANSLATE_TEXT = 5002
KIND_NIP90_RESULT_TRANSLATE_TEXT = 6002
KIND_NIP90_RESULT_TRANSLATE_TEXT = KIND_NIP90_TRANSLATE_TEXT + 1000
KIND_NIP90_TEXT_TO_SPEECH = 5005
KIND_NIP90_RESULT_TEXT_TO_SPEECH = KIND_NIP90_TEXT_TO_SPEECH + 1000
KIND_NIP90_GENERATE_TEXT = 5050
KIND_NIP90_RESULT_GENERATE_TEXT = 6050
KIND_NIP90_RESULT_GENERATE_TEXT = KIND_NIP90_GENERATE_TEXT + 1000
KIND_NIP90_GENERATE_IMAGE = 5100
KIND_NIP90_RESULT_GENERATE_IMAGE = 6100
KIND_NIP90_RESULT_GENERATE_IMAGE = KIND_NIP90_GENERATE_IMAGE + 1000
KIND_NIP90_CONVERT_VIDEO = 5200
KIND_NIP90_RESULT_CONVERT_VIDEO = 6200
KIND_NIP90_RESULT_CONVERT_VIDEO = KIND_NIP90_CONVERT_VIDEO + 1000
KIND_NIP90_GENERATE_VIDEO = 5202
KIND_NIP90_RESULT_GENERATE_VIDEO = 6202
KIND_NIP90_RESULT_GENERATE_VIDEO = KIND_NIP90_GENERATE_VIDEO + 1000
KIND_NIP90_CONTENT_DISCOVERY = 5300
KIND_NIP90_RESULT_CONTENT_DISCOVERY = 6300
KIND_NIP90_RESULT_CONTENT_DISCOVERY = KIND_NIP90_CONTENT_DISCOVERY + 1000
KIND_NIP90_PEOPLE_DISCOVERY = 5301
KIND_NIP90_RESULT_PEOPLE_DISCOVERY = 6301
KIND_NIP90_RESULT_PEOPLE_DISCOVERY = KIND_NIP90_PEOPLE_DISCOVERY + 1000
KIND_NIP90_CONTENT_SEARCH = 5302
KIND_NIP90_RESULTS_CONTENT_SEARCH = 6302
KIND_NIP90_RESULTS_CONTENT_SEARCH = KIND_NIP90_CONTENT_SEARCH + 1000
KIND_NIP90_GENERIC = 5999
KIND_NIP90_RESULT_GENERIC = 6999
KIND_NIP90_RESULT_GENERIC = KIND_NIP90_GENERIC + 1000
ANY_RESULT = [KIND_NIP90_RESULT_EXTRACT_TEXT,
KIND_NIP90_RESULT_SUMMARIZE_TEXT,
KIND_NIP90_RESULT_TRANSLATE_TEXT,

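The rewritten constants above make the NIP90 numbering convention explicit: a result kind is always its request kind plus 1000. A quick illustration, a sketch against the definitions above:

request_kind = EventDefinitions.KIND_NIP90_GENERATE_IMAGE  # 5100
result_kind = request_kind + 1000  # 6100 == EventDefinitions.KIND_NIP90_RESULT_GENERATE_IMAGE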
View File

@@ -23,10 +23,13 @@ class DVMConfig:
RELAY_TIMEOUT = 3
EXTERNAL_POST_PROCESS_TYPE = PostProcessFunctionType.NONE # Leave this on None, except the DVM is external
LNBITS_INVOICE_KEY = ''  # Will be generated automatically by default, or read from .env
LNBITS_ADMIN_KEY = '' # In order to pay invoices, e.g. from the bot to DVMs, or reimburse users.
LNBITS_URL = 'https://lnbits.com'
LN_ADDRESS = ''
SCRIPT = ''
IDENTIFIER = ''
USE_OWN_VENV = True  # Create a separate venv for each DVM's process function. Disable this to install packages into the main venv; only recommended if your DVMs don't need conflicting dependency versions
DB: str
NEW_USER_BALANCE: int = 0 # Free credits for new users
NIP89: NIP89Config
@@ -36,6 +39,7 @@ class DVMConfig:
def build_default_config(identifier):
dvm_config = DVMConfig()
dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier)
dvm_config.IDENTIFIER = identifier
npub = Keys.from_sk_str(dvm_config.PRIVATE_KEY).public_key().to_bech32()
invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys(identifier, npub)
dvm_config.LNBITS_INVOICE_KEY = invoice_key
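A minimal usage sketch of the additions above (the identifier "my_dvm" is illustrative); build_default_config wires keys, LNbits credentials and the new IDENTIFIER field from one identifier string:

dvm_config = build_default_config("my_dvm")  # creates/reads keys and an LNbits wallet for "my_dvm"
dvm_config.USE_OWN_VENV = False  # opt out of the per-DVM venv and install into the main venv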

View File

@@ -6,6 +6,8 @@ import ffmpegio
from decord import AudioReader, cpu
import requests
from nostr_dvm.utils.nostr_utils import get_event_by_id
from nostr_dvm.utils.scrapper.media_scrapper import OvercastDownload, XitterDownload, TiktokDownloadAll, \
InstagramDownload, YouTubeDownload
def input_data_file_duration(event, dvm_config, client, start=0, end=0):
@@ -18,7 +20,7 @@ def input_data_file_duration(event, dvm_config, client, start=0, end=0):
input_type = tag.as_vec()[2]
if input_type == "text":
#For now, ingore length of any text, just return 1.
# For now, ignore length of any text, just return 1.
return 1
if input_type == "event": # NIP94 event
@@ -52,7 +54,8 @@ def input_data_file_duration(event, dvm_config, client, start=0, end=0):
return 1
def organize_input_media_data(input_value, input_type, start, end, dvm_config, client, process=True, media_format="audio/mp3") -> str:
def organize_input_media_data(input_value, input_type, start, end, dvm_config, client, process=True,
media_format="audio/mp3") -> str:
if input_type == "event": # NIP94 event
evt = get_event_by_id(input_value, client=client, config=dvm_config)
if evt is not None:
@@ -209,7 +212,7 @@ def get_overcast(input_value, start, end):
print("Found overcast.fm Link.. downloading")
start_time = start
end_time = end
downloadOvercast(input_value, filename)
download_overcast(input_value, filename)
finaltag = str(input_value).replace("https://overcast.fm/", "").split('/')
if start == 0.0:
if len(finaltag) > 1:
@@ -227,7 +230,7 @@ def get_overcast(input_value, start, end):
def get_TikTok(input_value, start, end):
filepath = os.path.abspath(os.curdir + r'/outputs/')
try:
filename = downloadTikTok(input_value, filepath)
filename = download_tik_tok(input_value, filepath)
print(filename)
except Exception as e:
print(e)
@@ -238,7 +241,7 @@ def get_TikTok(input_value, start, end):
def get_Instagram(input_value, start, end):
filepath = os.path.abspath(os.curdir + r'/outputs/')
try:
filename = downloadInstagram(input_value, filepath)
filename = download_instagram(input_value, filepath)
print(filename)
except Exception as e:
print(e)
@@ -250,7 +253,7 @@ def get_Twitter(input_value, start, end):
filepath = os.path.abspath(os.curdir) + r'/outputs/'
cleanlink = str(input_value).replace("twitter.com", "x.com")
try:
filename = downloadTwitter(cleanlink, filepath)
filename = download_twitter(cleanlink, filepath)
except Exception as e:
print(e)
return "", start, end
@@ -259,9 +262,10 @@ def get_Twitter(input_value, start, end):
def get_youtube(input_value, start, end, audioonly=True):
filepath = os.path.abspath(os.curdir) + r'/outputs/'
print(filepath)
filename = ""
try:
filename = downloadYouTube(input_value, filepath, audioonly)
filename = download_youtube(input_value, filepath, audioonly)
except Exception as e:
print("Youtube " + str(e))
@@ -331,31 +335,25 @@ def get_media_link(url) -> (str, str):
return None, None
def downloadOvercast(source_url, target_location):
from scrapper.media_scrapper import OvercastDownload
def download_overcast(source_url, target_location):
result = OvercastDownload(source_url, target_location)
return result
def downloadTwitter(videourl, path):
from scrapper.media_scrapper import XitterDownload
def download_twitter(videourl, path):
result = XitterDownload(videourl, path + "x.mp4")
return result
def downloadTikTok(videourl, path):
from scrapper.media_scrapper import TiktokDownloadAll
def download_tik_tok(videourl, path):
result = TiktokDownloadAll([videourl], path)
return result
def downloadInstagram(videourl, path):
from scrapper.media_scrapper import InstagramDownload
def download_instagram(videourl, path):
result = InstagramDownload(videourl, "insta", path)
return result
def downloadYouTube(link, path, audioonly=True):
from scrapper.media_scrapper import YouTubeDownload
result = YouTubeDownload(link, path, audio_only=audioonly)
return result
def download_youtube(link, path, audioonly=True):
return YouTubeDownload(link, path, audio_only=audioonly)

View File

@@ -1 +0,0 @@
from dvm import DVM

View File

@@ -18,7 +18,15 @@ def XitterDownload(source_url, target_location):
features, variables = request_details["features"], request_details["variables"]
def get_tokens(tweet_url):
html = requests.get(tweet_url)
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0",
"Accept": "*/*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"TE": "trailers",
}
html = requests.get(tweet_url, headers=headers)
assert (
html.status_code == 200
@@ -34,7 +42,6 @@ def XitterDownload(source_url, target_location):
), f"Failed to find main.js file. If you are using the correct Twitter URL this suggests a bug in the script. Please open a GitHub issue and copy and paste this message. Tweet url: {tweet_url}"
mainjs_url = mainjs_url[0]
mainjs = requests.get(mainjs_url)
assert (
@@ -80,9 +87,11 @@ def XitterDownload(source_url, target_location):
variables["tweetId"] = tweet_id
return f"https://twitter.com/i/api/graphql/0hWvDhmW8YQ-S_ib3azIrw/TweetResultByRestId?variables={urllib.parse.quote(json.dumps(variables))}&features={urllib.parse.quote(json.dumps(features))}"
# return f"https://api.twitter.com/graphql/ncDeACNGIApPMaqGVuF_rw/TweetResultByRestId?variables={urllib.parse.quote(json.dumps(variables))}&features={urllib.parse.quote(json.dumps(features))}"
def get_tweet_details(tweet_url, guest_token, bearer_token):
tweet_id = re.findall(r"(?<=status/)\d+", tweet_url)
assert (
tweet_id is not None and len(tweet_id) == 1
), f"Could not parse tweet id from your url. Make sure you are using the correct url. If you are, then file a GitHub issue and copy and paste this message. Tweet url: {tweet_url}"
@@ -172,7 +181,7 @@ def XitterDownload(source_url, target_location):
pattern = (
r'"expanded_url"\s*:\s*"https://x\.com/[^/]+/status/'
+ sid
+ '/[^"]+",\s*"id_str"\s*:\s*"\d+",'
+ r'/[^"]+",\s*"id_str"\s*:\s*"\d+",'
)
matches = re.findall(pattern, j)
if len(matches) > 0:
@@ -240,17 +249,49 @@ def XitterDownload(source_url, target_location):
urls = [x["url"] for x in results.values()]
urls += container_matches
return urls
return [x["url"] for x in results.values()]
def extract_mp4_fmp4(j):
"""
Extract MP4 video URLs from the detailed tweet information.
Returns three values: a list of tweet IDs, a list of dicts with resolution
and URL information, and a list of tweet_video URLs.
"""
# Empty list to store tweet IDs
tweet_id_list = []
mp4_info_dict_list = []
amplitude_pattern = re.compile(
r"(https://video.twimg.com/amplify_video/(\d+)/vid/(avc1/)(\d+x\d+)/[^.]+.mp4\?tag=\d+)"
)
ext_tw_pattern = re.compile(
r"(https://video.twimg.com/ext_tw_video/(\d+)/pu/vid/(avc1/)?(\d+x\d+)/[^.]+.mp4\?tag=\d+)"
)
tweet_video_pattern = re.compile(r'https://video.twimg.com/tweet_video/[^"]+')
container_pattern = re.compile(r'https://video.twimg.com/[^"]*container=fmp4')
matches = amplitude_pattern.findall(j)
matches += ext_tw_pattern.findall(j)
container_matches = container_pattern.findall(j)
tweet_video_url_list = tweet_video_pattern.findall(j)
for match in matches:
url, tweet_id, _, resolution = match
tweet_id_list.append(int(tweet_id))
mp4_info_dict_list.append({"resolution": resolution, "url": url})
tweet_id_list = list(dict.fromkeys(tweet_id_list))
if len(container_matches) > 0:
for url in container_matches:
mp4_info_dict_list.append({"url": url})
return tweet_id_list, mp4_info_dict_list, tweet_video_url_list
def download_parts(url, output_filename):
resp = requests.get(url, stream=True)
# the container URL begins with "/", ends with "fmp4", and contains a resolution we want to capture
pattern = re.compile(r"(/[^\n]*/(\d+x\d+)/[^\n]*container=fmp4)")
matches = pattern.findall(resp.text)
max_res = 0
max_res_url = None
@@ -301,12 +342,9 @@ def XitterDownload(source_url, target_location):
def repost_check(j, exclude_replies=True):
try:
# This line extracts the index of the first reply
reply_index = j.index('"conversationthread-')
except ValueError:
# If there are no replies we use the entire response data length
reply_index = len(j)
# We truncate the response data to exclude replies
if exclude_replies:
j = j[0:reply_index]
@@ -360,6 +398,7 @@ def XitterDownload(source_url, target_location):
bearer_token, guest_token = get_tokens(tweet_url)
resp = get_tweet_details(tweet_url, guest_token, bearer_token)
mp4s = extract_mp4s(resp.text, tweet_url, target_all_videos)
if target_all_videos:
video_counter = 1
original_urls = repost_check(resp.text, exclude_replies=False)
@@ -377,6 +416,7 @@ def XitterDownload(source_url, target_location):
download_parts(mp4, output_file)
else:
# use a stream to download the file
r = requests.get(mp4, stream=True)
with open(output_file, "wb") as f:
for chunk in r.iter_content(chunk_size=1024):

View File

@@ -9,12 +9,12 @@ from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
from bech32 import bech32_decode, convertbits, bech32_encode
from nostr_sdk import nostr_sdk, PublicKey, SecretKey, Event, EventBuilder, Tag, Keys
from nostr_dvm.utils.nostr_utils import get_event_by_id, check_and_decrypt_own_tags
import lnurl
from hashlib import sha256
import dotenv
# TODO tor connection to lnbits
# proxies = {
# 'http': 'socks5h://127.0.0.1:9050',
@@ -23,6 +23,7 @@ import dotenv
proxies = {}
def parse_zap_event_tags(zap_event, keys, name, client, config):
zapped_event = None
invoice_amount = 0
@@ -126,10 +127,11 @@ def create_bolt11_lud16(lud16, amount):
except:
return None
def create_lnbits_account(name):
if os.getenv("LNBITS_ADMIN_ID") is None or os.getenv("LNBITS_ADMIN_ID") == "":
print("No admin id set, no wallet created.")
return
return "","","","", "failed"
data = {
'admin_id': os.getenv("LNBITS_ADMIN_ID"),
'wallet_name': name,
@@ -144,9 +146,11 @@ def create_lnbits_account(name):
walletjson = json.loads(r.text)
print(walletjson)
if walletjson.get("wallets"):
return walletjson['wallets'][0]['inkey'], walletjson['wallets'][0]['adminkey'], walletjson['wallets'][0]['id'], walletjson['id'], "success"
return walletjson['wallets'][0]['inkey'], walletjson['wallets'][0]['adminkey'], walletjson['wallets'][0][
'id'], walletjson['id'], "success"
except:
print("error creating wallet")
return "", "", "", "", "failed"
def check_bolt11_ln_bits_is_paid(payment_hash: str, config):
@@ -278,9 +282,6 @@ def zap(lud16: str, amount: int, content, zapped_event: Event, keys, dvm_config,
return None
def get_price_per_sat(currency):
import requests
@@ -298,13 +299,7 @@ def get_price_per_sat(currency):
return price_currency_per_sat
def make_ln_address_nostdress(identifier, npub, pin, nostdressdomain):
#env_path = Path('.env')
#if env_path.is_file():
# dotenv.load_dotenv(env_path, verbose=True, override=True)
print(os.getenv("LNBITS_INVOICE_KEY_" + identifier.upper()))
data = {
'name': identifier,
@@ -317,7 +312,6 @@ def make_ln_address_nostdress(identifier, npub, pin, nostdressdomain):
'currentname': " "
}
try:
url = "https://" + nostdressdomain + "/api/easy/"
res = requests.post(url, data=data)
@@ -330,8 +324,8 @@ def make_ln_address_nostdress(identifier, npub, pin, nostdressdomain):
print(e)
return "", ""
def check_and_set_ln_bits_keys(identifier, npub):
if not os.getenv("LNBITS_INVOICE_KEY_" + identifier.upper()):
invoicekey, adminkey, walletid, userid, success = create_lnbits_account(identifier)
add_key_to_env_file("LNBITS_INVOICE_KEY_" + identifier.upper(), invoicekey)
@@ -341,7 +335,7 @@ def check_and_set_ln_bits_keys(identifier, npub):
lnaddress = ""
pin = ""
if os.getenv("NOSTDRESS_DOMAIN"):
if os.getenv("NOSTDRESS_DOMAIN") and success != "failed":
print(os.getenv("NOSTDRESS_DOMAIN"))
lnaddress, pin = make_ln_address_nostdress(identifier, npub, " ", os.getenv("NOSTDRESS_DOMAIN"))
add_key_to_env_file("LNADDRESS_" + identifier.upper(), lnaddress)
@@ -356,14 +350,8 @@ def check_and_set_ln_bits_keys(identifier, npub):
os.getenv("LNADDRESS_" + identifier.upper()))
def add_key_to_env_file(key, value):
env_path = Path('.env')
if env_path.is_file():
dotenv.load_dotenv(env_path, verbose=True, override=True)
dotenv.set_key(env_path, key, value)
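# Illustrative .env entries this flow writes for an identifier "my_dvm"
# (key names follow the code above; values are placeholders):
# LNBITS_INVOICE_KEY_MY_DVM=<invoice key>
# LNADDRESS_MY_DVM=<lightning address from nostdress>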

View File

@@ -1,107 +0,0 @@
aiohttp==3.9.1
aiosignal==1.3.1
anyio==3.7.1
appdirs==1.4.4
asn1crypto==1.5.1
async-timeout==4.0.3
attrs==23.1.0
base58==2.1.1
beautifulsoup4==4.12.2
bech32==1.2.0
bip32==3.4
bitarray==2.8.3
bitstring==3.1.9
blessed==1.20.0
cassidy==0.1.4
certifi==2023.7.22
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
coincurve==18.0.0
cryptography==41.0.4
decorator==4.4.2
distro==1.8.0
ecdsa==0.18.0
emoji==2.8.0
enumb==0.1.5
environs==9.5.0
eva-decord==0.6.1
exceptiongroup==1.1.3
expo==0.1.2
fastapi==0.103.0
ffmpegio==0.8.5
ffmpegio-core==0.8.5
filelock==3.13.1
frozenlist==1.4.0
fsspec==2023.12.1
h11==0.14.0
httpcore==0.18.0
httpx==0.25.1
huggingface-hub==0.19.4
idna==3.4
imageio==2.33.0
imageio-ffmpeg==0.4.9
importlib-metadata==6.8.0
inquirer==3.1.3
install==1.3.5
instaloader==4.10.1
Jinja2==3.1.2
litellm==1.12.3
lnurl==0.4.1
loguru==0.7.2
MarkupSafe==2.1.3
marshmallow==3.20.1
mediatype==0.1.6
mnemonic==0.20
moviepy==2.0.0.dev2
multidict==6.0.4
nostr-sdk==0.0.5
numpy==1.26.2
openai==1.3.5
outcome==1.2.0
packaging==23.2
pandas==2.1.3
Pillow==10.1.0
pluggy==1.3.0
proglog==0.1.10
pycparser==2.21
pycryptodome==3.19.0
pycryptodomex==3.19.0
pydantic==1.10.13
pydantic_core==2.14.5
pypdf==3.17.1
python-dateutil==2.8.2
python-dotenv==1.0.0
python-editor==1.0.4
pytube==15.0.0
pytz==2023.3.post1
PyUpload==0.1.4
pyuseragents==1.0.5
PyYAML==6.0.1
readchar==4.0.5
regex==2023.10.3
replicate==0.21.1
Represent==1.6.0.post0
requests==2.31.0
requests-toolbelt==1.0.0
safeIO==1.2
six==1.16.0
sniffio==1.3.0
socksio==1.0.0
soupsieve==2.5
SpeechRecognition==3.10.0
SQLAlchemy==1.3.24
sqlalchemy-aio==0.17.0
starlette==0.27.0
tiktoken==0.5.2
tokenizers==0.15.0
tqdm==4.66.1
translatepy==2.3
typing_extensions==4.8.0
tzdata==2023.3
urllib3==2.1.0
uvicorn==0.23.2
wcwidth==0.2.10
websocket-client==1.6.4
yarl==1.9.4
zipp==3.17.0

View File

@@ -1,8 +1,8 @@
from setuptools import setup, find_packages
VERSION = '0.0.2'
DESCRIPTION = 'A framework to build and run NIP90 Data Vending Machines'
LONG_DESCRIPTION = ('A framework to build and run NIP90 Data Vending Machines. '
VERSION = '0.1.0'
DESCRIPTION = 'A framework to build and run Nostr NIP90 Data Vending Machines'
LONG_DESCRIPTION = ('A framework to build and run Nostr NIP90 Data Vending Machines. '
'This is an early stage release. Interfaces might change/brick')
# Setting up
@@ -13,7 +13,10 @@ setup(
author_email="believethehypeonnostr@proton.me",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
packages=find_packages(include=['nostr_dvm']),
packages=find_packages(include=['nostr_dvm', 'nostr_dvm.interfaces', 'nostr_dvm.tasks',
'nostr_dvm.utils', 'nostr_dvm.utils.scrapper',
'nostr_dvm.backends', 'nostr_dvm.backends.mlx',
'nostr_dvm.backends.mlx.stablediffusion']),
install_requires=["nostr-sdk==0.0.5",
"bech32==1.2.0",
"pycryptodome==3.19.0",
@@ -29,7 +32,9 @@ setup(
"instaloader==4.10.1",
"pytube==15.0.0",
"moviepy==2.0.0.dev2",
"zipp==3.17.0"
"zipp==3.17.0",
"urllib3==2.1.0",
"typing_extensions>=4.9.0"
],
keywords=['nostr', 'nip90', 'dvm', 'data vending machine'],
url="https://github.com/believethehype/nostrdvm",

View File

@@ -65,11 +65,35 @@ def nostr_client_test_image(prompt):
config = DVMConfig
send_event(event, client=client, dvm_config=config)
return event.as_json()
def nostr_client_test_tts(prompt):
keys = Keys.from_sk_str(check_and_set_private_key("test_client"))
iTag = Tag.parse(["i", prompt, "text"])
paramTag1 = Tag.parse(["param", "language", "en"])
bidTag = Tag.parse(['bid', str(1000 * 1000), str(1000 * 1000)])
relaysTag = Tag.parse(['relays', "wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
"wss://nostr-pub.wellorder.net"])
alttag = Tag.parse(["alt", "This is a NIP90 DVM AI task to generate TTSt"])
event = EventBuilder(EventDefinitions.KIND_NIP90_TEXT_TO_SPEECH, str("Generate an Audio File."),
[iTag, paramTag1, bidTag, relaysTag, alttag]).to_event(keys)
relay_list = ["wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
"wss://nostr-pub.wellorder.net"]
client = Client(keys)
for relay in relay_list:
client.add_relay(relay)
client.connect()
config = DVMConfig
send_event(event, client=client, dvm_config=config)
return event.as_json()
def nostr_client_test_image_private(prompt, cashutoken):
keys = Keys.from_sk_str(check_and_set_private_key("test_client"))
receiver_keys = Keys.from_sk_str(check_and_set_private_key("sketcher"))
receiver_keys = Keys.from_sk_str(check_and_set_private_key("replicate_sdxl"))
# TODO more advanced logic, more parsing, params etc, just very basic test functions for now
@@ -125,19 +149,20 @@ def nostr_client():
client.subscribe([dm_zap_filter, dvm_filter])
#nostr_client_test_translation("This is the result of the DVM in spanish", "text", "es", 20, 20)
nostr_client_test_translation("note1p8cx2dz5ss5gnk7c59zjydcncx6a754c0hsyakjvnw8xwlm5hymsnc23rs", "event", "es", 20,20)
#nostr_client_test_translation("note1p8cx2dz5ss5gnk7c59zjydcncx6a754c0hsyakjvnw8xwlm5hymsnc23rs", "event", "es", 20,20)
#nostr_client_test_translation("44a0a8b395ade39d46b9d20038b3f0c8a11168e67c442e3ece95e4a1703e2beb", "event", "zh", 20, 20)
nostr_client_test_image("a beautiful purple ostrich watching the sunset")
#nostr_client_test_tts("Hello, this is a test. Mic check one, two.")
#nostr_client_test_image("a beautiful purple ostrich watching the sunset")
#cashutoken = "cashuAeyJ0b2tlbiI6W3sicHJvb2ZzIjpbeyJpZCI6InZxc1VRSVorb0sxOSIsImFtb3VudCI6MSwiQyI6IjAyNWU3ODZhOGFkMmExYTg0N2YxMzNiNGRhM2VhMGIyYWRhZGFkOTRiYzA4M2E2NWJjYjFlOTgwYTE1NGIyMDA2NCIsInNlY3JldCI6InQ1WnphMTZKMGY4UElQZ2FKTEg4V3pPck5rUjhESWhGa291LzVzZFd4S0U9In0seyJpZCI6InZxc1VRSVorb0sxOSIsImFtb3VudCI6NCwiQyI6IjAyOTQxNmZmMTY2MzU5ZWY5ZDc3MDc2MGNjZmY0YzliNTMzMzVmZTA2ZGI5YjBiZDg2Njg5Y2ZiZTIzMjVhYWUwYiIsInNlY3JldCI6IlRPNHB5WE43WlZqaFRQbnBkQ1BldWhncm44UHdUdE5WRUNYWk9MTzZtQXM9In0seyJpZCI6InZxc1VRSVorb0sxOSIsImFtb3VudCI6MTYsIkMiOiIwMmRiZTA3ZjgwYmMzNzE0N2YyMDJkNTZiMGI3ZTIzZTdiNWNkYTBhNmI3Yjg3NDExZWYyOGRiZDg2NjAzNzBlMWIiLCJzZWNyZXQiOiJHYUNIdHhzeG9HM3J2WWNCc0N3V0YxbU1NVXczK0dDN1RKRnVwOHg1cURzPSJ9XSwibWludCI6Imh0dHBzOi8vbG5iaXRzLmJpdGNvaW5maXhlc3RoaXMub3JnL2Nhc2h1L2FwaS92MS9ScDlXZGdKZjlxck51a3M1eVQ2SG5rIn1dfQ=="
#nostr_client_test_image_private("a beautiful ostrich watching the sunset", cashutoken )
#nostr_client_test_image_private("a beautiful ostrich watching the sunset")
class NotificationHandler(HandleNotification):
def handle(self, relay_url, event):
print(f"Received new event from {relay_url}: {event.as_json()}")
if event.kind() == 7000:
print("[Nostr Client]: " + event.as_json())
elif event.kind() > 6000 and event.kind() < 6999:
elif 6000 < event.kind() < 6999:
print("[Nostr Client]: " + event.as_json())
print("[Nostr Client]: " + event.content())