option to use venv or not per module, added tts

Believethehype 2023-12-17 14:38:58 +01:00
parent 6792fbc7dd
commit f2c7aa3c1e
21 changed files with 247 additions and 364 deletions

.gitignore vendored
View File

@ -174,3 +174,4 @@ backends/nserver/venv
backends/nserver/cache
backends/nserver/modules/image_upscale/weights
cache/venvs/
cache/input.wav

View File

@ -5,7 +5,7 @@ import dotenv
from nostr_dvm.bot import Bot
from nostr_dvm.tasks import videogeneration_replicate_svd, imagegeneration_replicate_sdxl, textgeneration_llmlite, \
trending_notes_nostrband, discovery_inactive_follows, translation_google, textextraction_pdf, \
translation_libretranslate, textextraction_google, convert_media, imagegeneration_openai_dalle
translation_libretranslate, textextraction_google, convert_media, imagegeneration_openai_dalle, texttospeech
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
@ -133,6 +133,11 @@ def playground():
ollama = textgeneration_llmlite.build_example("LLM", "llmlite", admin_config)
bot_config.SUPPORTED_DVMS.append(ollama)
ollama.run()
tts = texttospeech.build_example("Text To Speech Test", "tts", admin_config)
bot_config.SUPPORTED_DVMS.append(tts)
tts.run()
# Run the bot
Bot(bot_config)
# Keep the main function alive for libraries that require it, like openai

View File

@ -473,20 +473,24 @@ class DVM:
if task == dvm.TASK:
request_form = dvm.create_request_from_nostr_event(job_event, self.client, self.dvm_config)
python_bin = (r'cache/venvs/' + os.path.basename(dvm_config.SCRIPT).split(".py")[0]
+ "/bin/python")
subprocess.run([python_bin, dvm_config.SCRIPT,
'--request', json.dumps(request_form),
'--identifier', dvm_config.IDENTIFIER,
'--output', 'output.txt'])
print("Finished processing, loading data..")
if dvm_config.USE_OWN_VENV:
python_bin = (r'cache/venvs/' + os.path.basename(dvm_config.SCRIPT).split(".py")[0]
+ "/bin/python")
retcode = subprocess.call([python_bin, dvm_config.SCRIPT,
'--request', json.dumps(request_form),
'--identifier', dvm_config.IDENTIFIER,
'--output', 'output.txt'])
print("Finished processing, loading data..")
with open(os.path.abspath('output.txt')) as f:
result = f.readlines()[0]
print(result)
#f.close()
os.remove(os.path.abspath('output.txt'))
with open(os.path.abspath('output.txt')) as f:
result = f.readlines()[0]
print(result)
#f.close()
os.remove(os.path.abspath('output.txt'))
else:  # Some components might have issues with running code in an outside venv.
# We install locally in these cases for now.
result = dvm.process(request_form)
try:
post_processed = dvm.post_process(str(result), job_event)
send_nostr_reply_event(post_processed, job_event.as_json())
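Note on the protocol above: with USE_OWN_VENV enabled, the parent serialises the request form as JSON, hands it to the module's own script via the venv's interpreter, and then reads the result back from output.txt. The child side is the process_venv() pattern in the task modules below. DVMTaskInterface.process_args() is not shown in this diff, so the following sketch of the child-side flag parsing is an assumption based purely on the flags passed by subprocess.call() here:

import argparse

def process_args():
    # Assumed shape of DVMTaskInterface.process_args(): it only needs to mirror
    # the flags the parent passes above.
    parser = argparse.ArgumentParser()
    parser.add_argument('--request')     # JSON-encoded request_form
    parser.add_argument('--identifier')  # identifier used to rebuild the DVM config
    parser.add_argument('--output')      # file path the parent reads the result from
    return parser.parse_args()

Each module's process_venv() then calls dvm.process(json.loads(args.request)) and writes the result with DVMTaskInterface.write_output(result, args.output).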

View File

@ -9,7 +9,7 @@ from nostr_sdk import Keys
from nostr_dvm.dvm import DVM
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.dvmconfig import DVMConfig
from nostr_dvm.utils.nip89_utils import NIP89Config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
from nostr_dvm.utils.output_utils import post_process_result
@ -28,13 +28,11 @@ class DVMTaskInterface:
admin_config: AdminConfig
dependencies = []
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config, admin_config: AdminConfig = None,
options=None, task=None):
self.init(name, dvm_config, admin_config, nip89config, task)
self.options = options
self.make_venv(dvm_config)
self.install_dependencies(dvm_config)
def init(self, name, dvm_config, admin_config=None, nip89config=None, task=None):
self.NAME = name
@ -58,15 +56,24 @@ class DVMTaskInterface:
self.dvm_config = dvm_config
self.admin_config = admin_config
def make_venv(self, dvm_config):
def install_dependencies(self, dvm_config):
if dvm_config.SCRIPT != "":
dir = r'cache/venvs/' + os.path.basename(dvm_config.SCRIPT).split(".py")[0]
if not os.path.isdir(dir):
print(dir)
create(dir, with_pip=True, upgrade_deps=True)
for (module, package) in self.dependencies:
print("Installing Module: " + module)
run(["bin/pip", "install", package], cwd=dir)
if self.dvm_config.USE_OWN_VENV:
dir = r'cache/venvs/' + os.path.basename(dvm_config.SCRIPT).split(".py")[0]
if not os.path.isdir(dir):
print(dir)
create(dir, with_pip=True, upgrade_deps=True)
for (module, package) in self.dependencies:
print("Installing Venv Module: " + module)
run(["bin/pip", "install", package], cwd=dir)
else:
for module, package in self.dependencies:
if module != "nostr-dvm":
try:
__import__(module)
except ImportError:
print("Installing global Module: " + module)
subprocess.check_call([sys.executable, "-m", "pip", "install", package])
def run(self):
nostr_dvm_thread = Thread(target=self.DVM, args=[self.dvm_config, self.admin_config])
@ -120,10 +127,4 @@ class DVMTaskInterface:
def write_output(result, output):
with open(os.path.abspath(output), 'w') as f:
f.write(result)
#f.close()
def process_venv(self):
pass
if __name__ == '__main__':
process_venv()
# f.close()
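For a module-side view of install_dependencies(): dependencies is a list of (import_name, pip_package) pairs. With USE_OWN_VENV enabled they are pip-installed into cache/venvs/<script name>; otherwise each package is installed into the running interpreter only if importing it fails (the nostr-dvm entry itself is skipped). A structural sketch under those conventions (the class name and the second dependency are illustrative, and the task methods are omitted):

import os
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface

class ExampleTask(DVMTaskInterface):  # hypothetical module, following the tasks below
    dependencies = [("nostr-dvm", "nostr-dvm"),
                    ("requests", "requests")]  # (import name, pip package) - illustrative

    def __init__(self, name, dvm_config, nip89config, admin_config=None, options=None):
        dvm_config.SCRIPT = os.path.abspath(__file__)  # the script the parent runs in the venv
        super().__init__(name, dvm_config, nip89config, admin_config, options)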

View File

@ -1,14 +1,10 @@
import json
import os
from datetime import timedelta
from pathlib import Path
import dotenv
from nostr_sdk import Client, Timestamp, PublicKey, Tag, Keys, Options, SecretKey
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
@ -186,21 +182,5 @@ def process_venv():
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
#if __name__ == '__main__':
# process_venv()
#if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
#else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
#
#admin_config = AdminConfig()
#admin_config.REBROADCAST_NIP89 = False
#admin_config.UPDATE_PROFILE = False
#dvm = build_example("Advanced Nostr Search", "discovery_content_search", admin_config)
#dvm.run()
#keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -1,12 +1,7 @@
import json
import os
from pathlib import Path
import dotenv
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
@ -102,8 +97,7 @@ def build_example(name, identifier, admin_config):
}
nip89config = NIP89Config()
nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"])
nip89config.CONTENT = json.dumps(nip89info)
return MediaConverter(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config)
@ -115,21 +109,6 @@ def process_venv():
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
#if __name__ == '__main__':
# process_venv()
#env_path = Path('.env')
#if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
#else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
#admin_config = AdminConfig()
#admin_config.REBROADCAST_NIP89 = False
#admin_config.UPDATE_PROFILE = False
#dvm = build_example("Media Bringer", "media_converter", admin_config)
#dvm.run()
#keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -1,15 +1,12 @@
import json
import os
from datetime import timedelta
from pathlib import Path
from threading import Thread
import dotenv
from nostr_sdk import Client, Timestamp, PublicKey, Tag, Keys, Options
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
@ -206,19 +203,5 @@ def process_venv():
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
#if __name__ == '__main__':
# process_venv()
#env_path = Path('.env')
#if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
#else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
#admin_config = AdminConfig()
#admin_config.REBROADCAST_NIP89 = False
#admin_config.UPDATE_PROFILE = False
#dvm = build_example("Bygones", "discovery_inactive_follows", admin_config)
#dvm.run()
#keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -2,15 +2,12 @@ import json
import os
import time
from io import BytesIO
from pathlib import Path
import dotenv
import requests
from PIL import Image
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
@ -129,6 +126,7 @@ class ImageGenerationDALLE(DVMTaskInterface):
# playground or elsewhere
def build_example(name, identifier, admin_config):
dvm_config = build_default_config(identifier)
dvm_config.USE_OWN_VENV = True
admin_config.LUD16 = dvm_config.LN_ADDRESS
profit_in_sats = 10
cost_in_cent = 4.0
@ -167,19 +165,5 @@ def process_venv():
DVMTaskInterface.write_output(result, args.output)
#if __name__ == '__main__':
# process_venv()
#env_path = Path('.env')
#if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
#else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
#admin_config = AdminConfig()
#admin_config.REBROADCAST_NIP89 = False
#admin_config.UPDATE_PROFILE = False
#dvm = build_example("Dall-E 3", "dalle3", admin_config)
#dvm.run()
#keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -6,6 +6,7 @@ from PIL import Image
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.backend_utils import keep_alive
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
@ -152,19 +153,5 @@ def process_venv():
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
#if __name__ == '__main__':
# process_venv()
#env_path = Path('.env')
#if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
#else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
#admin_config = AdminConfig()
#admin_config.REBROADCAST_NIP89 = False
#admin_config.UPDATE_PROFILE = False
#dvm = build_example("Stable Diffusion XL", "replicate_sdxl", admin_config)
#dvm.run()
#keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -166,18 +166,5 @@ def process_venv():
DVMTaskInterface.write_output(result, args.output)
#if __name__ == '__main__':
# env_path = Path('.env')
# if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
# else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
#
# admin_config = AdminConfig()
# admin_config.REBROADCAST_NIP89 = False
# admin_config.UPDATE_PROFILE = False
# dvm = build_example("Transcriptor", "speech_recognition", admin_config)
# dvm.run()
# keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -126,18 +126,5 @@ def process_venv():
DVMTaskInterface.write_output(result, args.output)
#if __name__ == '__main__':
# env_path = Path('.env')
# if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
# else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
# admin_config = AdminConfig()
# admin_config.REBROADCAST_NIP89 = False
# admin_config.UPDATE_PROFILE = False
# dvm = build_example("PDF Extractor", "pdf_extractor", admin_config)
# dvm.run()
# keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -135,19 +135,5 @@ def process_venv():
DVMTaskInterface.write_output(result, args.output)
#if __name__ == '__main__':
# env_path = Path('.env')
# if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
# else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
#
# admin_config = AdminConfig()
# admin_config.REBROADCAST_NIP89 = False
# admin_config.UPDATE_PROFILE = False
#
# dvm = build_example("LLM", "llmlite", admin_config)
# dvm.run()
# keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -0,0 +1,136 @@
import json
import os
os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
from pathlib import Path
from nostr_dvm.interfaces.dvmtaskinterface import DVMTaskInterface
from nostr_dvm.utils.admin_utils import AdminConfig
from nostr_dvm.utils.definitions import EventDefinitions
from nostr_dvm.utils.dvmconfig import DVMConfig, build_default_config
from nostr_dvm.utils.nip89_utils import NIP89Config, check_and_set_d_tag
from nostr_dvm.utils.output_utils import upload_media_to_hoster
"""
This file contains a module to generate audio from a text input and a reference voice.
Accepted Inputs: Text
Outputs: Generated audio file
"""
class TextToSpeech(DVMTaskInterface):
KIND: int = EventDefinitions.KIND_NIP90_TEXT_TO_SPEECH
TASK: str = "text-to-speech"
FIX_COST: float = 0
dependencies = [("nostr-dvm", "nostr-dvm"),
("TTS", "TTS==0.22.0")]
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
super().__init__(name, dvm_config, nip89config, admin_config, options)
def is_input_supported(self, tags):
for tag in tags:
if tag.as_vec()[0] == 'i':
input_value = tag.as_vec()[1]
input_type = tag.as_vec()[2]
if input_type != "text":
return False
return True
def create_request_from_nostr_event(self, event, client=None, dvm_config=None):
request_form = {"jobID": event.id().to_hex() + "_" + self.NAME.replace(" ", "")}
prompt = "test"
if self.options.get("input_file") and self.options.get("input_file") != "":
input_file = self.options['input_file']
else:
input_file = "https://media.nostr.build/av/de104e3260be636533a56fd4468b905c1eb22b226143a997aa936b011122af8a.wav"
import urllib.request
if not Path.exists(Path(r'cache/input.wav')):
urllib.request.urlretrieve(input_file, "cache/input.wav")
language = "en"
for tag in event.tags():
if tag.as_vec()[0] == 'i':
input_type = tag.as_vec()[2]
if input_type == "text":
prompt = tag.as_vec()[1]
if input_type == "url":
input_file = tag.as_vec()[1]
elif tag.as_vec()[0] == 'param':
param = tag.as_vec()[1]
if param == "language": # check for param type
language = tag.as_vec()[2]
options = {
"prompt": prompt,
"input_wav": input_file,
"language": language
}
request_form['options'] = json.dumps(options)
return request_form
def process(self, request_form):
import torch
from TTS.api import TTS
options = DVMTaskInterface.set_options(request_form)
device = "cuda" if torch.cuda.is_available() else "cpu"
#else "mps" if torch.backends.mps.is_available() \
print(TTS().list_models())
try:
tts = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to(device)
tts.tts_to_file(
text=options["prompt"],
speaker_wav="cache/input.wav", language=options["language"], file_path="outputs/output.wav")
result = upload_media_to_hoster("outputs/output.wav")
return result
except Exception as e:
print("Error in Module: " + str(e))
raise Exception(e)
# We build an example here that we can call by either calling this file directly from the main directory,
# or by adding it to our playground. You can call the example and adjust it to your needs or redefine it in the
# playground or elsewhere
def build_example(name, identifier, admin_config):
dvm_config = build_default_config(identifier)
admin_config.LUD16 = dvm_config.LN_ADDRESS
options = {'input_file': ""}
nip89info = {
"name": name,
"image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg",
"about": "I Generate Speech from Text",
"encryptionSupported": True,
"cashuAccepted": True,
"nip90Params": {
"language": {
"required": False,
"values": []
}
}
}
nip89config = NIP89Config()
nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"])
nip89config.CONTENT = json.dumps(nip89info)
return TextToSpeech(name=name, dvm_config=dvm_config, nip89config=nip89config, admin_config=admin_config,
options=options)
def process_venv():
args = DVMTaskInterface.process_args()
dvm_config = build_default_config(args.identifier)
dvm = TextToSpeech(name="", dvm_config=dvm_config, nip89config=NIP89Config(), admin_config=None)
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
if __name__ == '__main__':
process_venv()
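To run the new TTS module standalone instead of through the bot, the same build_example() call used in the playground above applies; the admin flags mirror the commented-out runners removed from the other modules in this commit (loading the .env as the playground does is assumed):

from nostr_dvm.tasks import texttospeech
from nostr_dvm.utils.admin_utils import AdminConfig

admin_config = AdminConfig()
admin_config.REBROADCAST_NIP89 = False
admin_config.UPDATE_PROFILE = False

tts = texttospeech.build_example("Text To Speech Test", "tts", admin_config)
tts.run()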

View File

@ -154,21 +154,5 @@ def process_venv():
DVMTaskInterface.write_output(result, args.output)
#if __name__ == '__main__':
#process_venv()
#env_path = Path('.env')
#if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
#else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
#admin_config = AdminConfig()
#admin_config.REBROADCAST_NIP89 = False
#admin_config.UPDATE_PROFILE = False
#dvm = build_example("Google Translator", "google_translator", admin_config)
#dvm.run()
#keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -156,18 +156,5 @@ def process_venv():
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
# if __name__ == '__main__':
# env_path = Path('.env')
# if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
# else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
#
# admin_config = AdminConfig()
# admin_config.REBROADCAST_NIP89 = False
# admin_config.UPDATE_PROFILE = False
# dvm = build_example("Libre Translator", "libre_translator", admin_config)
# dvm.run()
#
# keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -125,19 +125,5 @@ def process_venv():
result = dvm.process(json.loads(args.request))
DVMTaskInterface.write_output(result, args.output)
#if __name__ == '__main__':
# env_path = Path('.env')
# if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
# else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
#
# admin_config = AdminConfig()
# admin_config.REBROADCAST_NIP89 = False
# admin_config.UPDATE_PROFILE = False
#
# dvm = build_example("Trending Notes on Nostr.band", "trending_notes_nostrband", admin_config)
# dvm.run()
#
# keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -37,7 +37,6 @@ class VideoGenerationReplicateSVD(DVMTaskInterface):
def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
admin_config: AdminConfig = None, options=None):
dvm_config.SCRIPT = os.path.abspath(__file__)
print(dvm_config.SCRIPT)
super().__init__(name, dvm_config, nip89config, admin_config, options)
@ -149,19 +148,5 @@ def process_venv():
DVMTaskInterface.write_output(result, args.output)
#if __name__ == '__main__':
# env_path = Path('.env')
# if env_path.is_file():
# print(f'loading environment from {env_path.resolve()}')
# dotenv.load_dotenv(env_path, verbose=True, override=True)
# else:
# raise FileNotFoundError(f'.env file not found at {env_path} ')
#
# admin_config = AdminConfig()
# admin_config.REBROADCAST_NIP89 = False
# admin_config.UPDATE_PROFILE = False
#
# dvm = build_example("Stable Video Diffusion", "replicate_svd", admin_config)
# dvm.run()
#
# keep_alive()
if __name__ == '__main__':
process_venv()

View File

@ -9,27 +9,29 @@ class EventDefinitions:
KIND_NIP94_METADATA = 1063
KIND_FEEDBACK = 7000
KIND_NIP90_EXTRACT_TEXT = 5000
KIND_NIP90_RESULT_EXTRACT_TEXT = 6000
KIND_NIP90_RESULT_EXTRACT_TEXT = KIND_NIP90_EXTRACT_TEXT + 1000
KIND_NIP90_SUMMARIZE_TEXT = 5001
KIND_NIP90_RESULT_SUMMARIZE_TEXT = 6001
KIND_NIP90_RESULT_SUMMARIZE_TEXT = KIND_NIP90_SUMMARIZE_TEXT + 1000
KIND_NIP90_TRANSLATE_TEXT = 5002
KIND_NIP90_RESULT_TRANSLATE_TEXT = 6002
KIND_NIP90_RESULT_TRANSLATE_TEXT = KIND_NIP90_TRANSLATE_TEXT + 1000
KIND_NIP90_TEXT_TO_SPEECH = 5005
KIND_NIP90_TEXT_TO_SPEECH_RESULT = KIND_NIP90_TEXT_TO_SPEECH + 1000
KIND_NIP90_GENERATE_TEXT = 5050
KIND_NIP90_RESULT_GENERATE_TEXT = 6050
KIND_NIP90_RESULT_GENERATE_TEXT = KIND_NIP90_GENERATE_TEXT + 1000
KIND_NIP90_GENERATE_IMAGE = 5100
KIND_NIP90_RESULT_GENERATE_IMAGE = 6100
KIND_NIP90_RESULT_GENERATE_IMAGE = KIND_NIP90_GENERATE_IMAGE + 1000
KIND_NIP90_CONVERT_VIDEO = 5200
KIND_NIP90_RESULT_CONVERT_VIDEO = 6200
KIND_NIP90_RESULT_CONVERT_VIDEO = KIND_NIP90_CONVERT_VIDEO + 1000
KIND_NIP90_GENERATE_VIDEO = 5202
KIND_NIP90_RESULT_GENERATE_VIDEO = 6202
KIND_NIP90_RESULT_GENERATE_VIDEO = KIND_NIP90_GENERATE_VIDEO + 1000
KIND_NIP90_CONTENT_DISCOVERY = 5300
KIND_NIP90_RESULT_CONTENT_DISCOVERY = 6300
KIND_NIP90_RESULT_CONTENT_DISCOVERY = KIND_NIP90_CONTENT_DISCOVERY + 1000
KIND_NIP90_PEOPLE_DISCOVERY = 5301
KIND_NIP90_RESULT_PEOPLE_DISCOVERY = 6301
KIND_NIP90_RESULT_PEOPLE_DISCOVERY = KIND_NIP90_PEOPLE_DISCOVERY + 1000
KIND_NIP90_CONTENT_SEARCH = 5302
KIND_NIP90_RESULTS_CONTENT_SEARCH = 6302
KIND_NIP90_RESULTS_CONTENT_SEARCH = KIND_NIP90_CONTENT_SEARCH + 1000
KIND_NIP90_GENERIC = 5999
KIND_NIP90_RESULT_GENERIC = 6999
KIND_NIP90_RESULT_GENERIC = KIND_NIP90_GENERIC + 1000
ANY_RESULT = [KIND_NIP90_RESULT_EXTRACT_TEXT,
KIND_NIP90_RESULT_SUMMARIZE_TEXT,
KIND_NIP90_RESULT_TRANSLATE_TEXT,
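The rewrite above changes no numeric values; every result kind remains its request kind plus 1000, which also fixes the new TTS pair at 5005/6005. A quick sanity check:

from nostr_dvm.utils.definitions import EventDefinitions

assert EventDefinitions.KIND_NIP90_TEXT_TO_SPEECH == 5005
assert EventDefinitions.KIND_NIP90_TEXT_TO_SPEECH_RESULT == 6005  # 5005 + 1000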

View File

@ -23,12 +23,13 @@ class DVMConfig:
RELAY_TIMEOUT = 3
EXTERNAL_POST_PROCESS_TYPE = PostProcessFunctionType.NONE # Leave this on None, except the DVM is external
LNBITS_INVOICE_KEY = ''
LNBITS_INVOICE_KEY = ''  # Will be generated automatically by default, or read from .env
LNBITS_ADMIN_KEY = '' # In order to pay invoices, e.g. from the bot to DVMs, or reimburse users.
LNBITS_URL = 'https://lnbits.com'
LN_ADDRESS = ''
SCRIPT = ''
IDENTIFIER = ''
USE_OWN_VENV = True  # Create a separate venv for each DVM's process function. Disable this to install packages into the main venv instead; only recommended if you don't need to run DVMs with different dependency versions.
DB: str
NEW_USER_BALANCE: int = 0 # Free credits for new users
NIP89: NIP89Config
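USE_OWN_VENV defaults to True, so every DVM with a SCRIPT gets its own venv unless its builder opts out. The per-DVM override is a one-liner on the config returned by build_default_config(), mirroring the explicit dvm_config.USE_OWN_VENV = True set in the DALL-E builder above (the identifier here is illustrative):

from nostr_dvm.utils.dvmconfig import build_default_config

dvm_config = build_default_config("example_dvm")  # hypothetical identifier
dvm_config.USE_OWN_VENV = False  # install this DVM's dependencies into the main venv instead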

View File

@ -1,107 +0,0 @@
aiohttp==3.9.1
aiosignal==1.3.1
anyio==3.7.1
appdirs==1.4.4
asn1crypto==1.5.1
async-timeout==4.0.3
attrs==23.1.0
base58==2.1.1
beautifulsoup4==4.12.2
bech32==1.2.0
bip32==3.4
bitarray==2.8.3
bitstring==3.1.9
blessed==1.20.0
cassidy==0.1.4
certifi==2023.7.22
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
coincurve==18.0.0
cryptography==41.0.4
decorator==4.4.2
distro==1.8.0
ecdsa==0.18.0
emoji==2.8.0
enumb==0.1.5
environs==9.5.0
eva-decord==0.6.1
exceptiongroup==1.1.3
expo==0.1.2
fastapi==0.103.0
ffmpegio==0.8.5
ffmpegio-core==0.8.5
filelock==3.13.1
frozenlist==1.4.0
fsspec==2023.12.1
h11==0.14.0
httpcore==0.18.0
httpx==0.25.1
huggingface-hub==0.19.4
idna==3.4
imageio==2.33.0
imageio-ffmpeg==0.4.9
importlib-metadata==6.8.0
inquirer==3.1.3
install==1.3.5
instaloader==4.10.1
Jinja2==3.1.2
litellm==1.12.3
lnurl==0.4.1
loguru==0.7.2
MarkupSafe==2.1.3
marshmallow==3.20.1
mediatype==0.1.6
mnemonic==0.20
moviepy==2.0.0.dev2
multidict==6.0.4
nostr-sdk==0.0.5
numpy==1.26.2
openai==1.3.5
outcome==1.2.0
packaging==23.2
pandas==2.1.3
Pillow==10.1.0
pluggy==1.3.0
proglog==0.1.10
pycparser==2.21
pycryptodome==3.19.0
pycryptodomex==3.19.0
pydantic==1.10.13
pydantic_core==2.14.5
pypdf==3.17.1
python-dateutil==2.8.2
python-dotenv==1.0.0
python-editor==1.0.4
pytube==15.0.0
pytz==2023.3.post1
PyUpload==0.1.4
pyuseragents==1.0.5
PyYAML==6.0.1
readchar==4.0.5
regex==2023.10.3
replicate==0.21.1
Represent==1.6.0.post0
requests==2.31.0
requests-toolbelt==1.0.0
safeIO==1.2
six==1.16.0
sniffio==1.3.0
socksio==1.0.0
soupsieve==2.5
SpeechRecognition==3.10.0
SQLAlchemy==1.3.24
sqlalchemy-aio==0.17.0
starlette==0.27.0
tiktoken==0.5.2
tokenizers==0.15.0
tqdm==4.66.1
translatepy==2.3
typing_extensions==4.8.0
tzdata==2023.3
urllib3==2.1.0
uvicorn==0.23.2
wcwidth==0.2.10
websocket-client==1.6.4
yarl==1.9.4
zipp==3.17.0

View File

@ -65,6 +65,30 @@ def nostr_client_test_image(prompt):
config = DVMConfig
send_event(event, client=client, dvm_config=config)
return event.as_json()
def nostr_client_test_tts(prompt):
keys = Keys.from_sk_str(check_and_set_private_key("test_client"))
iTag = Tag.parse(["i", prompt, "text"])
paramTag1 = Tag.parse(["param", "language", "en"])
bidTag = Tag.parse(['bid', str(1000 * 1000), str(1000 * 1000)])
relaysTag = Tag.parse(['relays', "wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
"wss://nostr-pub.wellorder.net"])
alttag = Tag.parse(["alt", "This is a NIP90 DVM AI task to generate TTS"])
event = EventBuilder(EventDefinitions.KIND_NIP90_TEXT_TO_SPEECH, str("Generate an Audio File."),
[iTag, paramTag1, bidTag, relaysTag, alttag]).to_event(keys)
relay_list = ["wss://relay.damus.io", "wss://blastr.f7z.xyz", "wss://relayable.org",
"wss://nostr-pub.wellorder.net"]
client = Client(keys)
for relay in relay_list:
client.add_relay(relay)
client.connect()
config = DVMConfig
send_event(event, client=client, dvm_config=config)
return event.as_json()
def nostr_client_test_image_private(prompt, cashutoken):
@ -127,8 +151,9 @@ def nostr_client():
#nostr_client_test_translation("This is the result of the DVM in spanish", "text", "es", 20, 20)
#nostr_client_test_translation("note1p8cx2dz5ss5gnk7c59zjydcncx6a754c0hsyakjvnw8xwlm5hymsnc23rs", "event", "es", 20,20)
#nostr_client_test_translation("44a0a8b395ade39d46b9d20038b3f0c8a11168e67c442e3ece95e4a1703e2beb", "event", "zh", 20, 20)
#nostr_client_test_image("a beautiful purple ostrich watching the sunset")
nostr_client_test_tts("Hello, this is a test. One two three.")
nostr_client_test_image("a beautiful purple ostrich watching the sunset")
#cashutoken = "cashuAeyJ0b2tlbiI6W3sicHJvb2ZzIjpbeyJpZCI6InZxc1VRSVorb0sxOSIsImFtb3VudCI6MSwiQyI6IjAyNWU3ODZhOGFkMmExYTg0N2YxMzNiNGRhM2VhMGIyYWRhZGFkOTRiYzA4M2E2NWJjYjFlOTgwYTE1NGIyMDA2NCIsInNlY3JldCI6InQ1WnphMTZKMGY4UElQZ2FKTEg4V3pPck5rUjhESWhGa291LzVzZFd4S0U9In0seyJpZCI6InZxc1VRSVorb0sxOSIsImFtb3VudCI6NCwiQyI6IjAyOTQxNmZmMTY2MzU5ZWY5ZDc3MDc2MGNjZmY0YzliNTMzMzVmZTA2ZGI5YjBiZDg2Njg5Y2ZiZTIzMjVhYWUwYiIsInNlY3JldCI6IlRPNHB5WE43WlZqaFRQbnBkQ1BldWhncm44UHdUdE5WRUNYWk9MTzZtQXM9In0seyJpZCI6InZxc1VRSVorb0sxOSIsImFtb3VudCI6MTYsIkMiOiIwMmRiZTA3ZjgwYmMzNzE0N2YyMDJkNTZiMGI3ZTIzZTdiNWNkYTBhNmI3Yjg3NDExZWYyOGRiZDg2NjAzNzBlMWIiLCJzZWNyZXQiOiJHYUNIdHhzeG9HM3J2WWNCc0N3V0YxbU1NVXczK0dDN1RKRnVwOHg1cURzPSJ9XSwibWludCI6Imh0dHBzOi8vbG5iaXRzLmJpdGNvaW5maXhlc3RoaXMub3JnL2Nhc2h1L2FwaS92MS9ScDlXZGdKZjlxck51a3M1eVQ2SG5rIn1dfQ=="
#nostr_client_test_image_private("a beautiful ostrich watching the sunset")