move nserver to branch for now

This commit is contained in:
Believethehype 2023-12-05 21:14:04 +01:00
parent a9e869d3bb
commit d2d7027e80
9 changed files with 0 additions and 1213 deletions

View File

@ -1,10 +0,0 @@
# NostrAI Data Vending Machine Backends
Each DVM task might either run locally or use a specific backend.
Especially for GPU tasks it might make sense to outsource some tasks on other machines.
Backends can also be API calls to (paid) services. This directory contains basic calling functions to such backends.
Modules in the folder "tasks" might use these functions to call a specific backend.
Using backends might require some extra work like running/hosting a server or acquiring an API key.

View File

@ -1,122 +0,0 @@
import io
import json
import os
import re
import time
import zipfile
from pathlib import Path
import pandas as pd
import requests
import PIL.Image as Image
from utils.output_utils import upload_media_to_hoster
"""
This file contains basic calling functions for ML tasks that are outsourced to nova-server
(https://pypi.org/project/hcai-nova-server/). nova-server is an Open-Source backend that enables running models locally
based on predefined modules (nova-server-modules), by accepting a request form.
Modules are deployed in separate virtual environments so dependencies won't conflict.
Setup nova-server:
https://hcmlab.github.io/nova-server/docbuild/html/tutorials/introduction.html
"""
"""
send_request_to_nova_server(request_form, address)
Function to send a request_form to the server, containing all the information we parsed from the Nostr event and added
in the module that is calling the server
"""
def send_request_to_nova_server(request_form, address):
    """Submit a request form to the NOVA-Server /process route and return the raw response text."""
    print("Sending job to NOVA-Server")
    endpoint = f"http://{address}/process"
    form_headers = {'Content-type': 'application/x-www-form-urlencoded'}
    reply = requests.post(endpoint, headers=form_headers, data=request_form)
    return reply.text
def send_file_to_nova_server(filepath, address):
    """Upload a local file to the NOVA-Server /upload route.

    Returns the server's response body decoded as UTF-8, or None when the
    upload fails (the error is printed, matching the module's best-effort style).

    Fixes over the original: the file handle is closed via a context manager
    (it was leaked), and the failure path no longer raises UnboundLocalError /
    NameError by referencing `result` / `response` before assignment.
    """
    print("Sending file to NOVA-Server")
    url = f"http://{address}/upload"
    result = None
    try:
        with open(filepath, 'rb') as fp:
            response = requests.post(url, files={'file': fp})
        result = response.content.decode('utf-8')
    except Exception as e:
        print(e)
    return result
"""
check_nova_server_status(request_form, address)
Function that requests the status of the current process with the jobID (we use the Nostr event as jobID).
When the Job is successfully finished we grab the result and depending on the type return the output
We throw an exception on error
"""
def check_nova_server_status(jobID, address) -> str | pd.DataFrame:
    """Poll NOVA-Server until job `jobID` finishes, then fetch and return its result.

    Polls /job_status and /log once per second, echoing only log output not yet
    printed. Job states on the server: WAITING = 0, RUNNING = 1, FINISHED = 2,
    ERROR = 3. On success (status 2) the result is fetched from /fetch_result:
      * image/jpeg  -> saved locally, uploaded via upload_media_to_hoster, URL returned
      * text/plain  -> decoded UTF-8 text returned
      * zip archive -> first *.annotation~ member parsed into a pandas DataFrame
    Returns "error" when the job ends in status 3. If fetching or parsing the
    result raises, the error is printed and None is returned implicitly
    (unchanged best-effort behavior of the original).
    """
    headers = {'Content-type': 'application/x-www-form-urlencoded'}
    url_status = 'http://' + address + '/job_status'
    url_log = 'http://' + address + '/log'
    print("Sending Status Request to NOVA-Server")
    data = {"jobID": jobID}
    status = 0
    length = 0  # how many characters of the server log have been printed so far
    while status != 2 and status != 3:
        response_status = requests.post(url_status, headers=headers, data=data)
        response_log = requests.post(url_log, headers=headers, data=data)
        status = int(json.loads(response_status.text)['status'])
        log_content = str(json.loads(response_log.text)['message']).replace("ERROR", "").replace("INFO", "")
        # Print only the portion of the log that is new since the last poll.
        log = log_content[length:]
        length = len(log_content)
        if log != "":
            print(log)
        time.sleep(1.0)
    if status == 2:
        try:
            url_fetch = 'http://' + address + '/fetch_result'
            print("Fetching Results from NOVA-Server...")
            data = {"jobID": jobID, "delete_after_download": True}
            response = requests.post(url_fetch, headers=headers, data=data)
            content_type = response.headers['content-type']
            print("Content-type: " + str(content_type))
            if content_type == "image/jpeg":
                image = Image.open(io.BytesIO(response.content))
                image.save("./outputs/image.jpg")
                result = upload_media_to_hoster("./outputs/image.jpg")
                os.remove("./outputs/image.jpg")
                return result
            elif content_type == 'text/plain; charset=utf-8':
                return response.content.decode('utf-8')
            elif content_type == "application/x-zip-compressed":
                # Context manager closes the archive deterministically
                # (the original leaked the ZipFile handle).
                with zipfile.ZipFile(io.BytesIO(response.content), "r") as zf:
                    for fileinfo in zf.infolist():
                        if fileinfo.filename.endswith(".annotation~"):
                            try:
                                anno_string = zf.read(fileinfo).decode('utf-8', errors='replace')
                                columns = ['from', 'to', 'name', 'conf']
                                return pd.DataFrame([row.split(';') for row in anno_string.split('\n')],
                                                    columns=columns)
                            except Exception as e:
                                print(e)
        except Exception as e:
            print("Couldn't fetch result: " + str(e))
    elif status == 3:
        return "error"

77
main.py
View File

@ -11,11 +11,8 @@ from interfaces.dvmtaskinterface import DVMTaskInterface
import tasks.convert_media as convert_media
import tasks.discovery_inactive_follows as discovery_inactive_follows
import tasks.imagegeneration_openai_dalle as imagegeneration_openai_dalle
import tasks.imagegeneration_sdxl as imagegeneration_sdxl
import tasks.imagegeneration_sdxlimg2img as imagegeneration_sdxlimg2img
import tasks.textextraction_pdf as textextraction_pdf
import tasks.textextraction_google as textextraction_google
import tasks.textextraction_whisperx as textextraction_whisperx
import tasks.translation_google as translation_google
import tasks.translation_libretranslate as translation_libretranslate
@ -70,80 +67,6 @@ def playground():
bot_config.SUPPORTED_DVMS.append(libre_translator) # We add translator to the bot
libre_translator.run()
# Spawn DVM3 Kind 5100 Image Generation This one uses a specific backend called nova-server.
# If you want to use it, see the instructions in backends/nova_server
if os.getenv("NOVA_SERVER") is not None and os.getenv("NOVA_SERVER") != "":
stable_artist = imagegeneration_sdxl.build_example("Stable Diffusion XL", "stable_diffusion",
admin_config, os.getenv("NOVA_SERVER"),
"stabilityai/stable-diffusion-xl",
"")
bot_config.SUPPORTED_DVMS.append(stable_artist) # We add unstable Diffusion to the bot
stable_artist.run()
# Spawn DVM4, another Instance of text-to-image, as before but use a different privatekey, model and lora this time.
if os.getenv("NOVA_SERVER") is not None and os.getenv("NOVA_SERVER") != "":
# Let's not use one of the examples in this one, but generate our own variation of the dvm. We make a new DVM
# called "Sketcher", with a predefined model and lora, so it will always make sketches of prompts
def build_sketcher(name, identifier, admin_config):
dvm_config = DVMConfig()
dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier)
dvm_config.LNBITS_INVOICE_KEY = os.getenv("LNBITS_INVOICE_KEY")
dvm_config.LNBITS_URL = os.getenv("LNBITS_HOST")
nip90params = {
"negative_prompt": {
"required": False,
"values": []
},
"ratio": {
"required": False,
"values": ["1:1", "4:3", "16:9", "3:4", "9:16", "10:16"]
}
}
nip89info = {
"name": name,
"image": "https://image.nostr.build/229c14e440895da30de77b3ca145d66d4b04efb4027ba3c44ca147eecde891f1.jpg",
"about": "I draw images based on a prompt in the style of paper sketches",
"encryptionSupported": True,
"cashuAccepted": True,
"nip90Params": nip90params
}
# A module might have options it can be initialized with, here we set a default model, lora and the nova-server
# address it should use. These parameters can be freely defined in the task component
options = {'default_model': "mohawk", 'default_lora': "timburton", 'nova_server': os.getenv("NOVA_SERVER")}
nip89config = NIP89Config()
nip89config.DTAG = nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY,
nip89info["image"])
nip89config.CONTENT = json.dumps(nip89info)
# We add an optional AdminConfig for this one, and tell the dvm to rebroadcast its NIP89
return imagegeneration_sdxl.ImageGenerationSDXL(name=name, dvm_config=dvm_config, nip89config=nip89config,
admin_config=admin_config, options=options)
sketcher = build_sketcher("Sketcher", "sketcher", admin_config)
bot_config.SUPPORTED_DVMS.append(sketcher) # We also add Sketcher to the bot
sketcher.run()
# Spawn DVM5, image-to-image, .
if os.getenv("NOVA_SERVER") is not None and os.getenv("NOVA_SERVER") != "":
imageconverter = imagegeneration_sdxlimg2img.build_example("Image Converter Inkpunk",
"image_converter_inkpunk", admin_config,
os.getenv("NOVA_SERVER"),
"inkpunk", 0.6)
bot_config.SUPPORTED_DVMS.append(imageconverter) # We also add Sketcher to the bot
imageconverter.run()
# Spawn DVM5, Another script on nova-server calling WhisperX to transcribe media files
if os.getenv("NOVA_SERVER") is not None and os.getenv("NOVA_SERVER") != "":
whisperer = textextraction_whisperx.build_example("Whisperer", "whisperx", admin_config, os.getenv("NOVA_SERVER"))
bot_config.SUPPORTED_DVMS.append(whisperer) # We also add Sketcher to the bot
whisperer.run()
transcriptor = textextraction_google.build_example("Transcriptor", "speech_recognition", admin_config)
bot_config.SUPPORTED_DVMS.append(transcriptor) # We also add Sketcher to the bot
transcriptor.run()
# Spawn DVM6, this one requires an OPENAI API Key and balance with OpenAI, you will move the task to them and pay
# per call. Make sure you have enough balance and the DVM's cost is set higher than what you pay yourself, except, you know,

View File

@ -9,11 +9,9 @@ Current List of Tasks:
| Module | Kind | Description | Backend |
|-------------------------|------|------------------------------------------------|-------------|
| TextExtractionPDF | 5000 | Extracts Text from a PDF file | local |
| SpeechToTextWhisperX | 5000 | Extracts Speech from Media files | nova-server |
| SpeechToTextGoogle | 5000 | Extracts Speech from Media files via Google | googleAPI |
| TranslationGoogle | 5002 | Translates Inputs to another language | googleAPI |
| TranslationLibre | 5002 | Translates Inputs to another language | libreAPI |
| ImageGenerationSDXL | 5100 | Generates an Image with StableDiffusionXL | nova-server |
| ImageGenerationDALLE | 5100 | Generates an Image with Dall-E | openAI |
| MediaConverter | 5200 | Converts a link of a media file and uploads it | openAI |
| DiscoverInactiveFollows | 5301 | Find inactive Nostr users | local |

View File

@ -1,220 +0,0 @@
import json
import os
from multiprocessing.pool import ThreadPool
from pathlib import Path
import dotenv
from backends.nova_server import check_nova_server_status, send_request_to_nova_server
from interfaces.dvmtaskinterface import DVMTaskInterface
from utils.admin_utils import AdminConfig
from utils.backend_utils import keep_alive
from utils.dvmconfig import DVMConfig
from utils.nip89_utils import NIP89Config, check_and_set_d_tag
from utils.definitions import EventDefinitions
from utils.nostr_utils import check_and_set_private_key
"""
This File contains a Module to transform Text input on NOVA-Server and receive results back.
Accepted Inputs: Prompt (text)
Outputs: An url to an Image
Params: -model # models: juggernaut, dynavision, colossusProject, newreality, unstable
-lora # loras (weights on top of models) voxel,
"""
class ImageGenerationSDXL(DVMTaskInterface):
    """NIP90 text-to-image DVM that renders prompts with Stable Diffusion XL on NOVA-Server."""

    KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE
    TASK: str = "text-to-image"
    FIX_COST: float = 50

    def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
                 admin_config: AdminConfig = None, options=None):
        super().__init__(name, dvm_config, nip89config, admin_config, options)

    def is_input_supported(self, tags):
        """Accept only text inputs; reject unsupported/empty output formats.

        Bug fix: the original `not (output == "image/png" or "image/jpg" or ...)`
        was always False ("image/jpg" is a truthy literal), so unsupported
        output formats were never rejected. A membership test expresses the
        intended check; the empty string is also rejected, as before.
        """
        for tag in tags:
            if tag.as_vec()[0] == 'i':
                input_type = tag.as_vec()[2]
                if input_type != "text":
                    return False
            elif tag.as_vec()[0] == 'output':
                output = tag.as_vec()[1]
                if output not in ("image/png", "image/jpg",
                                  "image/png;format=url", "image/jpg;format=url"):
                    print("Output format not supported, skipping..")
                    return False
        return True

    def create_request_from_nostr_event(self, event, client=None, dvm_config=None):
        """Translate a NIP90 event into a NOVA-Server request form for the SDXL trainer.

        Reads the prompt from the 'i' tag and optional 'param' tags
        (negative_prompt, lora, lora_weight, strength, guidance_scale,
        ratio, size, model); unset numeric options stay "" so the server
        applies its own defaults.
        """
        request_form = {"jobID": event.id().to_hex() + "_" + self.NAME.replace(" ", "")}
        request_form["trainerFilePath"] = 'modules\\stablediffusionxl\\stablediffusionxl.trainer'

        prompt = ""
        negative_prompt = ""
        if self.options.get("default_model") and self.options.get("default_model") != "":
            model = self.options['default_model']
        else:
            model = "stabilityai/stable-diffusion-xl-base-1.0"
        ratio_width = "1"
        ratio_height = "1"
        width = ""
        height = ""
        if self.options.get("default_lora") and self.options.get("default_lora") != "":
            lora = self.options['default_lora']
        else:
            lora = ""
        lora_weight = ""
        strength = ""
        guidance_scale = ""

        for tag in event.tags():
            if tag.as_vec()[0] == 'i':
                input_type = tag.as_vec()[2]
                if input_type == "text":
                    prompt = tag.as_vec()[1]
            elif tag.as_vec()[0] == 'param':
                print("Param: " + tag.as_vec()[1] + ": " + tag.as_vec()[2])
                if tag.as_vec()[1] == "negative_prompt":
                    negative_prompt = tag.as_vec()[2]
                elif tag.as_vec()[1] == "lora":
                    lora = tag.as_vec()[2]
                elif tag.as_vec()[1] == "lora_weight":
                    lora_weight = tag.as_vec()[2]
                elif tag.as_vec()[1] == "strength":
                    strength = float(tag.as_vec()[2])
                elif tag.as_vec()[1] == "guidance_scale":
                    guidance_scale = float(tag.as_vec()[2])
                elif tag.as_vec()[1] == "ratio":
                    # Either two separate values ("4" "3") or one "4:3" string.
                    if len(tag.as_vec()) > 3:
                        ratio_width = (tag.as_vec()[2])
                        ratio_height = (tag.as_vec()[3])
                    elif len(tag.as_vec()) == 3:
                        split = tag.as_vec()[2].split(":")
                        ratio_width = split[0]
                        ratio_height = split[1]
                # if size is set it will overwrite ratio.
                elif tag.as_vec()[1] == "size":
                    if len(tag.as_vec()) > 3:
                        width = (tag.as_vec()[2])
                        height = (tag.as_vec()[3])
                    elif len(tag.as_vec()) == 3:
                        split = tag.as_vec()[2].split("x")
                        if len(split) > 1:
                            width = split[0]
                            height = split[1]
                elif tag.as_vec()[1] == "model":
                    model = tag.as_vec()[2]

        io_input = {
            "id": "input_prompt",
            "type": "input",
            "src": "request:text",
            "data": prompt
        }
        io_negative = {
            "id": "negative_prompt",
            "type": "input",
            "src": "request:text",
            "data": negative_prompt
        }
        io_output = {
            "id": "output_image",
            "type": "output",
            "src": "request:image"
        }
        request_form['data'] = json.dumps([io_input, io_negative, io_output])

        options = {
            "model": model,
            "ratio": ratio_width + '-' + ratio_height,
            "width": width,
            "height": height,
            "strength": strength,
            "guidance_scale": guidance_scale,
            "lora": lora,
            "lora_weight": lora_weight
        }
        request_form['options'] = json.dumps(options)
        return request_form

    def process(self, request_form):
        """Send the request form to NOVA-Server and block until the result is available."""
        try:
            # Call the process route of NOVA-Server with our request form.
            response = send_request_to_nova_server(request_form, self.options['nova_server'])
            if bool(json.loads(response)['success']):
                print("Job " + request_form['jobID'] + " sent to NOVA-server")
                pool = ThreadPool(processes=1)
                thread = pool.apply_async(check_nova_server_status,
                                          (request_form['jobID'], self.options['nova_server']))
                print("Wait for results of NOVA-Server...")
                result = thread.get()
                return result
        except Exception as e:
            raise Exception(e)
# We build an example here that we can call by either calling this file directly from the main directory,
# or by adding it to our playground. You can call the example and adjust it to your needs or redefine it in the
# playground or elsewhere
def build_example(name, identifier, admin_config, server_address,
                  default_model="stabilityai/stable-diffusion-xl-base-1.0", default_lora=""):
    """Assemble a ready-to-run ImageGenerationSDXL DVM instance for the playground."""
    dvm_config = DVMConfig()
    dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier)
    # This one will not use Lnbits to create invoices, but rely on zaps
    dvm_config.LNBITS_INVOICE_KEY = ""
    dvm_config.LNBITS_URL = ""

    # A module might have options it can be initialized with; here we set a default
    # model and the nova-server address it should use. These parameters can be
    # freely defined in the task component.
    options = {
        'default_model': default_model,
        'default_lora': default_lora,
        'nova_server': server_address,
    }

    nip90params = {
        "negative_prompt": {"required": False, "values": []},
        "ratio": {"required": False,
                  "values": ["1:1", "4:3", "16:9", "3:4", "9:16", "10:16"]},
    }
    nip89info = {
        "name": name,
        "image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg",
        "about": "I draw images based on a prompt with a Model called unstable diffusion",
        "encryptionSupported": True,
        "cashuAccepted": True,
        "nip90Params": nip90params,
    }

    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY,
                                           nip89info["image"])
    nip89config.CONTENT = json.dumps(nip89info)
    return ImageGenerationSDXL(name=name, dvm_config=dvm_config, nip89config=nip89config,
                               admin_config=admin_config, options=options)
if __name__ == '__main__':
    # Load the environment before anything else; fail loudly when it is missing.
    env_path = Path('.env')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Unstable Diffusion", "unstable_diffusion", admin_config,
                        os.getenv("NOVA_SERVER"), "stabilityai/stable-diffusion-xl", "")
    dvm.run()
    keep_alive()

View File

@ -1,255 +0,0 @@
import json
import os
from multiprocessing.pool import ThreadPool
from pathlib import Path
import dotenv
from backends.nova_server import check_nova_server_status, send_request_to_nova_server
from interfaces.dvmtaskinterface import DVMTaskInterface
from utils.admin_utils import AdminConfig
from utils.backend_utils import keep_alive
from utils.dvmconfig import DVMConfig
from utils.nip89_utils import NIP89Config, check_and_set_d_tag
from utils.definitions import EventDefinitions
from utils.nostr_utils import check_and_set_private_key
"""
This File contains a Module to transform Text input on NOVA-Server and receive results back.
Accepted Inputs: Prompt (text)
Outputs: An url to an Image
Params: -model # models: juggernaut, dynavision, colossusProject, newreality, unstable
-lora # loras (weights on top of models) voxel,
"""
class ImageGenerationSDXLIMG2IMG(DVMTaskInterface):
    """NIP90 image-to-image DVM that restyles an input image with SDXL img2img on NOVA-Server."""

    KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE
    TASK: str = "image-to-image"
    FIX_COST: float = 50

    def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
                 admin_config: AdminConfig = None, options=None):
        super().__init__(name, dvm_config, nip89config, admin_config, options)

    def is_input_supported(self, tags):
        """Require a url input; a text prompt is optional (e.g. when a lora is set).

        Bug fix: the original `not (output == "image/png" or "image/jpg" or ...)`
        was always False ("image/jpg" is a truthy literal), so unsupported
        output formats were never rejected. A membership test expresses the
        intended check; the empty string is also rejected, as before.
        """
        hasurl = False
        hasprompt = False
        for tag in tags:
            if tag.as_vec()[0] == 'i':
                input_type = tag.as_vec()[2]
                if input_type == "url":
                    hasurl = True
                elif input_type == "text":
                    hasprompt = True  # prompt is optional when a lora is set
            elif tag.as_vec()[0] == 'output':
                output = tag.as_vec()[1]
                if output not in ("image/png", "image/jpg",
                                  "image/png;format=url", "image/jpg;format=url"):
                    print("Output format not supported, skipping..")
                    return False
        if not hasurl:
            return False
        return True

    def create_request_from_nostr_event(self, event, client=None, dvm_config=None):
        """Translate a NIP90 event into a NOVA-Server request form for the SDXL img2img trainer.

        Reads the source image url and optional prompt from 'i' tags, plus
        optional 'param' tags (negative_prompt, lora, lora_weight, strength,
        guidance_scale, ratio, size, model). Defaults for strength (0.8) and
        guidance_scale (11.0) may be overridden by instance options.
        """
        request_form = {"jobID": event.id().to_hex() + "_" + self.NAME.replace(" ", "")}
        request_form["trainerFilePath"] = r'modules\stablediffusionxl\stablediffusionxl-img2img.trainer'

        prompt = ""
        negative_prompt = ""
        url = ""
        if self.options.get("default_model"):
            model = self.options['default_model']
        else:
            model = "stabilityai/stable-diffusion-xl-refiner-1.0"
        ratio_width = "1"
        ratio_height = "1"
        width = ""
        height = ""
        if self.options.get("default_lora") and self.options.get("default_lora") != "":
            lora = self.options['default_lora']
        else:
            lora = ""
        lora_weight = ""
        if self.options.get("strength"):
            strength = float(self.options['strength'])
        else:
            strength = 0.8
        if self.options.get("guidance_scale"):
            guidance_scale = float(self.options['guidance_scale'])
        else:
            guidance_scale = 11.0

        for tag in event.tags():
            if tag.as_vec()[0] == 'i':
                input_type = tag.as_vec()[2]
                if input_type == "text":
                    prompt = tag.as_vec()[1]
                elif input_type == "url":
                    url = tag.as_vec()[1]
            elif tag.as_vec()[0] == 'param':
                print("Param: " + tag.as_vec()[1] + ": " + tag.as_vec()[2])
                if tag.as_vec()[1] == "negative_prompt":
                    negative_prompt = tag.as_vec()[2]
                elif tag.as_vec()[1] == "lora":
                    lora = tag.as_vec()[2]
                elif tag.as_vec()[1] == "lora_weight":
                    lora_weight = tag.as_vec()[2]
                elif tag.as_vec()[1] == "strength":
                    strength = float(tag.as_vec()[2])
                elif tag.as_vec()[1] == "guidance_scale":
                    guidance_scale = float(tag.as_vec()[2])
                elif tag.as_vec()[1] == "ratio":
                    # Either two separate values ("4" "3") or one "4:3" string.
                    if len(tag.as_vec()) > 3:
                        ratio_width = (tag.as_vec()[2])
                        ratio_height = (tag.as_vec()[3])
                    elif len(tag.as_vec()) == 3:
                        split = tag.as_vec()[2].split(":")
                        ratio_width = split[0]
                        ratio_height = split[1]
                # if size is set it will overwrite ratio.
                elif tag.as_vec()[1] == "size":
                    if len(tag.as_vec()) > 3:
                        width = (tag.as_vec()[2])
                        height = (tag.as_vec()[3])
                    elif len(tag.as_vec()) == 3:
                        split = tag.as_vec()[2].split("x")
                        if len(split) > 1:
                            width = split[0]
                            height = split[1]
                elif tag.as_vec()[1] == "model":
                    model = tag.as_vec()[2]

        io_input_image = {
            "id": "input_image",
            "type": "input",
            "src": "url:Image",
            "uri": url
        }
        io_input = {
            "id": "input_prompt",
            "type": "input",
            "src": "request:text",
            "data": prompt
        }
        io_negative = {
            "id": "negative_prompt",
            "type": "input",
            "src": "request:text",
            "data": negative_prompt
        }
        io_output = {
            "id": "output_image",
            "type": "output",
            "src": "request:image"
        }
        request_form['data'] = json.dumps([io_input_image, io_input, io_negative, io_output])

        options = {
            "model": model,
            "ratio": ratio_width + '-' + ratio_height,
            "width": width,
            "height": height,
            "strength": strength,
            "guidance_scale": guidance_scale,
            "lora": lora,
            "lora_weight": lora_weight,
            "n_steps": 30
        }
        request_form['options'] = json.dumps(options)
        return request_form

    def process(self, request_form):
        """Send the request form to NOVA-Server and block until the result is available."""
        try:
            # Call the process route of NOVA-Server with our request form.
            response = send_request_to_nova_server(request_form, self.options['nova_server'])
            if bool(json.loads(response)['success']):
                print("Job " + request_form['jobID'] + " sent to NOVA-server")
                pool = ThreadPool(processes=1)
                thread = pool.apply_async(check_nova_server_status,
                                          (request_form['jobID'], self.options['nova_server']))
                print("Wait for results of NOVA-Server...")
                result = thread.get()
                return result
        except Exception as e:
            raise Exception(e)
# We build an example here that we can call by either calling this file directly from the main directory,
# or by adding it to our playground. You can call the example and adjust it to your needs or redefine it in the
# playground or elsewhere
def build_example(name, identifier, admin_config, server_address, default_lora="", strength=0.6):
    """Assemble a ready-to-run ImageGenerationSDXLIMG2IMG DVM instance for the playground."""
    dvm_config = DVMConfig()
    dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier)
    dvm_config.LNBITS_INVOICE_KEY = os.getenv("LNBITS_INVOICE_KEY")
    dvm_config.LNBITS_URL = os.getenv("LNBITS_HOST")

    nip90params = {
        "negative_prompt": {"required": False, "values": []},
        "lora": {"required": False, "values": ["inkpunk", "timburton", "voxel"]},
        "strength": {"required": False, "values": []},
    }
    nip89info = {
        "name": name,
        "image": "https://image.nostr.build/229c14e440895da30de77b3ca145d66d4b04efb4027ba3c44ca147eecde891f1.jpg",
        "about": "I convert an image to another image, kinda random for now. ",
        "encryptionSupported": True,
        "cashuAccepted": True,
        "nip90Params": nip90params,
    }

    # A module might have options it can be initialized with; here we set a
    # default lora, the strength and the nova-server address.
    options = {'default_lora': default_lora, 'strength': strength, 'nova_server': server_address}

    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY,
                                           nip89info["image"])
    nip89config.CONTENT = json.dumps(nip89info)
    # We add an optional AdminConfig for this one, and tell the dvm to rebroadcast its NIP89
    return ImageGenerationSDXLIMG2IMG(name=name, dvm_config=dvm_config, nip89config=nip89config,
                                      admin_config=admin_config, options=options)
if __name__ == '__main__':
    # Load the environment before anything else; fail loudly when it is missing.
    env_path = Path('.env')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Image Converter Inkpunk", "image2image", admin_config,
                        os.getenv("NOVA_SERVER"), "", 0.6)
    dvm.run()
    keep_alive()

View File

@ -1,164 +0,0 @@
import json
import os
from multiprocessing.pool import ThreadPool
from pathlib import Path
import dotenv
from backends.nova_server import check_nova_server_status, send_request_to_nova_server
from interfaces.dvmtaskinterface import DVMTaskInterface
from utils.admin_utils import AdminConfig
from utils.backend_utils import keep_alive
from utils.dvmconfig import DVMConfig
from utils.nip89_utils import NIP89Config, check_and_set_d_tag
from utils.definitions import EventDefinitions
from utils.nostr_utils import check_and_set_private_key
"""
This File contains a Module to extract a prompt from an image from an url.
Accepted Inputs: link to image (url)
Outputs: An textual description of the image
"""
class ImageInterrogator(DVMTaskInterface):
    """NIP90 image-to-text DVM that derives a prompt/analysis from an image url on NOVA-Server."""

    KIND: int = EventDefinitions.KIND_NIP90_EXTRACT_TEXT
    TASK: str = "image-to-text"
    FIX_COST: float = 80

    def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
                 admin_config: AdminConfig = None, options=None):
        super().__init__(name, dvm_config, nip89config, admin_config, options)

    def is_input_supported(self, tags):
        """Accept the request only when at least one 'i' tag carries a url input."""
        url_found = False
        for tag in tags:
            vec = tag.as_vec()
            if vec[0] == 'i' and vec[2] == "url":
                url_found = True
        return url_found

    def create_request_from_nostr_event(self, event, client=None, dvm_config=None):
        """Translate a NIP90 event into a NOVA-Server request form for the image interrogator.

        Reads the image url from the 'i' tag and the optional 'method'
        ("prompt"/"analysis") and 'mode' params.
        """
        job_id = event.id().to_hex() + "_" + self.NAME.replace(" ", "")
        request_form = {
            "jobID": job_id,
            "trainerFilePath": r'modules\image_interrogator\image_interrogator.trainer',
        }
        media_url = ""
        method = "prompt"
        mode = "best"
        for tag in event.tags():
            vec = tag.as_vec()
            if vec[0] == 'i':
                if vec[2] == "url":
                    media_url = vec[1]
            elif vec[0] == 'param':
                print("Param: " + vec[1] + ": " + vec[2])
                if vec[1] == "method":
                    method = vec[2]
                elif vec[1] == "mode":
                    mode = vec[2]

        request_form['data'] = json.dumps([
            {"id": "input_image", "type": "input", "src": "url:Image", "uri": media_url},
            {"id": "output", "type": "output", "src": "request:text"},
        ])
        request_form['options'] = json.dumps({"kind": method, "mode": mode})
        return request_form

    def process(self, request_form):
        """Send the request form to NOVA-Server and block until the result is available."""
        try:
            # Hand the job to NOVA-Server's process route.
            reply = send_request_to_nova_server(request_form, self.options['nova_server'])
            if bool(json.loads(reply)['success']):
                print("Job " + request_form['jobID'] + " sent to NOVA-server")
                pool = ThreadPool(processes=1)
                pending = pool.apply_async(check_nova_server_status,
                                           (request_form['jobID'], self.options['nova_server']))
                print("Wait for results of NOVA-Server...")
                return pending.get()
        except Exception as e:
            raise Exception(e)
# We build an example here that we can call by either calling this file directly from the main directory,
# or by adding it to our playground. You can call the example and adjust it to your needs or redefine it in the
# playground or elsewhere
def build_example(name, identifier, admin_config, server_address):
    """Assemble a ready-to-run ImageInterrogator DVM instance for the playground."""
    dvm_config = DVMConfig()
    dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier)
    dvm_config.LNBITS_INVOICE_KEY = os.getenv("LNBITS_INVOICE_KEY")
    dvm_config.LNBITS_URL = os.getenv("LNBITS_HOST")

    nip90params = {
        "method": {"required": False, "values": ["prompt", "analysis"]},
        "mode": {"required": False, "values": ["best", "classic", "fast", "negative"]},
    }
    nip89info = {
        "name": name,
        "image": "https://image.nostr.build/229c14e440895da30de77b3ca145d66d4b04efb4027ba3c44ca147eecde891f1.jpg",
        "about": "I analyse Images an return a prompt or a prompt analysis",
        "encryptionSupported": True,
        "cashuAccepted": True,
        "nip90Params": nip90params,
    }

    # The only option this module needs is the nova-server address.
    options = {'nova_server': server_address}

    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY,
                                           nip89info["image"])
    nip89config.CONTENT = json.dumps(nip89info)
    # We add an optional AdminConfig for this one, and tell the dvm to rebroadcast its NIP89
    return ImageInterrogator(name=name, dvm_config=dvm_config, nip89config=nip89config,
                             admin_config=admin_config, options=options)
if __name__ == '__main__':
    # Load the environment before anything else; fail loudly when it is missing.
    env_path = Path('.env')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Image Interrogator", "imageinterrogator", admin_config,
                        os.getenv("NOVA_SERVER"))
    dvm.run()
    keep_alive()

View File

@ -1,158 +0,0 @@
import json
import os
from multiprocessing.pool import ThreadPool
from pathlib import Path
import dotenv
from backends.nova_server import check_nova_server_status, send_request_to_nova_server
from interfaces.dvmtaskinterface import DVMTaskInterface
from utils.admin_utils import AdminConfig
from utils.backend_utils import keep_alive
from utils.dvmconfig import DVMConfig
from utils.nip89_utils import NIP89Config, check_and_set_d_tag
from utils.definitions import EventDefinitions
from utils.nostr_utils import check_and_set_private_key
"""
This File contains a Module to upscale an image from an url by factor 2-4
Accepted Inputs: link to image (url)
Outputs: An url to an Image
Params: -upscale 2,3,4
"""
class ImageUpscale(DVMTaskInterface):
    """NIP90 image-to-image DVM that upscales an image via realESRGAN on NOVA-Server."""

    KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE
    TASK: str = "image-to-image"
    FIX_COST: float = 20

    def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
                 admin_config: AdminConfig = None, options=None):
        super().__init__(name, dvm_config, nip89config, admin_config, options)

    def is_input_supported(self, tags):
        """Accept the request only when at least one 'i' tag carries a url input."""
        url_found = False
        for tag in tags:
            vec = tag.as_vec()
            if vec[0] == 'i' and vec[2] == "url":
                url_found = True
        return url_found

    def create_request_from_nostr_event(self, event, client=None, dvm_config=None):
        """Translate a NIP90 event into a NOVA-Server request form for realESRGAN.

        Reads the image url from the 'i' tag and the optional 'upscale'
        param (factor, defaulting to 4).
        """
        job_id = event.id().to_hex() + "_" + self.NAME.replace(" ", "")
        request_form = {
            "jobID": job_id,
            "trainerFilePath": r'modules\image_upscale\image_upscale_realesrgan.trainer',
        }
        media_url = ""
        out_scale = 4
        for tag in event.tags():
            vec = tag.as_vec()
            if vec[0] == 'i':
                if vec[2] == "url":
                    media_url = vec[1]
            elif vec[0] == 'param':
                print("Param: " + vec[1] + ": " + vec[2])
                if vec[1] == "upscale":
                    out_scale = vec[2]

        request_form['data'] = json.dumps([
            {"id": "input_image", "type": "input", "src": "url:Image", "uri": media_url},
            {"id": "output_image", "type": "output", "src": "request:image"},
        ])
        request_form['options'] = json.dumps({"outscale": out_scale})
        return request_form

    def process(self, request_form):
        """Send the request form to NOVA-Server and block until the result is available."""
        try:
            # Hand the job to NOVA-Server's process route.
            reply = send_request_to_nova_server(request_form, self.options['nova_server'])
            if bool(json.loads(reply)['success']):
                print("Job " + request_form['jobID'] + " sent to NOVA-server")
                pool = ThreadPool(processes=1)
                pending = pool.apply_async(check_nova_server_status,
                                           (request_form['jobID'], self.options['nova_server']))
                print("Wait for results of NOVA-Server...")
                return pending.get()
        except Exception as e:
            raise Exception(e)
def build_example(name, identifier, admin_config, server_address):
    """Assemble an ImageUpscale DVM instance.

    Callable directly from the main directory or from the playground; adjust
    or redefine it there as needed.

    :param name: display name of the DVM.
    :param identifier: key used to derive/look up the private key and d-tag.
    :param admin_config: optional AdminConfig for the DVM.
    :param server_address: address of the NOVA-Server backend.
    :return: a configured ImageUpscale instance.
    """
    dvm_config = DVMConfig()
    dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier)
    dvm_config.LNBITS_INVOICE_KEY = os.getenv("LNBITS_INVOICE_KEY")
    dvm_config.LNBITS_URL = os.getenv("LNBITS_HOST")

    nip90params = {
        "upscale": {
            "required": False,
            "values": ["2", "3", "4"]
        }
    }
    nip89info = {
        "name": name,
        "image": "https://image.nostr.build/229c14e440895da30de77b3ca145d66d4b04efb4027ba3c44ca147eecde891f1.jpg",
        "about": "I upscale an image using realESRGan up to factor 4 (default is factor 4)",
        "encryptionSupported": True,
        "cashuAccepted": True,
        "nip90Params": nip90params
    }
    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"])
    nip89config.CONTENT = json.dumps(nip89info)

    # Task-specific options; here only the NOVA-Server address is needed.
    options = {'nova_server': server_address}

    return ImageUpscale(name=name, dvm_config=dvm_config, nip89config=nip89config,
                        admin_config=admin_config, options=options)
if __name__ == '__main__':
    # Require a .env file next to the working directory before starting.
    env_path = Path('.env')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Image Upscaler", "imageupscale", admin_config, os.getenv("NOVA_SERVER"))
    dvm.run()
    keep_alive()

View File

@ -1,205 +0,0 @@
import json
import os
import time
from multiprocessing.pool import ThreadPool
from pathlib import Path
import dotenv
from backends.nova_server import check_nova_server_status, send_request_to_nova_server, send_file_to_nova_server
from interfaces.dvmtaskinterface import DVMTaskInterface
from utils.admin_utils import AdminConfig
from utils.backend_utils import keep_alive
from utils.dvmconfig import DVMConfig
from utils.mediasource_utils import organize_input_media_data
from utils.nip89_utils import NIP89Config, check_and_set_d_tag
from utils.definitions import EventDefinitions
from utils.nostr_utils import check_and_set_private_key
"""
This File contains a Module to transform A media file input on NOVA-Server and receive results back.
Accepted Inputs: Url to media file (url)
Outputs: Transcribed text
"""
class SpeechToTextWhisperX(DVMTaskInterface):
    """Extract text from a media file with a WhisperX module on NOVA-Server.

    Accepted inputs: a URL to a media file ('url' input type).
    Output: transcribed text.
    """

    KIND: int = EventDefinitions.KIND_NIP90_EXTRACT_TEXT  # NIP-90 extract-text job kind
    TASK: str = "speech-to-text"
    FIX_COST: float = 10     # fixed base price
    PER_UNIT_COST: float = 0.1  # price per unit — presumably per second of media; TODO confirm

    def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
                 admin_config: AdminConfig = None, options=None):
        super().__init__(name, dvm_config, nip89config, admin_config, options)

    def is_input_supported(self, tags):
        """Accept only 'url' inputs and, when given, a 'text/plain' output format."""
        # FIX: removed the unused input_value local from the original.
        for tag in tags:
            if tag.as_vec()[0] == 'i':
                if tag.as_vec()[2] != "url":
                    return False
            elif tag.as_vec()[0] == 'output':
                output = tag.as_vec()[1]
                if output == "" or not (output == "text/plain"):
                    print("Output format not supported, skipping..")
                    return False
        return True

    @staticmethod
    def _parse_timestamp(stamp):
        """Convert a "HH:MM:SS" or "MM:SS" string to seconds as float.

        :raises ValueError: when the string matches neither format.
        """
        try:
            t = time.strptime(stamp, "%H:%M:%S")
            return float(t.tm_hour * 60 * 60 + t.tm_min * 60 + t.tm_sec)
        except ValueError:
            t = time.strptime(stamp, "%M:%S")
            return float(t.tm_min * 60 + t.tm_sec)

    def create_request_from_nostr_event(self, event, client=None, dvm_config=None):
        """Translate a NIP-90 request event into a NOVA-Server request form.

        Downloads/trims the referenced media via organize_input_media_data,
        uploads it to NOVA-Server and returns the filled request form.

        :param event: nostr event carrying 'i' and 'param' tags.
        :param client: nostr client passed through to the media organizer.
        :param dvm_config: DVM configuration passed through to the organizer.
        :return: dict request form ready for send_request_to_nova_server.
        """
        request_form = {"jobID": event.id().to_hex() + "_" + self.NAME.replace(" ", ""),
                        "trainerFilePath": 'modules\\whisperx\\whisperx_transcript.trainer'}

        # Defaults, overridable via instance options and per-request params.
        if self.options.get("default_model"):
            model = self.options['default_model']
        else:
            model = "base"
        if self.options.get("alignment"):
            alignment = self.options['alignment']
        else:
            alignment = "raw"

        url = ""
        input_type = "url"
        start_time = 0
        end_time = 0
        media_format = "audio/mp3"

        for tag in event.tags():
            if tag.as_vec()[0] == 'i':
                input_type = tag.as_vec()[2]
                if input_type == "url":
                    url = tag.as_vec()[1]
            elif tag.as_vec()[0] == 'param':
                print("Param: " + tag.as_vec()[1] + ": " + tag.as_vec()[2])
                if tag.as_vec()[1] == "alignment":
                    alignment = tag.as_vec()[2]
                elif tag.as_vec()[1] == "model":
                    model = tag.as_vec()[2]
                elif tag.as_vec()[1] == "range":
                    # "range" carries [_, "range", start, end] as timestamps.
                    try:
                        start_time = self._parse_timestamp(tag.as_vec()[2])
                    except Exception:
                        # FIX: the original left start_time as the raw string
                        # here; convert to float like the end_time fallback.
                        start_time = float(tag.as_vec()[2])
                    try:
                        end_time = self._parse_timestamp(tag.as_vec()[3])
                    except Exception:
                        end_time = float(tag.as_vec()[3])

        filepath = organize_input_media_data(url, input_type, start_time, end_time, dvm_config, client,
                                             True, media_format)
        path_on_server = send_file_to_nova_server(os.path.realpath(filepath), self.options['nova_server'])

        io_input = {
            "id": "audio",
            "type": "input",
            "src": "file:stream",
            "uri": path_on_server
        }
        io_output = {
            "id": "transcript",
            "type": "output",
            "src": "request:annotation:free"
        }
        request_form['data'] = json.dumps([io_input, io_output])
        request_form['options'] = json.dumps({
            "model": model,
            "alignment_mode": alignment,
        })
        return request_form

    def process(self, request_form):
        """Send the request form to NOVA-Server and block until the transcript
        result is available.

        :raises Exception: wrapping any error from submission or polling.
        """
        try:
            # Call the process route of NOVA-Server with our request form.
            response = send_request_to_nova_server(request_form, self.options['nova_server'])
            if bool(json.loads(response)['success']):
                print("Job " + request_form['jobID'] + " sent to NOVA-server")
                pool = ThreadPool(processes=1)
                thread = pool.apply_async(check_nova_server_status,
                                          (request_form['jobID'], self.options['nova_server']))
                print("Wait for results of NOVA-Server...")
                return thread.get()
            # NOTE(review): when success is False we implicitly return None —
            # confirm callers treat None as failure.
        except Exception as e:
            # FIX: chain the cause so the original traceback is not discarded.
            raise Exception(e) from e
def build_example(name, identifier, admin_config, server_address):
    """Assemble a SpeechToTextWhisperX DVM instance.

    Callable directly from the main directory or from the playground; adjust
    or redefine it there as needed.

    :param name: display name of the DVM.
    :param identifier: key used to derive/look up the private key and d-tag.
    :param admin_config: optional AdminConfig for the DVM.
    :param server_address: address of the NOVA-Server backend.
    :return: a configured SpeechToTextWhisperX instance.
    """
    dvm_config = DVMConfig()
    dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier)
    dvm_config.LNBITS_INVOICE_KEY = os.getenv("LNBITS_INVOICE_KEY")
    dvm_config.LNBITS_URL = os.getenv("LNBITS_HOST")

    # Task options: default whisper model plus the NOVA-Server address.
    # These parameters can be freely defined in the task component.
    options = {'default_model': "base", 'nova_server': server_address}

    nip90params = {
        "model": {
            "required": False,
            "values": ["base", "tiny", "small", "medium", "large-v1", "large-v2", "tiny.en", "base.en", "small.en",
                       "medium.en"]
        },
        "alignment": {
            "required": False,
            "values": ["raw", "segment", "word"]
        }
    }
    nip89info = {
        "name": name,
        "image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg",
        "about": "I extract text from media files with WhisperX",
        "encryptionSupported": True,
        "cashuAccepted": True,
        "nip90Params": nip90params
    }
    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY, nip89info["image"])
    nip89config.CONTENT = json.dumps(nip89info)

    return SpeechToTextWhisperX(name=name, dvm_config=dvm_config, nip89config=nip89config,
                                admin_config=admin_config, options=options)
if __name__ == '__main__':
    # Require a .env file next to the working directory before starting.
    env_path = Path('.env')
    if not env_path.is_file():
        raise FileNotFoundError(f'.env file not found at {env_path} ')
    print(f'loading environment from {env_path.resolve()}')
    dotenv.load_dotenv(env_path, verbose=True, override=True)

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Whisperer", "whisperx", admin_config, os.getenv("NOVA_SERVER"))
    dvm.run()
    keep_alive()