import json
import os
from io import BytesIO

import requests
from PIL import Image

from interfaces.dvmtaskinterface import DVMTaskInterface
from utils.admin_utils import AdminConfig
from utils.definitions import EventDefinitions
from utils.dvmconfig import DVMConfig
from utils.nip89_utils import NIP89Config
from utils.output_utils import upload_media_to_hoster

"""
|
|
This File contains a Module to transform Text input on NOVA-Server and receive results back.
|
|
|
|
Accepted Inputs: Prompt (text)
|
|
Outputs: An url to an Image
|
|
Params: -model # models: juggernaut, dynavision, colossusProject, newreality, unstable
|
|
-lora # loras (weights on top of models) voxel,
|
|
"""
|
|
|
|
|
|
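# A hypothetical NIP-90 request this module would accept, written out as the
# tag vectors that the methods below iterate over (values are illustrative
# only, not taken from a real event):
#   ["i", "a purple ostrich riding a rocket", "text"]
#   ["param", "size", "1024x1024"]   # or: ["param", "size", "1024", "1024"]
#   ["param", "model", "dall-e-3"]
#   ["param", "quality", "hd"]
#   ["output", "image/png;format=url"]
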
class ImageGenerationDALLE(DVMTaskInterface):
    KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE
    TASK: str = "text-to-image"
    FIX_COST: float = 120

    def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
                 admin_config: AdminConfig = None, options=None):
        super().__init__(name, dvm_config, nip89config, admin_config, options)

    def is_input_supported(self, tags):
        for tag in tags:
            if tag.as_vec()[0] == 'i':
                input_type = tag.as_vec()[2]
                if input_type != "text":
                    return False

            elif tag.as_vec()[0] == 'output':
                output = tag.as_vec()[1]
                # The original condition (output == "image/png" or "image/jpg" ...)
                # was always truthy because of the bare string literal; a membership
                # test expresses the intended whitelist and also rejects empty values.
                if output not in ("image/png", "image/jpg",
                                  "image/png;format=url", "image/jpg;format=url"):
                    print("Output format not supported, skipping..")
                    return False

        return True
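
    # Hedged examples of the output check above, following directly from the
    # membership test (not from observed behavior):
    #   "image/png"            -> accepted
    #   "image/jpg;format=url" -> accepted
    #   "" or "text/plain"     -> rejected ("Output format not supported")
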
    def create_request_form_from_nostr_event(self, event, client=None, dvm_config=None):
        request_form = {"jobID": event.id().to_hex() + "_" + self.NAME.replace(" ", "")}
        prompt = ""
        width = "1024"
        height = "1024"
        model = "dall-e-3"
        quality = "standard"

        for tag in event.tags():
            if tag.as_vec()[0] == 'i':
                input_type = tag.as_vec()[2]
                if input_type == "text":
                    prompt = tag.as_vec()[1]

            elif tag.as_vec()[0] == 'param':
                print("Param: " + tag.as_vec()[1] + ": " + tag.as_vec()[2])
                if tag.as_vec()[1] == "size":
                    # The size param may arrive as two separate values or as a
                    # single "WIDTHxHEIGHT" string; support both forms.
                    if len(tag.as_vec()) > 3:
                        width = tag.as_vec()[2]
                        height = tag.as_vec()[3]
                    elif len(tag.as_vec()) == 3:
                        split = tag.as_vec()[2].split("x")
                        if len(split) > 1:
                            width = split[0]
                            height = split[1]
                elif tag.as_vec()[1] == "model":
                    model = tag.as_vec()[2]
                elif tag.as_vec()[1] == "quality":
                    quality = tag.as_vec()[2]

        options = {
            "prompt": prompt,
            "size": width + "x" + height,
            "model": model,
            "quality": quality,
            "number": 1
        }
        request_form['options'] = json.dumps(options)

        return request_form
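
    # For the request tags sketched at the top of the file, the resulting form
    # would look roughly like this (jobID abbreviated; note that 'options' is a
    # JSON string which process() reads back via DVMTaskInterface.set_options()):
    #   {"jobID": "<event-id-hex>_<dvm-name>",
    #    "options": '{"prompt": "...", "size": "1024x1024",
    #                 "model": "dall-e-3", "quality": "hd", "number": 1}'}
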
    def process(self, request_form):
        try:
            options = DVMTaskInterface.set_options(request_form)

            from openai import OpenAI
            client = OpenAI()
            print("Job " + request_form['jobID'] + " sent to OpenAI API..")

            response = client.images.generate(
                model=options['model'],
                prompt=options['prompt'],
                size=options['size'],
                quality=options['quality'],
                n=int(options['number']),
            )

            image_url = response.data[0].url
            # Rehost the result instead of relying on the OpenAI link, which expires.
            response = requests.get(image_url)
            image = Image.open(BytesIO(response.content)).convert("RGB")
            os.makedirs("./outputs", exist_ok=True)  # ensure the output dir exists
            image.save("./outputs/image.jpg")
            result = upload_media_to_hoster("./outputs/image.jpg")
            return result

        except Exception as e:
            print("Error in Module: " + str(e))
            raise Exception(e)
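

# Minimal usage sketch (hypothetical wiring; in the real project the surrounding
# framework constructs and runs tasks). OpenAI() reads OPENAI_API_KEY from the
# environment, so it must be set before process() is called. DVMConfig() and
# NIP89Config() are assumed to be default-constructible here.
if __name__ == '__main__':
    dvm_config = DVMConfig()
    nip89config = NIP89Config()
    dvm = ImageGenerationDALLE(name="Dall-E 3", dvm_config=dvm_config,
                               nip89config=nip89config)
    print(dvm.TASK, dvm.FIX_COST)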