mirror of
https://github.com/believethehype/nostrdvm.git
synced 2025-07-28 18:32:13 +02:00
Merge branch 'main' into nserver-modules
main.py (5 changed lines)
@@ -10,7 +10,7 @@ import tasks.textextraction_pdf as textextraction_pdf
 import tasks.textextraction_google as textextraction_google
 import tasks.translation_google as translation_google
 import tasks.translation_libretranslate as translation_libretranslate
-from tasks import imagegeneration_replicate_sdxl, videogeneration_replicate_svd, imagegeneration_sdxl, trending_notes_nostrband
+from tasks import imagegeneration_replicate_sdxl, videogeneration_replicate_svd, imagegeneration_sdxl, trending_notes_nostrband, textgeneration_llmlite

 from utils.admin_utils import AdminConfig
 from utils.backend_utils import keep_alive
@@ -141,6 +141,9 @@ def playground():
     bot_config.SUPPORTED_DVMS.append(trending)
     trending.run()

+    ollama = textgeneration_llmlite.build_example("LLM", "llmlite", admin_config)
+    bot_config.SUPPORTED_DVMS.append(ollama)
+    ollama.run()
     # Run the bot
     Bot(bot_config)
     # Keep the main function alive for libraries that require it, like openai
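
Every module wired into playground() follows the same build/register/run pattern as the three added ollama lines. A minimal sketch of that pattern ("some_module" is a hypothetical placeholder; the other names appear in this diff):

    dvm = some_module.build_example("Display Name", "identifier", admin_config)
    bot_config.SUPPORTED_DVMS.append(dvm)  # register the DVM with the bot
    dvm.run()                              # start the DVM
    Bot(bot_config)                        # then start the bot over the same config
    keep_alive()                           # keep the main function alive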

requirements.txt
@@ -1,35 +1,73 @@
aiohttp==3.9.1
aiosignal==1.3.1
anyio==3.7.1
appdirs==1.4.4
asn1crypto==1.5.1
async-timeout==4.0.3
attrs==23.1.0
base58==2.1.1
beautifulsoup4==4.12.2
bech32==1.2.0
bip32==3.4
bitarray==2.8.3
bitstring==3.1.9
blessed==1.20.0
bolt11==2.0.5
cassidy==0.1.4
certifi==2023.7.22
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
coincurve==18.0.0
cryptography==41.0.4
decorator==4.4.2
distro==1.8.0
ecdsa==0.18.0
emoji==2.8.0
enumb==0.1.5
environs==9.5.0
eva-decord==0.6.1
exceptiongroup==1.1.3
expo==0.1.2
fastapi==0.103.0
ffmpegio==0.8.5
ffmpegio-core==0.8.5
filelock==3.13.1
frozenlist==1.4.0
fsspec==2023.12.1
h11==0.14.0
httpcore==0.18.0
httpx==0.25.1
huggingface-hub==0.19.4
idna==3.4
imageio==2.33.0
imageio-ffmpeg==0.4.9
importlib-metadata==6.8.0
inquirer==3.1.3
install==1.3.5
instaloader==4.10.1
Jinja2==3.1.2
litellm==1.12.3
lnurl==0.4.1
loguru==0.7.2
MarkupSafe==2.1.3
marshmallow==3.20.1
mediatype==0.1.6
mnemonic==0.20
moviepy==2.0.0.dev2
multidict==6.0.4
nostr-sdk==0.0.5
numpy==1.26.2
openai==1.3.5
outcome==1.2.0
packaging==23.2
pandas==2.1.3
Pillow==10.1.0
pluggy==1.3.0
proglog==0.1.10
pycparser==2.21
pycryptodome==3.19.0
pycryptodomex==3.19.0
pydantic==1.10.13
pydantic_core==2.14.5
pypdf==3.17.1
@@ -40,20 +78,31 @@ pytube==15.0.0
pytz==2023.3.post1
PyUpload==0.1.4
pyuseragents==1.0.5
PyYAML==6.0.1
readchar==4.0.5
regex==2023.10.3
replicate==0.21.1
Represent==1.6.0.post0
requests==2.31.0
requests-toolbelt==1.0.0
replicate==0.21.1
safeIO==1.2
six==1.16.0
sniffio==1.3.0
socksio==1.0.0
soupsieve==2.5
SpeechRecognition==3.10.0
SQLAlchemy==1.3.24
sqlalchemy-aio==0.17.0
starlette==0.27.0
tiktoken==0.5.2
tokenizers==0.15.0
tqdm==4.66.1
translatepy==2.3
typing_extensions==4.8.0
tzdata==2023.3
urllib3==2.1.0
uvicorn==0.23.2
wcwidth==0.2.10
--use-pep517
secp256k1
cashu
websocket-client==1.6.4
yarl==1.9.4
zipp==3.17.0
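
Among the new pins, litellm==1.12.3 backs the new tasks/textgeneration_llmlite.py module below. A minimal sketch of the call the module builds on (model name and server address match the module's defaults; a local OLLAMA instance is assumed to be running):

    from litellm import completion

    # LiteLLM routes the chat-style request to the local OLLAMA server.
    response = completion(
        model="ollama/llama2-uncensored",
        messages=[{"content": "Say hello.", "role": "user"}],
        api_base="http://localhost:11434",
    )
    print(response.choices[0].message.content)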

@@ -201,7 +201,6 @@ if __name__ == '__main__':
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Advanced Nostr Search", "discovery_content_search", admin_config)
    dvm.run()

@@ -130,7 +130,6 @@ if __name__ == '__main__':
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Media Bringer", "media_converter", admin_config)
    dvm.run()

@@ -224,8 +224,6 @@ if __name__ == '__main__':
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Bygones", "discovery_inactive_follows", admin_config)
    dvm.run()

@@ -170,8 +170,6 @@ if __name__ == '__main__':
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Dall-E 3", "dalle3", admin_config)
    dvm.run()

@@ -167,8 +167,6 @@ if __name__ == '__main__':
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Stable Diffusion XL", "replicate_sdxl", admin_config)
    dvm.run()

@@ -178,7 +178,6 @@ if __name__ == '__main__':
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""
    dvm = build_example("Transcriptor", "speech_recognition", admin_config)
    dvm.run()

@@ -133,7 +133,6 @@ if __name__ == '__main__':
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""
    dvm = build_example("PDF Extractor", "pdf_extractor", admin_config)
    dvm.run()

tasks/textgeneration_llmlite.py (new file, 148 lines)
@@ -0,0 +1,148 @@
import json
import os
from io import BytesIO
from pathlib import Path

import dotenv
import requests
from litellm import completion

from interfaces.dvmtaskinterface import DVMTaskInterface
from utils.admin_utils import AdminConfig
from utils.backend_utils import keep_alive
from utils.definitions import EventDefinitions
from utils.dvmconfig import DVMConfig
from utils.nip89_utils import NIP89Config, check_and_set_d_tag
from utils.nostr_utils import check_and_set_private_key
from utils.output_utils import upload_media_to_hoster
from utils.zap_utils import get_price_per_sat, check_and_set_ln_bits_keys
from nostr_sdk import Keys
"""
|
||||
This File contains a Module to transform Text input on OpenAI's servers with DALLE-3 and receive results back.
|
||||
|
||||
Accepted Inputs: Prompt (text)
|
||||
Outputs: An url to an Image
|
||||
"""


class TextGenerationOLLAMA(DVMTaskInterface):
    KIND: int = EventDefinitions.KIND_NIP90_GENERATE_TEXT
    TASK: str = "text-to-text"
    FIX_COST: float = 0

    def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,
                 admin_config: AdminConfig = None, options=None):
        super().__init__(name, dvm_config, nip89config, admin_config, options)
        if options is not None and options.get("server"):
            self.options["server"] = options["server"]

    def is_input_supported(self, tags):
        for tag in tags:
            if tag.as_vec()[0] == 'i':
                input_value = tag.as_vec()[1]
                input_type = tag.as_vec()[2]
                if input_type != "text":
                    return False
        return True

    def create_request_from_nostr_event(self, event, client=None, dvm_config=None):
        request_form = {"jobID": event.id().to_hex() + "_" + self.NAME.replace(" ", "")}
        prompt = ""
        model = "ollama/llama2-uncensored"  # ollama/nous-hermes  # This requires an instance of OLLAMA running
        # model = "gpt-4-1106-preview"  # This will call chatgpt and requires an OpenAI API Key set in .env
        server = "http://localhost:11434"
        if self.options is not None and self.options.get("server"):
            server = self.options["server"]  # honor a server passed via the options dict (see __init__)

        for tag in event.tags():
            if tag.as_vec()[0] == 'i':
                input_type = tag.as_vec()[2]
                if input_type == "text":
                    prompt = tag.as_vec()[1]

        options = {
            "prompt": prompt,
            "model": model,
            "server": server
        }
        request_form['options'] = json.dumps(options)

        return request_form

    def process(self, request_form):
        options = DVMTaskInterface.set_options(request_form)

        try:
            if options["model"].startswith("ollama"):
                # Local OLLAMA models need api_base pointing at the OLLAMA server
                response = completion(
                    model=options["model"],
                    messages=[{"content": options["prompt"], "role": "user"}],
                    api_base=options["server"]
                )
            else:
                # Hosted models (e.g. OpenAI) are resolved by LiteLLM from the environment
                response = completion(
                    model=options["model"],
                    messages=[{"content": options["prompt"], "role": "user"}],
                )
            print(response.choices[0].message.content)
            return response.choices[0].message.content

        except Exception as e:
            print("Error in Module: " + str(e))
            raise Exception(e)


# We build an example here that we can call by either calling this file directly from the main directory,
# or by adding it to our playground. You can call the example and adjust it to your needs, or redefine it in the
# playground or elsewhere.
def build_example(name, identifier, admin_config):
    dvm_config = DVMConfig()
    dvm_config.PRIVATE_KEY = check_and_set_private_key(identifier)
    npub = Keys.from_sk_str(dvm_config.PRIVATE_KEY).public_key().to_bech32()
    invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys(identifier, npub)
    dvm_config.LNBITS_INVOICE_KEY = invoice_key
    dvm_config.LNBITS_ADMIN_KEY = admin_key  # The dvm might pay failed jobs back
    dvm_config.LNBITS_URL = os.getenv("LNBITS_HOST")
    admin_config.LUD16 = lnaddress

    nip90params = {
        "size": {
            "required": False,
            "values": ["1024:1024", "1024x1792", "1792x1024"]
        }
    }
    nip89info = {
        "name": name,
        "image": "https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg",
        "about": "I use a LLM connected via OLLAMA",
        "encryptionSupported": True,
        "cashuAccepted": True,
        "nip90Params": nip90params
    }

    nip89config = NIP89Config()
    nip89config.DTAG = check_and_set_d_tag(identifier, name, dvm_config.PRIVATE_KEY,
                                           nip89info["image"])
    nip89config.CONTENT = json.dumps(nip89info)
    # We pass an optional AdminConfig to this one
    return TextGenerationOLLAMA(name=name, dvm_config=dvm_config, nip89config=nip89config, admin_config=admin_config)


if __name__ == '__main__':
    env_path = Path('.env')
    if env_path.is_file():
        print(f'loading environment from {env_path.resolve()}')
        dotenv.load_dotenv(env_path, verbose=True, override=True)
    else:
        raise FileNotFoundError(f'.env file not found at {env_path}')

    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False

    dvm = build_example("LLM", "llmlite", admin_config)
    dvm.run()

    keep_alive()
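
Note that build_example() does not pass an options dict, so the module falls back to the defaults in create_request_from_nostr_event. A hedged sketch of pointing the DVM at a different OLLAMA server through the options handling in __init__ (the host address is an illustrative assumption; dvm_config, nip89config and admin_config are built as in build_example above):

    # Hypothetical server override; the address is an assumption, not part of this commit.
    dvm = TextGenerationOLLAMA(name="LLM", dvm_config=dvm_config,
                               nip89config=nip89config, admin_config=admin_config,
                               options={"server": "http://my-ollama-host:11434"})
    dvm.run()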

@@ -160,7 +160,6 @@ if __name__ == '__main__':
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""
    dvm = build_example("Google Translator", "google_translator", admin_config)
    dvm.run()

@@ -161,7 +161,6 @@ if __name__ == '__main__':
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""
    dvm = build_example("Libre Translator", "libre_translator", admin_config)
    dvm.run()

@@ -149,7 +149,6 @@ if __name__ == '__main__':
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Trending Notes on Nostr.band", "trending_notes_nostrband", admin_config)
    dvm.run()

@@ -156,7 +156,6 @@ if __name__ == '__main__':
    admin_config = AdminConfig()
    admin_config.REBROADCAST_NIP89 = False
    admin_config.UPDATE_PROFILE = False
    admin_config.LUD16 = ""

    dvm = build_example("Stable Video Diffusion", "replicate_svd", admin_config)
    dvm.run()