Mirror of https://github.com/karl0ss/ai_image_frame_server.git (synced 2025-08-13 21:18:28 +01:00)

Compare commits: no commits in common; "main" and "0.2.21" have entirely different histories.

The side-by-side view has been reconstructed below as unified hunks: lines prefixed "-" are the "main" side, lines prefixed "+" are the "0.2.21" side.
bump-my-version configuration (file name not shown in this extract):

@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.3.9"
+current_version = "0.2.21"
 parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
 serialize = ["{major}.{minor}.{patch}"]
 replace = "{new_version}"
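The [tool.bumpversion] table is what bump-my-version reads and rewrites on each release; get_current_version() in the libs/generic.py hunk further down shells out to the same tool. A minimal sketch of that lookup, assuming the bump-my-version CLI's `show` subcommand (the exact invocation is not visible in this diff):

import subprocess

def get_current_version() -> str | None:
    # Assumption: "bump-my-version show current_version" prints the value
    # held in the [tool.bumpversion] table above.
    try:
        result = subprocess.run(
            ["bump-my-version", "show", "current_version"],
            capture_output=True, text=True, check=True,
        )
        return result.stdout.strip()
    except subprocess.CalledProcessError as e:
        print("Error running bump-my-version:", e)
        return None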
Dockerfile:

@@ -4,7 +4,7 @@ FROM python:3.11-slim
 # Set the working directory in the container
 WORKDIR /app
 # Set version label
-ARG VERSION="0.3.9"
+ARG VERSION="0.2.21"
 LABEL version=$VERSION

 # Copy project files into the container
Flask app entrypoint (file name not shown in this extract):

@@ -18,13 +18,6 @@ user_config = load_config()
 app = Flask(__name__)
 app.secret_key = os.environ.get("SECRET_KEY")

-# Make version available to all templates
-from libs.generic import get_current_version
-@app.context_processor
-def inject_version():
-    version = get_current_version()
-    return dict(version=version)
-
 # Inject config into routes that need it
 create_routes.init_app(user_config)
 auth_routes.init_app(user_config)
@@ -46,16 +39,7 @@ from libs.comfyui import create_image

 def scheduled_task():
     print(f"Executing scheduled task at {time.strftime('%Y-%m-%d %H:%M:%S')}")
-    # Generate a random prompt using either OpenWebUI or OpenRouter
-    from libs.generic import create_prompt_with_random_model
-    prompt = create_prompt_with_random_model("Generate a random detailed prompt for stable diffusion.")
-    if prompt:
-        # Select a random model
-        import random
-        model = "Random Image Model"
-        create_image(prompt, model)
-    else:
-        print("Failed to generate a prompt for the scheduled task.")
-
+    create_image(None)

 if user_config["frame"]["auto_regen"] == "True":
     if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
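The auto_regen flag and the WERKZEUG_RUN_MAIN check (which avoids double-starting background work under Flask's dev-server reloader) imply a scheduler is created inside the guarded block; that setup falls outside this hunk. A minimal sketch, assuming APScheduler and a placeholder interval:

from apscheduler.schedulers.background import BackgroundScheduler

# Assumption: the guarded block wires scheduled_task into a background
# scheduler; the real interval and config keys are not shown in this hunk.
if user_config["frame"]["auto_regen"] == "True":
    if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
        scheduler = BackgroundScheduler()
        scheduler.add_job(scheduled_task, "interval", hours=1)  # placeholder interval
        scheduler.start()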
libs/comfyui.py (102 changed lines):
@@ -15,7 +15,7 @@ from tenacity import (
 import nest_asyncio
 from libs.generic import rename_image, load_config, save_prompt
 from libs.create_thumbnail import generate_thumbnail
-from libs.openwebui import create_prompt_on_openwebui
+from libs.ollama import create_prompt_on_openwebui
 nest_asyncio.apply()

 logging.basicConfig(level=logging.INFO)
@@ -122,7 +122,6 @@ def generate_image(
 def select_model(model: str) -> tuple[str, str]:
     use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
     only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
-    use_qwen = json.loads(user_config["comfyui"].get("Qwen", "false").lower())

     if model == "Random Image Model":
         selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
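The flags arrive as strings from an INI-style config, so select_model lowercases them and hands them to json.loads, which maps "true"/"false" onto Python booleans. A quick illustration:

import json

use_flux = json.loads("True".lower())    # "true"  -> True
only_flux = json.loads("false".lower())  # "false" -> False
# Anything that is not valid JSON ("yes", "") raises json.JSONDecodeError.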
@@ -134,8 +133,6 @@ def select_model(model: str) -> tuple[str, str]:
     if model == "Random Image Model":
         if selected_workflow == "FLUX":
             valid_models = user_config["comfyui:flux"]["models"].split(",")
-        elif selected_workflow == "Qwen":
-            valid_models = user_config["comfyui:qwen"]["models"].split(",")
         else: # SDXL
             available_model_list = user_config["comfyui"]["models"].split(",")
             valid_models = list(set(get_available_models()) & set(available_model_list))
@@ -148,11 +145,7 @@ def create_image(prompt: str | None = None, model: str = "Random Image Model") -
     """Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""

     if prompt is None:
-        # Generate a random prompt using either OpenWebUI or OpenRouter
-        from libs.generic import create_prompt_with_random_model
-        prompt = create_prompt_with_random_model("Generate a random detailed prompt for stable diffusion.")
-        if not prompt:
-            logging.error("Failed to generate a prompt.")
+        logging.error("No prompt provided.")
         return

     if not prompt:
@@ -167,95 +160,16 @@ def create_image(prompt: str | None = None, model: str = "Random Image Model") -
             file_name="image",
             comfy_prompt=prompt,
             workflow_path="./workflow_flux.json",
-            prompt_node="CLIP Text Encode (Positive Prompt)",
-            seed_node="RandomNoise",
-            seed_param="noise_seed",
-            save_node="Save Image",
-            save_param="filename_prefix",
-            model_node="UnetLoaderGGUFDisTorchMultiGPU",
-            model_param="unet_name",
-            model=model
-        )
-    elif selected_workflow == "Qwen":
-        generate_image(
-            file_name="image",
-            comfy_prompt=prompt,
-            workflow_path="./workflow_qwen.json",
-            prompt_node="Positive",
-            seed_node="KSampler",
+            prompt_node="Positive Prompt T5",
+            seed_node="Seed",
             seed_param="seed",
-            save_node="Save Image",
-            save_param="filename_prefix",
-            model_node="Load Checkpoint",
-            model_param="ckpt_name",
+            save_node="CivitAI Image Saver",
+            save_param="filename",
+            model_node="UnetLoaderGGUFAdvancedDisTorchMultiGPU",
+            model_param="unet_name",
             model=model
         )
     else: # SDXL
         generate_image("image", comfy_prompt=prompt, model=model)

     logging.info(f"{selected_workflow} generation started with prompt: {prompt}")

-def get_queue_count() -> int:
-    """Fetches the current queue count from ComfyUI (pending + running jobs)."""
-    url = user_config["comfyui"]["comfyui_url"] + "/queue"
-    try:
-        response = requests.get(url)
-        response.raise_for_status()
-        data = response.json()
-        pending = len(data.get("queue_pending", []))
-        running = len(data.get("queue_running", []))
-        return pending + running
-    except Exception as e:
-        logging.error(f"Error fetching queue count: {e}")
-        return 0
-
-def get_queue_details() -> list:
-    """Fetches detailed queue information including model names and prompts."""
-    url = user_config["comfyui"]["comfyui_url"] + "/queue"
-    try:
-        response = requests.get(url)
-        response.raise_for_status()
-        data = response.json()
-        jobs = []
-        for job_list in [data.get("queue_running", []), data.get("queue_pending", [])]:
-            for job in job_list:
-                # Extract prompt data (format: [priority, time, prompt])
-                prompt_data = job[2]
-                model = "Unknown"
-                prompt = "No prompt"
-
-                # Find model loader node (works for SDXL/FLUX/Qwen workflows)
-                for node in prompt_data.values():
-                    if node.get("class_type") in ["CheckpointLoaderSimple", "UnetLoaderGGUFAdvancedDisTorchMultiGPU"]:
-                        model = node["inputs"].get("ckpt_name", "Unknown")
-                        break
-
-                # Find prompt node using class_type pattern and title matching
-                for node in prompt_data.values():
-                    class_type = node.get("class_type", "")
-                    if "CLIPTextEncode" in class_type and "text" in node["inputs"]:
-                        meta = node.get('_meta', {})
-                        title = meta.get('title', '').lower()
-                        if 'positive' in title or 'prompt' in title:
-                            prompt = node["inputs"]["text"]
-                            break
-
-                jobs.append({
-                    "id": job[0],
-                    "model": model.split(".")[0] if model != "Unknown" else model,
-                    "prompt": prompt
-                })
-        return jobs
-    except Exception as e:
-        logging.error(f"Error fetching queue details: {e}")
-        return []
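Both removed helpers poll ComfyUI's /queue endpoint, whose JSON payload carries queue_running and queue_pending arrays (each entry a list whose third element is the prompt graph). A standalone probe, with the host as a placeholder for user_config["comfyui"]["comfyui_url"]:

import requests

data = requests.get("http://comfyui-host:8188/queue", timeout=10).json()
pending = len(data.get("queue_pending", []))
running = len(data.get("queue_running", []))
print(f"{running} running, {pending} pending")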
libs/generic.py (inferred from the libs.generic imports elsewhere in this diff):

@@ -84,8 +84,8 @@ def get_details_from_png(path):
     try:
         # Flux workflow
        data = json.loads(img.info["prompt"])
-        prompt = data['6']['inputs']['text']
-        model = data['38']['inputs']['unet_name'].split(".")[0]
+        prompt = data['44']['inputs']['text']
+        model = data['35']['inputs']['unet_name'].split(".")[0]
     except KeyError:
         # SDXL workflow
         data = json.loads(img.info["prompt"])
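get_details_from_png works because ComfyUI embeds the queued workflow graph as JSON in the PNG's "prompt" text chunk; the node ids ('44'/'35' on 0.2.21, '6'/'38' on main) simply track the ids used in the bundled workflow files. A minimal sketch with Pillow:

import json
from PIL import Image

with Image.open("image.png") as img:
    data = json.loads(img.info["prompt"])
# Node ids below match the 0.2.21 workflow_flux.json shown later in this diff.
print(data["44"]["inputs"]["text"])                     # positive prompt
print(data["35"]["inputs"]["unet_name"].split(".")[0])  # model name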
@@ -110,31 +110,14 @@ def get_current_version():
         return version
     except subprocess.CalledProcessError as e:
         print("Error running bump-my-version:", e)
-        return "unknown"
+        return None

 def load_models_from_config():
-    config = load_config()
-    # Only load FLUX models if FLUX feature is enabled
-    use_flux = config["comfyui"].get("flux", "False").lower() == "true"
-    if use_flux and "comfyui:flux" in config and "models" in config["comfyui:flux"]:
-        flux_models = config["comfyui:flux"]["models"].split(",")
-    else:
-        flux_models = []
-
-    sdxl_models = config["comfyui"]["models"].split(",")
-
-    # Only load Qwen models if Qwen feature is enabled
-    use_qwen = config["comfyui"].get("qwen", "False").lower() == "true"
-    if use_qwen and "comfyui:qwen" in config and "models" in config["comfyui:qwen"]:
-        qwen_models = config["comfyui:qwen"]["models"].split(",")
-    else:
-        qwen_models = []
-
+    flux_models = load_config()["comfyui:flux"]["models"].split(",")
+    sdxl_models = load_config()["comfyui"]["models"].split(",")
     sorted_flux_models = sorted(flux_models, key=str.lower)
     sorted_sdxl_models = sorted(sdxl_models, key=str.lower)
-    sorted_qwen_models = sorted(qwen_models, key=str.lower)
-    return sorted_sdxl_models, sorted_flux_models, sorted_qwen_models
+    return sorted_sdxl_models, sorted_flux_models


 def load_topics_from_config():
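Both variants read comma-separated model lists from INI-style sections; main just gates the FLUX/Qwen sections behind feature flags. A hedged sketch of the sections these functions touch (key names come from the diff, the SDXL model values are placeholders):

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[comfyui]
models = sdxl-model-a.safetensors,sdxl-model-b.safetensors
flux = True
qwen = False

[comfyui:flux]
models = flux1-dev-Q4_0.gguf

[comfyui:qwen]
models = qwen-image-Q2_K.gguf
""")
flux_models = cfg["comfyui:flux"]["models"].split(",")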
@@ -175,10 +158,7 @@ def load_prompt_models_from_config():


 def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
-    """Create a prompt using a randomly selected model from OpenWebUI or OpenRouter.
-
-    If OpenWebUI fails, it will retry once. If it fails again, it will fallback to OpenRouter.
-    """
+    """Create a prompt using a randomly selected model from OpenWebUI or OpenRouter."""
     prompt_models = load_prompt_models_from_config()

     if not prompt_models:

@@ -188,59 +168,16 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
     # Randomly select a model
     service, model = random.choice(prompt_models)

-    # Import here to avoid circular imports
-    from libs.openwebui import create_prompt_on_openwebui
-    from libs.openrouter import create_prompt_on_openrouter
-
     if service == "openwebui":
-        try:
-            # First attempt with OpenWebUI
-            logging.info(f"Attempting to generate prompt with OpenWebUI using model: {model}")
-            result = create_prompt_on_openwebui(base_prompt, topic, model)
-            if result:
-                return result
-
-            # If first attempt returns None, try again
-            logging.warning("First OpenWebUI attempt failed. Retrying...")
-            result = create_prompt_on_openwebui(base_prompt, topic, model)
-            if result:
-                return result
-
-            # If second attempt fails, fallback to OpenRouter
-            logging.warning("Second OpenWebUI attempt failed. Falling back to OpenRouter...")
-            openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]
-            if openrouter_models:
-                _, openrouter_model = random.choice(openrouter_models)
-                return create_prompt_on_openrouter(base_prompt, topic, openrouter_model)
-            else:
-                logging.error("No OpenRouter models configured for fallback.")
-                return "A colorful abstract composition"  # Default fallback prompt
-
-        except Exception as e:
-            logging.error(f"Error with OpenWebUI: {e}")
-            # Fallback to OpenRouter on exception
-            logging.warning("OpenWebUI exception. Falling back to OpenRouter...")
-            openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]
-            if openrouter_models:
-                _, openrouter_model = random.choice(openrouter_models)
-                try:
-                    return create_prompt_on_openrouter(base_prompt, topic, openrouter_model)
-                except Exception as e2:
-                    logging.error(f"Error with OpenRouter fallback: {e2}")
-                    return "A colorful abstract composition"  # Default fallback prompt
-            else:
-                logging.error("No OpenRouter models configured for fallback.")
-                return "A colorful abstract composition"  # Default fallback prompt
-
+        # Import here to avoid circular imports
+        from libs.ollama import create_prompt_on_openwebui
+        return create_prompt_on_openwebui(base_prompt, topic)
     elif service == "openrouter":
-        try:
-            # Use OpenRouter
-            return create_prompt_on_openrouter(base_prompt, topic, model)
-        except Exception as e:
-            logging.error(f"Error with OpenRouter: {e}")
-            return "A colorful abstract composition"  # Default fallback prompt
-
-    return "A colorful abstract composition"  # Default fallback prompt
+        # Import here to avoid circular imports
+        from libs.openrouter import create_prompt_on_openrouter
+        return create_prompt_on_openrouter(base_prompt, topic)
+    return None

 user_config = load_config()
 output_folder = user_config["comfyui"]["output_dir"]
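The call shape is identical on both branches; only the failure handling differs. Usage sketch:

prompt = create_prompt_with_random_model(
    "Generate a random detailed prompt for stable diffusion."
)
if prompt:
    create_image(prompt, "Random Image Model")
else:
    # 0.2.21 returns None on failure; main retries OpenWebUI once and then
    # falls back to OpenRouter or a canned prompt instead.
    print("Prompt generation failed.")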
libs/openwebui.py on main, libs/ollama.py on 0.2.21 (inferred from the import changes above):

@@ -1,11 +1,9 @@
 import random
 import logging
+import litellm
 import nest_asyncio
 from libs.generic import load_recent_prompts, load_config
 import re
-from openwebui_chat_client import OpenWebUIClient
-from datetime import datetime

 nest_asyncio.apply()

 logging.basicConfig(level=logging.INFO)
@@ -35,27 +33,22 @@ def create_prompt_on_openwebui(prompt: str, topic: str = "random", model: str =
         topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."

     user_content = (
-        "Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors."
+        "Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors.”"
         + topic_instruction
         + "Avoid prompts similar to the following:"
         + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
     )


     if model:
         # Use the specified model
         model = model
     else:
         # Select a random model
-        model = random.choice(user_config["openwebui"]["models"].split(",")).strip()
-
-    # Create OpenWebUI client
-    client = OpenWebUIClient(
-        base_url=user_config["openwebui"]["base_url"],
-        token=user_config["openwebui"]["api_key"],
-        default_model_id=model
-    )
-
-    # Prepare messages for the chat
+        model = random.choice(user_config["openwebui"]["models"].split(","))
+
+    response = litellm.completion(
+        api_base=user_config["openwebui"]["base_url"],
+        model="openai/" + model,
     messages=[
         {
             "role": "system",
@@ -70,26 +63,23 @@ def create_prompt_on_openwebui(prompt: str, topic: str = "random", model: str =
             "role": "user",
             "content": user_content,
         },
-    ]
-
-    # Send the chat request
-    try:
-        result = client.chat(
-            question=user_content,
-            chat_title=datetime.now().strftime("%Y-%m-%d %H:%M"),
-            folder_name="ai-frame-image-server"
+        ],
+        api_key=user_config["openwebui"]["api_key"],
     )

-        if result:
-            prompt = result["response"].strip('"')
-        else:
-            # Return None if the request fails
-            logging.warning(f"OpenWebUI request failed with model: {model}")
-            return None
-    except Exception as e:
-        logging.error(f"Error in OpenWebUI request with model {model}: {e}")
-        return None
+    prompt = response["choices"][0]["message"]["content"].strip('"')
+    # response = litellm.completion(
+    #     api_base=user_config["openwebui"]["base_url"],
+    #     model="openai/brxce/stable-diffusion-prompt-generator:latest",
+    #     messages=[
+    #         {
+    #             "role": "user",
+    #             "content": prompt,
+    #         },
+    #     ],
+    #     api_key=user_config["openwebui"]["api_key"],
+    # )
+    # prompt = response["choices"][0]["message"]["content"].strip('"')

     match = re.search(r'"([^"]+)"', prompt)
     if not match:
         match = re.search(r":\s*\n*\s*(.+)", prompt)
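Consolidated from the 0.2.21 side of the two hunks above, the request path is a single OpenAI-compatible completion through litellm (the system message is elided here, as it is in the diff):

import litellm

response = litellm.completion(
    api_base=user_config["openwebui"]["base_url"],
    model="openai/" + model,
    messages=[
        {"role": "user", "content": user_content},
    ],
    api_key=user_config["openwebui"]["api_key"],
)
prompt = response["choices"][0]["message"]["content"].strip('"')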
requirements.txt: binary file not shown.
Create-image routes module (Blueprint "create_routes"; file name not shown in this extract):

@@ -1,7 +1,7 @@
 from flask import Blueprint, request, render_template, redirect, url_for, session
 import threading
-from libs.comfyui import create_image, select_model, get_available_models, get_queue_count
-from libs.openwebui import create_prompt_on_openwebui
+from libs.comfyui import create_image, select_model, get_available_models
+from libs.ollama import create_prompt_on_openwebui
 from libs.generic import load_models_from_config, load_topics_from_config, load_openrouter_models_from_config, load_openwebui_models_from_config, create_prompt_with_random_model
 import os
@@ -23,7 +23,7 @@ def create():
     # Use the specified prompt model
     service, service_model = prompt_model.split(":", 1) if ":" in prompt_model else (prompt_model, "")
     if service == "openwebui":
-        from libs.openwebui import create_prompt_on_openwebui
+        from libs.ollama import create_prompt_on_openwebui
         prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic, service_model)
     elif service == "openrouter":
         from libs.openrouter import create_prompt_on_openrouter
@@ -35,20 +35,17 @@ def create():
     threading.Thread(target=lambda: create_image(prompt, model)).start()
     return redirect(url_for("create_routes.image_queued", prompt=prompt, model=model.split(".")[0]))

-    # Load all models (SDXL, FLUX, and Qwen)
-    sdxl_models, flux_models, qwen_models = load_models_from_config()
+    # Load all models (SDXL and FLUX only)
+    sdxl_models, flux_models = load_models_from_config()
     openwebui_models = load_openwebui_models_from_config()
     openrouter_models = load_openrouter_models_from_config()

-    queue_count = get_queue_count()
     return render_template("create_image.html",
-                           sdxx_models=sdxl_models,
+                           sdxl_models=sdxl_models,
                            flux_models=flux_models,
-                           qwen_models=qwen_models,
                            openwebui_models=openwebui_models,
                            openrouter_models=openrouter_models,
-                           topics=load_topics_from_config(),
-                           queue_count=queue_count)
+                           topics=load_topics_from_config())

 @bp.route("/image_queued")
 def image_queued():
@@ -65,20 +62,17 @@ def create_image_page():
     if user_config["frame"]["create_requires_auth"] == "True" and not session.get("authenticated"):
         return redirect(url_for("auth_routes.login", next=request.path))

-    # Load all models (SDXL, FLUX, and Qwen)
-    sdxl_models, flux_models, qwen_models = load_models_from_config()
+    # Load all models (SDXL and FLUX only)
+    sdxl_models, flux_models = load_models_from_config()
     openwebui_models = load_openwebui_models_from_config()
     openrouter_models = load_openrouter_models_from_config()

-    queue_count = get_queue_count()
     return render_template("create_image.html",
                            sdxl_models=sdxl_models,
                            flux_models=flux_models,
-                           qwen_models=qwen_models,
                            openwebui_models=openwebui_models,
                            openrouter_models=openrouter_models,
-                           topics=load_topics_from_config(),
-                           queue_count=queue_count)
+                           topics=load_topics_from_config())


 def init_app(config):
Frame index route module (file name not shown in this extract):

@@ -11,10 +11,12 @@ def index():
     image_filename = "./image.png"
     image_path = os.path.join(image_folder, image_filename)
     prompt = get_details_from_png(image_path)["p"]
+    version = get_current_version()

     return render_template(
         "index.html",
         image=image_filename,
         prompt=prompt,
         reload_interval=user_config["frame"]["reload_interval"],
+        version=version,
     )
Job routes module (Blueprint "job_routes"; file name not shown in this extract):

@@ -1,12 +1,8 @@
-from flask import Blueprint, jsonify
-from libs.comfyui import cancel_current_job, get_queue_details
+from flask import Blueprint
+from libs.comfyui import cancel_current_job

 bp = Blueprint("job_routes", __name__)

 @bp.route("/cancel", methods=["GET"])
 def cancel_job():
     return cancel_current_job()
-
-@bp.route("/api/queue", methods=["GET"])
-def api_queue():
-    return jsonify(get_queue_details())
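The removed /api/queue endpoint returned get_queue_details() as JSON, i.e. a list of {id, model, prompt} objects; the create_image.html dropdown further down consumes it. A usage sketch against a locally running instance (host and port are placeholders):

import requests

for job in requests.get("http://localhost:5000/api/queue", timeout=10).json():
    print(job["id"], job["model"], job["prompt"])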
Base template (HTML; file name not shown in this extract):

@@ -12,7 +12,7 @@

     <!-- Version number at bottom right -->
     <div class="version">
-        <a href="{{ url_for('settings_route.config_editor') }}">{% if version and version != 'unknown' %}v{{ version }}{% else %}v?.?.?{% endif %}</a>
+        <a href="{{ url_for('settings_route.config_editor') }}">v{{ version }}</a>
     </div>

     {% block scripts %}{% endblock %}
templates/create_image.html:

@@ -73,7 +73,6 @@
       background: #555;
     }

-
     #spinner-overlay {
       position: fixed;
       inset: 0;
@@ -132,66 +131,10 @@
         height: 150px;
       }
     }
-    .queue-dropdown {
-      position: absolute;
-      top: 100%;
-      right: 0;
-      background: #222;
-      border: 1px solid #444;
-      border-radius: 5px;
-      padding: 10px;
-      z-index: 1001;
-      display: none;
-      max-height: 300px;
-      overflow-y: auto;
-      width: 400px;
-    }
-
-    .queue-item {
-      margin-bottom: 5px;
-      padding: 5px;
-      border-bottom: 1px solid #333;
-    }
-
-    .queue-item:last-child {
-      border-bottom: none;
-    }
-
-    .queue-item .prompt {
-      font-size: 0.9em;
-      color: #aaa;
-      white-space: normal;
-      word-wrap: break-word;
-      position: relative;
-      cursor: pointer;
-    }
-
-    .queue-item .prompt:hover::after {
-      content: "Model: " attr(data-model);
-      position: absolute;
-      bottom: 100%;
-      left: 0;
-      background: #333;
-      color: #00aaff;
-      padding: 4px 8px;
-      border-radius: 4px;
-      font-size: 0.8em;
-      white-space: nowrap;
-      z-index: 1002;
-      box-shadow: 0 2px 4px rgba(0,0,0,0.3);
-    }
   </style>
 {% endblock %}

 {% block content %}
-  <div class="queue-container" style="position: fixed; top: 20px; right: 20px; z-index: 1000;">
-    <button id="queue-btn" style="background: #333; color: white; border: none; padding: 5px 10px; border-radius: 5px; cursor: pointer;">
-      Queue: <span id="queue-count">{{ queue_count | default(0) }}</span>
-    </button>
-    <div id="queue-dropdown" class="queue-dropdown">
-      <!-- Queue items will be populated here -->
-    </div>
-  </div>
   <h1 style="margin-bottom: 20px;">Create An Image</h1>

   <textarea id="prompt-box" placeholder="Enter your custom prompt here..."></textarea>
@@ -214,13 +157,6 @@
       {% endfor %}
     </optgroup>
     {% endif %}
-    {% if qwen_models %}
-    <optgroup label="Qwen">
-      {% for m in qwen_models %}
-      <option value="{{ m }}">{{ m.rsplit('.', 1)[0] if '.' in m else m }}</option>
-      {% endfor %}
-    </optgroup>
-    {% endif %}
     {% if sdxl_models %}
     <optgroup label="SDXL">
       {% for m in sdxl_models %}
@@ -326,59 +262,5 @@
       alert("Error requesting random prompt: " + error);
     });
   }
-  document.addEventListener('DOMContentLoaded', function() {
-    const queueBtn = document.getElementById('queue-btn');
-    const queueDropdown = document.getElementById('queue-dropdown');
-    const queueCountSpan = document.getElementById('queue-count');
-
-    // Toggle dropdown visibility
-    queueBtn.addEventListener('click', function(e) {
-      e.stopPropagation();
-      if (queueDropdown.style.display === 'block') {
-        queueDropdown.style.display = 'none';
-      } else {
-        fetchQueueDetails();
-        queueDropdown.style.display = 'block';
-      }
-    });
-
-    // Close dropdown when clicking outside
-    document.addEventListener('click', function() {
-      queueDropdown.style.display = 'none';
-    });
-
-    // Prevent dropdown from closing when clicking inside it
-    queueDropdown.addEventListener('click', function(e) {
-      e.stopPropagation();
-    });
-
-    function fetchQueueDetails() {
-      fetch('/api/queue')
-        .then(response => response.json())
-        .then(jobs => {
-          queueCountSpan.textContent = jobs.length;
-          const container = queueDropdown;
-          container.innerHTML = '';
-
-          if (jobs.length === 0) {
-            container.innerHTML = '<div class="queue-item">No jobs in queue</div>';
-            return;
-          }
-
-          jobs.forEach(job => {
-            const item = document.createElement('div');
-            item.className = 'queue-item';
-            item.innerHTML = `
-              <div class="prompt" data-model="${job.model}">${job.prompt}</div>
-            `;
-            container.appendChild(item);
-          });
-        })
-        .catch(error => {
-          console.error('Error fetching queue:', error);
-          queueDropdown.innerHTML = '<div class="queue-item">Error loading queue</div>';
-        });
-    }
-  });
 </script>
 {% endblock %}
workflow_flux.json:

@@ -1,31 +1,12 @@
 {
-  "6": {
-    "inputs": {
-      "text": "Terminator endoskeleton riding a bmx bike",
-      "speak_and_recognation": {
-        "__value__": [
-          false,
-          true
-        ]
-      },
-      "clip": [
-        "39",
-        0
-      ]
-    },
-    "class_type": "CLIPTextEncode",
-    "_meta": {
-      "title": "CLIP Text Encode (Positive Prompt)"
-    }
-  },
   "8": {
     "inputs": {
       "samples": [
-        "13",
-        0
+        "62",
+        1
       ],
       "vae": [
-        "41",
+        "73",
         0
       ]
     },
@@ -34,157 +15,298 @@
       "title": "VAE Decode"
     }
   },
-  "9": {
+  "40": {
     "inputs": {
-      "filename_prefix": "ComfyUI",
+      "int": 20
+    },
+    "class_type": "Int Literal (Image Saver)",
+    "_meta": {
+      "title": "Generation Steps"
+    }
+  },
+  "41": {
+    "inputs": {
+      "width": 720,
+      "height": 1080,
+      "aspect_ratio": "custom",
+      "swap_dimensions": "Off",
+      "upscale_factor": 2,
+      "prescale_factor": 1,
+      "batch_size": 1
+    },
+    "class_type": "CR Aspect Ratio",
+    "_meta": {
+      "title": "CR Aspect Ratio"
+    }
+  },
+  "42": {
+    "inputs": {
+      "filename": "THISFILE",
+      "path": "",
+      "extension": "png",
+      "steps": [
+        "40",
+        0
+      ],
+      "cfg": [
+        "52",
+        0
+      ],
+      "modelname": "flux1-dev-Q4_0.gguf",
+      "sampler_name": [
+        "50",
+        1
+      ],
+      "scheduler_name": "normal",
+      "positive": [
+        "44",
+        0
+      ],
+      "negative": [
+        "45",
+        0
+      ],
+      "seed_value": [
+        "48",
+        0
+      ],
+      "width": [
+        "41",
+        0
+      ],
+      "height": [
+        "41",
+        1
+      ],
+      "lossless_webp": true,
+      "quality_jpeg_or_webp": 100,
+      "optimize_png": false,
+      "counter": 0,
+      "denoise": [
+        "53",
+        0
+      ],
+      "clip_skip": 0,
+      "time_format": "%Y-%m-%d-%H%M%S",
+      "save_workflow_as_json": true,
+      "embed_workflow": true,
+      "additional_hashes": "",
+      "download_civitai_data": true,
+      "easy_remix": true,
+      "speak_and_recognation": {
+        "__value__": [
+          false,
+          true
+        ]
+      },
       "images": [
         "8",
         0
       ]
     },
-    "class_type": "SaveImage",
+    "class_type": "Image Saver",
     "_meta": {
-      "title": "Save Image"
+      "title": "CivitAI Image Saver"
     }
   },
-  "13": {
+  "44": {
+    "inputs": {
+      "text": "Yautja Predator wielding flamethrower in smoky, cyberpunk alleyway darkness",
+      "speak_and_recognation": {
+        "__value__": [
+          false,
+          true
+        ]
+      }
+    },
+    "class_type": "ttN text",
+    "_meta": {
+      "title": "Positive Prompt T5"
+    }
+  },
+  "45": {
+    "inputs": {
+      "text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
+      "speak_and_recognation": {
+        "__value__": [
+          false,
+          true
+        ]
+      }
+    },
+    "class_type": "ttN text",
+    "_meta": {
+      "title": "Negative Prompt"
+    }
+  },
+  "47": {
+    "inputs": {
+      "text": [
+        "44",
+        0
+      ],
+      "speak_and_recognation": {
+        "__value__": [
+          false,
+          true
+        ]
+      },
+      "clip": [
+        "72",
+        0
+      ]
+    },
+    "class_type": "CLIPTextEncode",
+    "_meta": {
+      "title": "Prompt Encoder"
+    }
+  },
+  "48": {
+    "inputs": {
+      "seed": 47371998700984,
+      "increment": 1
+    },
+    "class_type": "Seed Generator (Image Saver)",
+    "_meta": {
+      "title": "Seed"
+    }
+  },
+  "49": {
+    "inputs": {
+      "scheduler": "beta"
+    },
+    "class_type": "Scheduler Selector (Comfy) (Image Saver)",
+    "_meta": {
+      "title": "Scheduler"
+    }
+  },
+  "50": {
+    "inputs": {
+      "sampler_name": "euler"
+    },
+    "class_type": "Sampler Selector (Image Saver)",
+    "_meta": {
+      "title": "Sampler"
+    }
+  },
+  "52": {
+    "inputs": {
+      "float": 3.500000000000001
+    },
+    "class_type": "Float Literal (Image Saver)",
+    "_meta": {
+      "title": "CFG Scale"
+    }
+  },
+  "53": {
+    "inputs": {
+      "float": 1.0000000000000002
+    },
+    "class_type": "Float Literal (Image Saver)",
+    "_meta": {
+      "title": "Denoise"
+    }
+  },
+  "62": {
     "inputs": {
       "noise": [
-        "25",
+        "65",
         0
       ],
       "guider": [
-        "22",
+        "67",
         0
       ],
       "sampler": [
-        "16",
+        "63",
         0
       ],
       "sigmas": [
-        "17",
+        "64",
         0
       ],
       "latent_image": [
-        "27",
-        0
+        "41",
+        5
       ]
     },
     "class_type": "SamplerCustomAdvanced",
     "_meta": {
-      "title": "SamplerCustomAdvanced"
+      "title": "Custom Sampler"
     }
   },
-  "16": {
+  "63": {
     "inputs": {
-      "sampler_name": "euler"
+      "sampler_name": [
+        "50",
+        0
+      ]
     },
     "class_type": "KSamplerSelect",
     "_meta": {
       "title": "KSampler Select"
     }
   },
-  "17": {
+  "64": {
     "inputs": {
-      "scheduler": "simple",
-      "steps": 20,
-      "denoise": 1,
+      "scheduler": [
+        "49",
+        0
+      ],
+      "steps": [
+        "40",
+        0
+      ],
+      "denoise": [
+        "53",
+        0
+      ],
       "model": [
-        "30",
+        "35",
         0
       ]
     },
     "class_type": "BasicScheduler",
     "_meta": {
-      "title": "BasicScheduler"
+      "title": "Sigma Generator"
     }
   },
-  "22": {
+  "65": {
+    "inputs": {
+      "noise_seed": [
+        "48",
+        0
+      ]
+    },
+    "class_type": "RandomNoise",
+    "_meta": {
+      "title": "Noise Generator"
+    }
+  },
+  "67": {
     "inputs": {
       "model": [
-        "30",
+        "35",
         0
       ],
       "conditioning": [
-        "26",
+        "47",
         0
       ]
     },
     "class_type": "BasicGuider",
     "_meta": {
-      "title": "BasicGuider"
+      "title": "Prompt Guider"
     }
   },
-  "25": {
-    "inputs": {
-      "noise_seed": 707623342760804
-    },
-    "class_type": "RandomNoise",
-    "_meta": {
-      "title": "RandomNoise"
-    }
-  },
-  "26": {
-    "inputs": {
-      "guidance": 3.5,
-      "conditioning": [
-        "6",
-        0
-      ]
-    },
-    "class_type": "FluxGuidance",
-    "_meta": {
-      "title": "FluxGuidance"
-    }
-  },
-  "27": {
-    "inputs": {
-      "width": 720,
-      "height": 1088,
-      "batch_size": 1
-    },
-    "class_type": "EmptySD3LatentImage",
-    "_meta": {
-      "title": "CR Aspect Ratio"
-    }
-  },
-  "30": {
-    "inputs": {
-      "max_shift": 1.15,
-      "base_shift": 0.5,
-      "width": 720,
-      "height": 1088,
-      "model": [
-        "38",
-        0
-      ]
-    },
-    "class_type": "ModelSamplingFlux",
-    "_meta": {
-      "title": "ModelSamplingFlux"
-    }
-  },
-  "38": {
-    "inputs": {
-      "unet_name": "flux1-dev-Q4_0.gguf",
-      "device": "cuda:1",
-      "virtual_vram_gb": 0,
-      "use_other_vram": true,
-      "expert_mode_allocations": ""
-    },
-    "class_type": "UnetLoaderGGUFDisTorchMultiGPU",
-    "_meta": {
-      "title": "UnetLoaderGGUFDisTorchMultiGPU"
-    }
-  },
-  "39": {
+  "72": {
     "inputs": {
       "clip_name1": "t5-v1_1-xxl-encoder-Q4_K_M.gguf",
       "clip_name2": "clip_l.safetensors",
       "type": "flux",
       "device": "cuda:0",
       "virtual_vram_gb": 0,
-      "use_other_vram": true,
+      "use_other_vram": false,
       "expert_mode_allocations": ""
     },
     "class_type": "DualCLIPLoaderGGUFDisTorchMultiGPU",
@@ -192,7 +314,7 @@
       "title": "DualCLIPLoaderGGUFDisTorchMultiGPU"
     }
   },
-  "41": {
+  "73": {
     "inputs": {
       "vae_name": "FLUX1/ae.safetensors",
       "device": "cuda:0"
@@ -201,5 +323,21 @@
     "_meta": {
       "title": "VAELoaderMultiGPU"
     }
+  },
+  "35": {
+    "inputs": {
+      "unet_name": "flux1-dev-Q4_0.gguf",
+      "dequant_dtype": "default",
+      "patch_dtype": "default",
+      "patch_on_device": false,
+      "device": "cuda:1",
+      "virtual_vram_gb": 0,
+      "use_other_vram": false,
+      "expert_mode_allocations": ""
+    },
+    "class_type": "UnetLoaderGGUFAdvancedDisTorchMultiGPU",
+    "_meta": {
+      "title": "UnetLoaderGGUFAdvancedDisTorchMultiGPU"
+    }
   }
 }
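The *_node/*_param arguments passed to generate_image in libs/comfyui.py line up with the node titles in this file ("Positive Prompt T5", "Seed", "CivitAI Image Saver"). A hedged sketch of the patching this implies; the title-lookup helper is an assumption, since generate_image's body is not part of this diff:

import json

def set_input(workflow: dict, title: str, param: str, value) -> None:
    # Find a node by its _meta.title and overwrite one named input.
    for node in workflow.values():
        if node.get("_meta", {}).get("title") == title:
            node["inputs"][param] = value
            return
    raise KeyError(title)

with open("./workflow_flux.json") as f:
    wf = json.load(f)

set_input(wf, "Positive Prompt T5", "text", "a lighthouse at dusk")  # prompt_node
set_input(wf, "Seed", "seed", 12345)                                 # seed_node + seed_param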
workflow_qwen.json (present on main, absent in 0.2.21):

@@ -1,147 +0,0 @@
-{
-  "93": {
-    "inputs": {
-      "text": "jpeg compression",
-      "speak_and_recognation": {
-        "__value__": [
-          false,
-          true
-        ]
-      },
-      "clip": [
-        "126",
-        0
-      ]
-    },
-    "class_type": "CLIPTextEncode",
-    "_meta": {
-      "title": "CLIP Text Encode (Prompt)"
-    }
-  },
-  "95": {
-    "inputs": {
-      "seed": 22,
-      "steps": 10,
-      "cfg": 4.5,
-      "sampler_name": "euler",
-      "scheduler": "normal",
-      "denoise": 1,
-      "model": [
-        "127",
-        0
-      ],
-      "positive": [
-        "100",
-        0
-      ],
-      "negative": [
-        "93",
-        0
-      ],
-      "latent_image": [
-        "97",
-        0
-      ]
-    },
-    "class_type": "KSampler",
-    "_meta": {
-      "title": "KSampler"
-    }
-  },
-  "97": {
-    "inputs": {
-      "width": 1280,
-      "height": 768,
-      "length": 1,
-      "batch_size": 1
-    },
-    "class_type": "EmptyHunyuanLatentVideo",
-    "_meta": {
-      "title": "EmptyHunyuanLatentVideo"
-    }
-  },
-  "98": {
-    "inputs": {
-      "samples": [
-        "95",
-        0
-      ],
-      "vae": [
-        "128",
-        0
-      ]
-    },
-    "class_type": "VAEDecode",
-    "_meta": {
-      "title": "VAE Decode"
-    }
-  },
-  "100": {
-    "inputs": {
-      "text": "Terminator riding a push bike",
-      "speak_and_recognation": {
-        "__value__": [
-          false,
-          true
-        ]
-      },
-      "clip": [
-        "126",
-        0
-      ]
-    },
-    "class_type": "CLIPTextEncode",
-    "_meta": {
-      "title": "CLIP Text Encode (Prompt)"
-    }
-  },
-  "102": {
-    "inputs": {
-      "images": [
-        "98",
-        0
-      ]
-    },
-    "class_type": "PreviewImage",
-    "_meta": {
-      "title": "Preview Image"
-    }
-  },
-  "126": {
-    "inputs": {
-      "clip_name": "Qwen2.5-VL-7B-Instruct-Q3_K_M.gguf",
-      "type": "qwen_image",
-      "device": "cuda:1",
-      "virtual_vram_gb": 6,
-      "use_other_vram": true,
-      "expert_mode_allocations": ""
-    },
-    "class_type": "CLIPLoaderGGUFDisTorchMultiGPU",
-    "_meta": {
-      "title": "CLIPLoaderGGUFDisTorchMultiGPU"
-    }
-  },
-  "127": {
-    "inputs": {
-      "unet_name": "qwen-image-Q2_K.gguf",
-      "device": "cuda:0",
-      "virtual_vram_gb": 6,
-      "use_other_vram": true,
-      "expert_mode_allocations": ""
-    },
-    "class_type": "UnetLoaderGGUFDisTorchMultiGPU",
-    "_meta": {
-      "title": "UnetLoaderGGUFDisTorchMultiGPU"
-    }
-  },
-  "128": {
-    "inputs": {
-      "vae_name": "qwen_image_vae.safetensors",
-      "device": "cuda:1"
-    },
-    "class_type": "VAELoaderMultiGPU",
-    "_meta": {
-      "title": "VAELoaderMultiGPU"
-    }
-  }
-}