Compare commits


39 Commits
0.2.15 ... main

Author SHA1 Message Date
086695d898 Bump version: 0.3.11 → 0.3.12 2025-09-04 08:58:28 +01:00
a63668cc93 fix(openrouter): handle rate limit errors with OpenWebUI fallback

When OpenRouter returns a 429 rate-limit error, the application now falls back to an OpenWebUI model instead of immediately returning a default prompt. This keeps prompt generation working through local models when external API limits are exceeded.

The changes include:
- Adding RateLimitError import from openai
- Implementing fallback logic in create_prompt_on_openrouter function
- Using OpenWebUI as secondary source for prompts when rate limiting occurs
- Proper error handling and logging for both primary and fallback scenarios

This makes prompt generation more robust: users still receive generated content when external services are temporarily rate limited. The fallback follows the existing pattern of random selection from the configured OpenWebUI models, uses a static default prompt as the final backup, and handles and logs errors on both the primary and fallback paths.
2025-09-04 08:58:24 +01:00
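
A minimal sketch of the fallback path this commit describes, condensed for illustration: the RateLimitError import, the config keys, the OpenWebUI helper, and the final default prompt follow libs/openrouter.py as added further down in this diff, while the wrapper function name and its trimmed body are illustrative rather than the committed code.

import logging
import random

from openai import OpenAI, RateLimitError

from libs.generic import load_config
from libs.openwebui import create_prompt_on_openwebui

user_config = load_config()

def openrouter_prompt_with_fallback(user_content: str, topic: str = "random") -> str:
    """Ask OpenRouter for a prompt; on a 429, retry via a local OpenWebUI model."""
    model = random.choice(
        [m.strip() for m in user_config["openrouter"]["models"].split(",") if m.strip()]
    )
    client = OpenAI(
        base_url="https://openrouter.ai/api/v1",
        api_key=user_config["openrouter"]["api_key"],
    )
    try:
        completion = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": user_content}],
        )
        return completion.choices[0].message.content.strip('"')
    except RateLimitError as e:
        # OpenRouter replied 429: prefer a randomly chosen local OpenWebUI model.
        logging.warning(f"OpenRouter rate limit exceeded: {e}. Falling back to OpenWebUI.")
        local_models = [m.strip() for m in user_config["openwebui"]["models"].split(",") if m.strip()]
        if local_models:
            try:
                return create_prompt_on_openwebui(user_content, topic, random.choice(local_models))
            except Exception as e2:
                logging.error(f"OpenWebUI fallback also failed: {e2}")
        # Final backup when no local model is configured or the fallback fails.
        return "A colorful abstract composition"
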
06d3a64bb9 Bump version: 0.3.10 → 0.3.11 2025-09-02 13:41:43 +01:00
d7c25373bd clear vram on comfyui 2025-09-02 12:19:38 +01:00
006c88b084 Bump version: 0.3.9 → 0.3.10 2025-09-01 13:22:20 +01:00
e7df200f8c add new venv to gitignore 2025-09-01 13:22:17 +01:00
506dece377 **refactor(comfyui.py): improve model selection logic**
Refactor `get_available_models` to handle multiple models and improve error handling. Adjust `select_model` to support configurable workflows and fallbacks.
2025-09-01 13:19:28 +01:00
12af531718 Bump version: 0.3.8 → 0.3.9 2025-08-13 09:35:48 +01:00
efefdde70d fix flux 2025-08-13 09:35:42 +01:00
918e37e077 Bump version: 0.3.7 → 0.3.8 2025-08-12 15:21:26 +01:00
f5427d18ed revert blocking image gen 2025-08-12 15:21:23 +01:00
34f8a05035 Bump version: 0.3.6 → 0.3.7 2025-08-12 15:17:34 +01:00
82f29a4fde block generation if image in queue 2025-08-12 15:17:02 +01:00
ad814855ab Bump version: 0.3.5 → 0.3.6 2025-08-12 15:03:57 +01:00
1b75417360 update the queue 2025-08-12 15:03:50 +01:00
9f3cbf736a Bump version: 0.3.4 → 0.3.5 2025-08-12 14:45:40 +01:00
3e46b3363b working queue logic 2025-08-12 14:15:23 +01:00
ff5dfbcbce show queue count 2025-08-12 13:02:38 +01:00
14e69f7608 initial qwen support 2025-08-12 12:08:12 +01:00
1468ac4bbe Bump version: 0.3.3 → 0.3.4 2025-08-09 09:38:52 +01:00
2e13ecfa2f feat(prompt): implement robust error handling and fallback mechanism
Add retry logic and a fallback mechanism to prompt generation. When OpenWebUI
fails, the system retries once before falling back to OpenRouter. Error
handling and logging have been added throughout the prompt generation flow
for more reliable operation.
2025-08-09 09:38:46 +01:00
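
A condensed sketch of the retry-then-fallback flow this commit describes; the full logic lives in create_prompt_with_random_model in libs/generic.py later in this diff. The helper imports and the default prompt string follow that code, while the wrapper function name and its explicit parameters are illustrative.

import logging
import random

from libs.openrouter import create_prompt_on_openrouter
from libs.openwebui import create_prompt_on_openwebui

def prompt_with_retry_and_fallback(base_prompt, topic, openwebui_model, openrouter_models):
    """Try OpenWebUI twice, then fall back to a random OpenRouter model."""
    try:
        result = create_prompt_on_openwebui(base_prompt, topic, openwebui_model)
        if result:
            return result
        logging.warning("First OpenWebUI attempt failed. Retrying...")
        result = create_prompt_on_openwebui(base_prompt, topic, openwebui_model)
        if result:
            return result
        logging.warning("Second OpenWebUI attempt failed. Falling back to OpenRouter...")
    except Exception as e:
        logging.error(f"Error with OpenWebUI: {e}")
    # Both OpenWebUI attempts exhausted: use a random OpenRouter model if configured.
    if openrouter_models:
        try:
            return create_prompt_on_openrouter(base_prompt, topic, random.choice(openrouter_models))
        except Exception as e:
            logging.error(f"Error with OpenRouter fallback: {e}")
    return "A colorful abstract composition"  # Default fallback prompt
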
fa59f3cfeb Bump version: 0.3.2 → 0.3.3 2025-07-30 09:22:31 +01:00
fdd2893255 pass the version to all templates 2025-07-30 09:22:28 +01:00
d40f6a95b0 Bump version: 0.3.1 → 0.3.2 2025-07-30 09:03:57 +01:00
f381fbc9c7 fix scheduled task 2025-07-30 09:03:46 +01:00
57bb0fed5b Bump version: 0.3.0 → 0.3.1 2025-07-29 14:31:05 +01:00
6e39c34a58 update the openapi folder name 2025-07-29 14:27:50 +01:00
e2acd2dcd6 openweb ui client rather than litellm and ollama 2025-07-29 14:25:13 +01:00
aa75646d5f Bump version: 0.2.22 → 0.3.0 2025-07-29 13:26:07 +01:00
ba2b943c0d Bump version: 0.2.21 → 0.2.22 2025-07-29 13:16:37 +01:00
5c45b8b832 Bump version: 0.2.20 → 0.2.21 2025-07-29 13:14:37 +01:00
9462888701 Bump version: 0.2.19 → 0.2.20 2025-07-29 12:48:10 +01:00
bd1bb98160 working openrouter and better create_image page formatting 2025-07-29 12:47:55 +01:00
76e33ea523 initial openrouter support 2025-07-29 11:51:29 +01:00
d80cf9473a Bump version: 0.2.18 → 0.2.19 2025-07-28 13:35:34 +01:00
62ee224736 Bump version: 0.2.17 → 0.2.18 2025-07-28 13:33:00 +01:00
410ed18526 Bump version: 0.2.16 → 0.2.17 2025-07-28 13:27:57 +01:00
d29badd0fe serve favourites.json file 2025-07-28 13:27:52 +01:00
82f93ae557 Bump version: 0.2.15 → 0.2.16 2025-07-18 14:05:35 +01:00
22 changed files with 1101 additions and 397 deletions

View File

@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.2.15"
current_version = "0.3.12"
parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
serialize = ["{major}.{minor}.{patch}"]
replace = "{new_version}"

2
.gitignore vendored
View File

@ -11,3 +11,5 @@ publish.sh
test.py
.vscode/launch.json
favourites.json
.vscode/launch.json
venv/*

2
.vscode/launch.json vendored
View File

@ -8,7 +8,7 @@
"name": "Python Debugger: Current File",
"type": "debugpy",
"request": "launch",
"program": "${file}",
"program": "ai_frame_image_server.py",
"console": "integratedTerminal",
"justMyCode": false,
"env": {"SECRET_KEY":"dkdkdk"}

View File

@ -4,7 +4,7 @@ FROM python:3.11-slim
# Set the working directory in the container
WORKDIR /app
# Set version label
ARG VERSION="0.2.15"
ARG VERSION="0.3.12"
LABEL version=$VERSION
# Copy project files into the container

View File

@ -4,6 +4,7 @@ import os
from routes import (
auth_routes,
favourites_routes,
gallery_routes,
image_routes,
index_routes,
@ -17,6 +18,13 @@ user_config = load_config()
app = Flask(__name__)
app.secret_key = os.environ.get("SECRET_KEY")
# Make version available to all templates
from libs.generic import get_current_version
@app.context_processor
def inject_version():
version = get_current_version()
return dict(version=version)
# Inject config into routes that need it
create_routes.init_app(user_config)
auth_routes.init_app(user_config)
@ -24,6 +32,7 @@ auth_routes.init_app(user_config)
# Register blueprints
app.register_blueprint(index_routes.bp)
app.register_blueprint(auth_routes.bp)
app.register_blueprint(favourites_routes.bp)
app.register_blueprint(gallery_routes.bp)
app.register_blueprint(image_routes.bp)
app.register_blueprint(job_routes.bp)
@ -37,7 +46,16 @@ from libs.comfyui import create_image
def scheduled_task():
print(f"Executing scheduled task at {time.strftime('%Y-%m-%d %H:%M:%S')}")
create_image(None)
# Generate a random prompt using either OpenWebUI or OpenRouter
from libs.generic import create_prompt_with_random_model
prompt = create_prompt_with_random_model("Generate a random detailed prompt for stable diffusion.")
if prompt:
# Select a random model
import random
model = "Random Image Model"
create_image(prompt, model)
else:
print("Failed to generate a prompt for the scheduled task.")
if user_config["frame"]["auto_regen"] == "True":
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":

View File

@ -15,7 +15,7 @@ from tenacity import (
import nest_asyncio
from libs.generic import rename_image, load_config, save_prompt
from libs.create_thumbnail import generate_thumbnail
from libs.ollama import create_prompt_on_openwebui
from libs.openwebui import create_prompt_on_openwebui
nest_asyncio.apply()
logging.basicConfig(level=logging.INFO)
@ -32,9 +32,17 @@ def get_available_models() -> list:
response = requests.get(url)
if response.status_code == 200:
data = response.json()
general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [])[0]
flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [])[0]
return general + flux
# Get SDXL models from CheckpointLoaderSimple
general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [[]])[0]
# Get FLUX models from UnetLoaderGGUF
flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [[]])[0]
# Combine both lists, handling cases where one might be missing
all_models = []
if isinstance(general, list):
all_models.extend(general)
if isinstance(flux, list):
all_models.extend(flux)
return all_models
else:
print(f"Failed to fetch models: {response.status_code}")
return []
@ -122,30 +130,61 @@ def generate_image(
def select_model(model: str) -> tuple[str, str]:
use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
use_qwen = json.loads(user_config["comfyui"].get("Qwen", "false").lower())
if model == "Random":
selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
if model == "Random Image Model":
# Create a list of available workflows based on configuration
available_workflows = []
if not only_flux:
available_workflows.append("SDXL")
if use_flux:
available_workflows.append("FLUX")
if use_qwen:
available_workflows.append("Qwen")
# If no workflows are available, default to SDXL
if not available_workflows:
available_workflows.append("SDXL")
# Randomly select a workflow
selected_workflow = random.choice(available_workflows)
elif "flux" in model.lower():
selected_workflow = "FLUX"
elif "qwen" in model.lower():
selected_workflow = "Qwen"
else:
selected_workflow = "SDXL"
if model == "Random":
if model == "Random Image Model":
if selected_workflow == "FLUX":
valid_models = user_config["comfyui:flux"]["models"].split(",")
elif selected_workflow == "Qwen":
valid_models = user_config["comfyui:qwen"]["models"].split(",")
else: # SDXL
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(set(get_available_models()) & set(available_model_list))
# If no valid models found, fall back to configured models
if not valid_models:
valid_models = available_model_list
# Ensure we have at least one model to choose from
if not valid_models:
# Fallback to a default model
valid_models = ["zavychromaxl_v100.safetensors"]
model = random.choice(valid_models)
return selected_workflow, model
def create_image(prompt: str | None = None, model: str = "Random") -> None:
def create_image(prompt: str | None = None, model: str = "Random Image Model") -> None:
"""Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
if prompt is None:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
# Generate a random prompt using either OpenWebUI or OpenRouter
from libs.generic import create_prompt_with_random_model
prompt = create_prompt_with_random_model("Generate a random detailed prompt for stable diffusion.")
if not prompt:
logging.error("Failed to generate a prompt.")
return
if not prompt:
logging.error("No prompt generated.")
@ -153,22 +192,101 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
save_prompt(prompt)
selected_workflow, model = select_model(model)
if selected_workflow == "FLUX":
generate_image(
file_name="image",
comfy_prompt=prompt,
workflow_path="./workflow_flux.json",
prompt_node="Positive Prompt T5",
seed_node="Seed",
seed_param="seed",
save_node="CivitAI Image Saver",
save_param="filename",
model_node="UnetLoaderGGUFAdvancedDisTorchMultiGPU",
prompt_node="CLIP Text Encode (Positive Prompt)",
seed_node="RandomNoise",
seed_param="noise_seed",
save_node="Save Image",
save_param="filename_prefix",
model_node="UnetLoaderGGUFDisTorchMultiGPU",
model_param="unet_name",
model=model
)
elif selected_workflow == "Qwen":
generate_image(
file_name="image",
comfy_prompt=prompt,
workflow_path="./workflow_qwen.json",
prompt_node="Positive",
seed_node="KSampler",
seed_param="seed",
save_node="Save Image",
save_param="filename_prefix",
model_node="Load Checkpoint",
model_param="ckpt_name",
model=model
)
else: # SDXL
generate_image("image", comfy_prompt=prompt, model=model)
logging.info(f"{selected_workflow} generation started with prompt: {prompt}")
def get_queue_count() -> int:
"""Fetches the current queue count from ComfyUI (pending + running jobs)."""
url = user_config["comfyui"]["comfyui_url"] + "/queue"
try:
response = requests.get(url)
response.raise_for_status()
data = response.json()
pending = len(data.get("queue_pending", []))
running = len(data.get("queue_running", []))
return pending + running
except Exception as e:
logging.error(f"Error fetching queue count: {e}")
return 0
def get_queue_details() -> list:
"""Fetches detailed queue information including model names and prompts."""
url = user_config["comfyui"]["comfyui_url"] + "/queue"
try:
response = requests.get(url)
response.raise_for_status()
data = response.json()
jobs = []
for job_list in [data.get("queue_running", []), data.get("queue_pending", [])]:
for job in job_list:
# Extract prompt data (format: [priority, time, prompt])
prompt_data = job[2]
model = "Unknown"
prompt = "No prompt"
# Find model loader node (works for SDXL/FLUX/Qwen workflows)
for node in prompt_data.values():
if node.get("class_type") in ["CheckpointLoaderSimple", "UnetLoaderGGUFAdvancedDisTorchMultiGPU"]:
model = node["inputs"].get("ckpt_name", "Unknown")
break
# Find prompt node using class_type pattern and title matching
for node in prompt_data.values():
class_type = node.get("class_type", "")
if "CLIPTextEncode" in class_type and "text" in node["inputs"]:
meta = node.get('_meta', {})
title = meta.get('title', '').lower()
if 'positive' in title or 'prompt' in title:
prompt = node["inputs"]["text"]
break
jobs.append({
"id": job[0],
"model": model.split(".")[0] if model != "Unknown" else model,
"prompt": prompt
})
return jobs
except Exception as e:
logging.error(f"Error fetching queue details: {e}")
return []
try:
response = requests.get(url)
response.raise_for_status()
data = response.json()
pending = len(data.get("queue_pending", []))
running = len(data.get("queue_running", []))
return pending + running
except Exception as e:
logging.error(f"Error fetching queue count: {e}")
return 0

View File

@ -4,6 +4,7 @@ import logging
import sys
import time
import os
import random
from PIL import Image
import nest_asyncio
import json
@ -83,8 +84,8 @@ def get_details_from_png(path):
try:
# Flux workflow
data = json.loads(img.info["prompt"])
prompt = data['44']['inputs']['text']
model = data['35']['inputs']['unet_name'].split(".")[0]
prompt = data['6']['inputs']['text']
model = data['38']['inputs']['unet_name'].split(".")[0]
except KeyError:
# SDXL workflow
data = json.loads(img.info["prompt"])
@ -109,14 +110,31 @@ def get_current_version():
return version
except subprocess.CalledProcessError as e:
print("Error running bump-my-version:", e)
return None
return "unknown"
def load_models_from_config():
flux_models = load_config()["comfyui:flux"]["models"].split(",")
sdxl_models = load_config()["comfyui"]["models"].split(",")
config = load_config()
# Only load FLUX models if FLUX feature is enabled
use_flux = config["comfyui"].get("flux", "False").lower() == "true"
if use_flux and "comfyui:flux" in config and "models" in config["comfyui:flux"]:
flux_models = config["comfyui:flux"]["models"].split(",")
else:
flux_models = []
sdxl_models = config["comfyui"]["models"].split(",")
# Only load Qwen models if Qwen feature is enabled
use_qwen = config["comfyui"].get("qwen", "False").lower() == "true"
if use_qwen and "comfyui:qwen" in config and "models" in config["comfyui:qwen"]:
qwen_models = config["comfyui:qwen"]["models"].split(",")
else:
qwen_models = []
sorted_flux_models = sorted(flux_models, key=str.lower)
sorted_sdxl_models = sorted(sdxl_models, key=str.lower)
return sorted_sdxl_models, sorted_flux_models
sorted_qwen_models = sorted(qwen_models, key=str.lower)
return sorted_sdxl_models, sorted_flux_models, sorted_qwen_models
def load_topics_from_config():
@ -124,5 +142,104 @@ def load_topics_from_config():
sorted_topics = sorted(topics, key=str.lower)
return sorted_topics
def load_openrouter_models_from_config():
config = load_config()
if config["openrouter"].get("enabled", "False").lower() == "true":
models = config["openrouter"]["models"].split(",")
return sorted([model.strip() for model in models if model.strip()], key=str.lower)
return []
def load_openwebui_models_from_config():
config = load_config()
if "openwebui" in config and "models" in config["openwebui"]:
models = config["openwebui"]["models"].split(",")
return sorted([model.strip() for model in models if model.strip()], key=str.lower)
return []
def load_prompt_models_from_config():
"""Load and return a list of available prompt generation models (both OpenWebUI and OpenRouter)."""
config = load_config()
prompt_models = []
# Add OpenWebUI models if configured
if "openwebui" in config and "models" in config["openwebui"]:
openwebui_models = config["openwebui"]["models"].split(",")
prompt_models.extend([("openwebui", model.strip()) for model in openwebui_models if model.strip()])
# Add OpenRouter models if enabled and configured
if config["openrouter"].get("enabled", "False").lower() == "true" and "models" in config["openrouter"]:
openrouter_models = config["openrouter"]["models"].split(",")
prompt_models.extend([("openrouter", model.strip()) for model in openrouter_models if model.strip()])
return prompt_models
def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
"""Create a prompt using a randomly selected model from OpenWebUI or OpenRouter.
If OpenWebUI fails, it will retry once. If it fails again, it will fallback to OpenRouter.
"""
prompt_models = load_prompt_models_from_config()
if not prompt_models:
logging.warning("No prompt generation models configured.")
return None
# Randomly select a model
service, model = random.choice(prompt_models)
# Import here to avoid circular imports
from libs.openwebui import create_prompt_on_openwebui
from libs.openrouter import create_prompt_on_openrouter
if service == "openwebui":
try:
# First attempt with OpenWebUI
logging.info(f"Attempting to generate prompt with OpenWebUI using model: {model}")
result = create_prompt_on_openwebui(base_prompt, topic, model)
if result:
return result
# If first attempt returns None, try again
logging.warning("First OpenWebUI attempt failed. Retrying...")
result = create_prompt_on_openwebui(base_prompt, topic, model)
if result:
return result
# If second attempt fails, fallback to OpenRouter
logging.warning("Second OpenWebUI attempt failed. Falling back to OpenRouter...")
openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]
if openrouter_models:
_, openrouter_model = random.choice(openrouter_models)
return create_prompt_on_openrouter(base_prompt, topic, openrouter_model)
else:
logging.error("No OpenRouter models configured for fallback.")
return "A colorful abstract composition" # Default fallback prompt
except Exception as e:
logging.error(f"Error with OpenWebUI: {e}")
# Fallback to OpenRouter on exception
logging.warning("OpenWebUI exception. Falling back to OpenRouter...")
openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]
if openrouter_models:
_, openrouter_model = random.choice(openrouter_models)
try:
return create_prompt_on_openrouter(base_prompt, topic, openrouter_model)
except Exception as e2:
logging.error(f"Error with OpenRouter fallback: {e2}")
return "A colorful abstract composition" # Default fallback prompt
else:
logging.error("No OpenRouter models configured for fallback.")
return "A colorful abstract composition" # Default fallback prompt
elif service == "openrouter":
try:
# Use OpenRouter
return create_prompt_on_openrouter(base_prompt, topic, model)
except Exception as e:
logging.error(f"Error with OpenRouter: {e}")
return "A colorful abstract composition" # Default fallback prompt
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]

View File

@ -1,84 +0,0 @@
import random
import logging
import litellm
import nest_asyncio
from libs.generic import load_recent_prompts, load_config
import re
nest_asyncio.apply()
logging.basicConfig(level=logging.INFO)
LOG_FILE = "./prompts_log.jsonl"
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]
def create_prompt_on_openwebui(prompt: str, topic: str = "random") -> str:
"""Sends prompt to OpenWebui and returns the generated response."""
topic_instruction = ""
selected_topic = ""
# Unique list of recent prompts
recent_prompts = list(set(load_recent_prompts()))
if topic == "random":
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
selected_topic = random.choice(topics)
elif topic != "":
selected_topic = topic
else:
# Decide on whether to include a topic (e.g., 30% chance to include)
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
if random.random() < 0.3 and topics:
selected_topic = random.choice(topics)
if selected_topic != "":
topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
user_content = (
"Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors.”"
+ topic_instruction
+ "Avoid prompts similar to the following:"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
)
model = random.choice(user_config["openwebui"]["models"].split(","))
response = litellm.completion(
api_base=user_config["openwebui"]["base_url"],
model="openai/" + model,
messages=[
{
"role": "system",
"content": (
"You are a prompt generator for Stable Diffusion. "
"Generate a detailed and imaginative prompt with a strong visual theme. "
"Focus on lighting, atmosphere, and artistic style. "
"Keep the prompt concise, no extra commentary or formatting."
),
},
{
"role": "user",
"content": user_content,
},
],
api_key=user_config["openwebui"]["api_key"],
)
prompt = response["choices"][0]["message"]["content"].strip('"')
# response = litellm.completion(
# api_base=user_config["openwebui"]["base_url"],
# model="openai/brxce/stable-diffusion-prompt-generator:latest",
# messages=[
# {
# "role": "user",
# "content": prompt,
# },
# ],
# api_key=user_config["openwebui"]["api_key"],
# )
# prompt = response["choices"][0]["message"]["content"].strip('"')
match = re.search(r'"([^"]+)"', prompt)
if not match:
match = re.search(r":\s*\n*\s*(.+)", prompt)
if match:
prompt = match.group(1)
logging.debug(prompt)
return prompt

110
libs/openrouter.py Normal file
View File

@ -0,0 +1,110 @@
import random
import logging
from openai import OpenAI, RateLimitError
import nest_asyncio
from libs.generic import load_recent_prompts, load_config
from libs.openwebui import create_prompt_on_openwebui
import re
nest_asyncio.apply()
logging.basicConfig(level=logging.INFO)
LOG_FILE = "./prompts_log.jsonl"
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]
def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str = None) -> str:
"""Sends prompt to OpenRouter and returns the generated response."""
# Check if OpenRouter is enabled
if user_config["openrouter"].get("enabled", "False").lower() != "true":
logging.warning("OpenRouter is not enabled in the configuration.")
return ""
topic_instruction = ""
selected_topic = ""
# Unique list of recent prompts
recent_prompts = list(set(load_recent_prompts()))
if topic == "random":
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
selected_topic = random.choice(topics) if topics else ""
elif topic != "":
selected_topic = topic
else:
# Decide on whether to include a topic (e.g., 30% chance to include)
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
if random.random() < 0.3 and topics:
selected_topic = random.choice(topics)
if selected_topic != "":
topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
user_content = (
"Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors."
+ topic_instruction
+ "Avoid prompts similar to the following:"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
)
# Use the specified model or select a random model from the configured OpenRouter models
if model:
# Use the specified model
model = model
else:
# Select a random model from the configured OpenRouter models
models = [m.strip() for m in user_config["openrouter"]["models"].split(",") if m.strip()]
if not models:
logging.error("No OpenRouter models configured.")
return ""
model = random.choice(models)
try:
client = OpenAI(
base_url="https://openrouter.ai/api/v1",
api_key=user_config["openrouter"]["api_key"],
)
completion = client.chat.completions.create(
model=model,
messages=[
{
"role": "system",
"content": (
"You are a prompt generator for Stable Diffusion. "
"Generate a detailed and imaginative prompt with a strong visual theme. "
"Focus on lighting, atmosphere, and artistic style. "
"Keep the prompt concise, no extra commentary or formatting."
),
},
{
"role": "user",
"content": user_content,
},
]
)
prompt = completion.choices[0].message.content.strip('"')
match = re.search(r'"([^"]+)"', prompt)
if not match:
match = re.search(r":\s*\n*\s*(.+)", prompt)
if match:
prompt = match.group(1)
logging.debug(prompt)
return prompt
except RateLimitError as e:
logging.warning(f"OpenRouter rate limit exceeded (429): {e}. Falling back to local OpenWebUI model.")
# Try to use OpenWebUI as fallback
openwebui_models = [m.strip() for m in user_config["openwebui"]["models"].split(",") if m.strip()] if "openwebui" in user_config and "models" in user_config["openwebui"] else []
if openwebui_models:
selected_model = random.choice(openwebui_models)
try:
return create_prompt_on_openwebui(user_content, topic, selected_model)
except Exception as e2:
logging.error(f"OpenWebUI fallback also failed: {e2}")
return "A colorful abstract composition" # Final fallback
else:
logging.error("No OpenWebUI models configured for fallback.")
return "A colorful abstract composition" # Final fallback
except Exception as e:
logging.error(f"Error generating prompt with OpenRouter: {e}")
return ""

99
libs/openwebui.py Normal file
View File

@ -0,0 +1,99 @@
import random
import logging
import nest_asyncio
from libs.generic import load_recent_prompts, load_config
import re
from openwebui_chat_client import OpenWebUIClient
from datetime import datetime
nest_asyncio.apply()
logging.basicConfig(level=logging.INFO)
LOG_FILE = "./prompts_log.jsonl"
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]
def create_prompt_on_openwebui(prompt: str, topic: str = "random", model: str = None) -> str:
"""Sends prompt to OpenWebui and returns the generated response."""
topic_instruction = ""
selected_topic = ""
# Unique list of recent prompts
recent_prompts = list(set(load_recent_prompts()))
if topic == "random":
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
selected_topic = random.choice(topics)
elif topic != "":
selected_topic = topic
else:
# Decide on whether to include a topic (e.g., 30% chance to include)
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
if random.random() < 0.3 and topics:
selected_topic = random.choice(topics)
if selected_topic != "":
topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
user_content = (
"Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors."
+ topic_instruction
+ "Avoid prompts similar to the following:"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
)
if model:
# Use the specified model
model = model
else:
# Select a random model
model = random.choice(user_config["openwebui"]["models"].split(",")).strip()
# Create OpenWebUI client
client = OpenWebUIClient(
base_url=user_config["openwebui"]["base_url"],
token=user_config["openwebui"]["api_key"],
default_model_id=model
)
# Prepare messages for the chat
messages = [
{
"role": "system",
"content": (
"You are a prompt generator for Stable Diffusion. "
"Generate a detailed and imaginative prompt with a strong visual theme. "
"Focus on lighting, atmosphere, and artistic style. "
"Keep the prompt concise, no extra commentary or formatting."
),
},
{
"role": "user",
"content": user_content,
},
]
# Send the chat request
try:
result = client.chat(
question=user_content,
chat_title=datetime.now().strftime("%Y-%m-%d %H:%M"),
folder_name="ai-frame-image-server"
)
if result:
prompt = result["response"].strip('"')
else:
# Return None if the request fails
logging.warning(f"OpenWebUI request failed with model: {model}")
return None
except Exception as e:
logging.error(f"Error in OpenWebUI request with model {model}: {e}")
return None
match = re.search(r'"([^"]+)"', prompt)
if not match:
match = re.search(r":\s*\n*\s*(.+)", prompt)
if match:
prompt = match.group(1)
logging.debug(prompt)
return prompt

Binary file not shown.

View File

@ -1,8 +1,9 @@
from . import auth_routes, create_routes, gallery_routes, image_routes, index_routes, job_routes, settings_routes
from . import auth_routes, create_routes, favourites_routes, gallery_routes, image_routes, index_routes, job_routes, settings_routes
__all__ = [
"auth_routes",
"create_routes",
"favourites_routes",
"gallery_routes",
"image_routes",
"index_routes",

View File

@ -1,8 +1,8 @@
from flask import Blueprint, request, render_template, redirect, url_for, session
import threading
from libs.comfyui import create_image, select_model, get_available_models
from libs.ollama import create_prompt_on_openwebui
from libs.generic import load_models_from_config, load_topics_from_config
from libs.comfyui import create_image, select_model, get_available_models, get_queue_count
from libs.openwebui import create_prompt_on_openwebui
from libs.generic import load_models_from_config, load_topics_from_config, load_openrouter_models_from_config, load_openwebui_models_from_config, create_prompt_with_random_model
import os
bp = Blueprint("create_routes", __name__)
@ -12,28 +12,73 @@ user_config = None # will be set in init_app
def create():
if request.method == "POST":
prompt = request.form.get("prompt")
selected_workflow, model = select_model(request.form.get("model") or "Random")
image_model = request.form.get("model") or "Random Image Model"
selected_workflow, model = select_model(image_model)
topic = request.form.get("topic")
if not prompt:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic)
# Get the prompt model from the form data
prompt_model = request.form.get("prompt_model") or ""
if prompt_model and prompt_model != "Random Prompt Model":
# Use the specified prompt model
service, service_model = prompt_model.split(":", 1) if ":" in prompt_model else (prompt_model, "")
if service == "openwebui":
from libs.openwebui import create_prompt_on_openwebui
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic, service_model)
elif service == "openrouter":
from libs.openrouter import create_prompt_on_openrouter
prompt = create_prompt_on_openrouter(user_config["comfyui"]["prompt"], topic, service_model)
else:
# Use a random prompt model
prompt = create_prompt_with_random_model(user_config["comfyui"]["prompt"], topic)
threading.Thread(target=lambda: create_image(prompt, model)).start()
return redirect(url_for("create_routes.image_queued", prompt=prompt, model=model.split(".")[0]))
return render_template("create_image.html", models=load_models_from_config()[0]+load_models_from_config()[1], topics=load_topics_from_config())
# Load all models (SDXL, FLUX, and Qwen)
sdxl_models, flux_models, qwen_models = load_models_from_config()
openwebui_models = load_openwebui_models_from_config()
openrouter_models = load_openrouter_models_from_config()
queue_count = get_queue_count()
return render_template("create_image.html",
sdxl_models=sdxl_models,
flux_models=flux_models,
qwen_models=qwen_models,
openwebui_models=openwebui_models,
openrouter_models=openrouter_models,
topics=load_topics_from_config(),
queue_count=queue_count)
@bp.route("/image_queued")
def image_queued():
prompt = request.args.get("prompt", "No prompt provided.")
model = request.args.get("model", "No model selected.").split(".")[0]
model = request.args.get("model", "No model selected.")
if model == "Random Image Model":
model = "Random"
else:
model = model.split(".")[0]
return render_template("image_queued.html", prompt=prompt, model=model)
@bp.route("/create_image", methods=["GET"])
def create_image_page():
if user_config["frame"]["create_requires_auth"] == "True" and not session.get("authenticated"):
return redirect(url_for("auth_routes.login", next=request.path))
return render_template("create_image.html", models=load_models_from_config()[0]+load_models_from_config()[1], topics=load_topics_from_config())
# Load all models (SDXL, FLUX, and Qwen)
sdxl_models, flux_models, qwen_models = load_models_from_config()
openwebui_models = load_openwebui_models_from_config()
openrouter_models = load_openrouter_models_from_config()
queue_count = get_queue_count()
return render_template("create_image.html",
sdxl_models=sdxl_models,
flux_models=flux_models,
qwen_models=qwen_models,
openwebui_models=openwebui_models,
openrouter_models=openrouter_models,
topics=load_topics_from_config(),
queue_count=queue_count)
def init_app(config):

View File

@ -0,0 +1,23 @@
from flask import Blueprint, jsonify, send_file
import os
import json
bp = Blueprint("favourites_routes", __name__)
favourites_file = "./favourites.json"
def get_favourites():
if not os.path.exists(favourites_file):
return []
with open(favourites_file, 'r') as f:
return json.load(f)
@bp.route("/favourites", methods=["GET"])
def favourites():
"""
Route to return the favourites.json file
"""
if os.path.exists(favourites_file):
return send_file(favourites_file, mimetype='application/json')
else:
# If the file doesn't exist, return an empty array as JSON
return jsonify([])

View File

@ -11,12 +11,10 @@ def index():
image_filename = "./image.png"
image_path = os.path.join(image_folder, image_filename)
prompt = get_details_from_png(image_path)["p"]
version = get_current_version()
return render_template(
"index.html",
image=image_filename,
prompt=prompt,
reload_interval=user_config["frame"]["reload_interval"],
version=version,
)

View File

@ -1,8 +1,12 @@
from flask import Blueprint
from libs.comfyui import cancel_current_job
from flask import Blueprint, jsonify
from libs.comfyui import cancel_current_job, get_queue_details
bp = Blueprint("job_routes", __name__)
@bp.route("/cancel", methods=["GET"])
def cancel_job():
return cancel_current_job()
@bp.route("/api/queue", methods=["GET"])
def api_queue():
return jsonify(get_queue_details())

View File

@ -12,7 +12,7 @@
<!-- Version number at bottom right -->
<div class="version">
<a href="{{ url_for('settings_route.config_editor') }}">v{{ version }}</a>
<a href="{{ url_for('settings_route.config_editor') }}">{% if version and version != 'unknown' %}v{{ version }}{% else %}v?.?.?{% endif %}</a>
</div>
{% block scripts %}{% endblock %}

View File

@ -33,6 +33,28 @@
align-items: center;
}
.model-selection {
display: flex;
flex-wrap: wrap;
gap: 20px;
justify-content: center;
margin: 20px 0;
width: 100%;
max-width: 800px;
}
.model-group {
display: flex;
flex-direction: column;
align-items: flex-start;
gap: 5px;
}
.model-group label {
font-weight: bold;
color: #ddd;
}
button,
select {
background: #333;
@ -43,6 +65,7 @@
font-size: 16px;
cursor: pointer;
transition: background 0.3s;
min-width: 150px;
}
button:hover,
@ -50,6 +73,7 @@
background: #555;
}
#spinner-overlay {
position: fixed;
inset: 0;
@ -90,6 +114,15 @@
width: 100%;
}
.model-selection {
flex-direction: column;
align-items: stretch;
}
.model-group {
align-items: stretch;
}
button,
select {
width: 100%;
@ -99,44 +132,138 @@
height: 150px;
}
}
.queue-dropdown {
position: absolute;
top: 100%;
right: 0;
background: #222;
border: 1px solid #444;
border-radius: 5px;
padding: 10px;
z-index: 1001;
display: none;
max-height: 300px;
overflow-y: auto;
width: 400px;
}
.queue-item {
margin-bottom: 5px;
padding: 5px;
border-bottom: 1px solid #333;
}
.queue-item:last-child {
border-bottom: none;
}
.queue-item .prompt {
font-size: 0.9em;
color: #aaa;
white-space: normal;
word-wrap: break-word;
position: relative;
cursor: pointer;
}
.queue-item .prompt:hover::after {
content: "Model: " attr(data-model);
position: absolute;
bottom: 100%;
left: 0;
background: #333;
color: #00aaff;
padding: 4px 8px;
border-radius: 4px;
font-size: 0.8em;
white-space: nowrap;
z-index: 1002;
box-shadow: 0 2px 4px rgba(0,0,0,0.3);
}
</style>
{% endblock %}
{% block content %}
<div class="queue-container" style="position: fixed; top: 20px; right: 20px; z-index: 1000;">
<button id="queue-btn" style="background: #333; color: white; border: none; padding: 5px 10px; border-radius: 5px; cursor: pointer;">
Queue: <span id="queue-count">{{ queue_count | default(0) }}</span>
</button>
<div id="queue-dropdown" class="queue-dropdown">
<!-- Queue items will be populated here -->
</div>
</div>
<h1 style="margin-bottom: 20px;">Create An Image</h1>
<textarea id="prompt-box" placeholder="Enter your custom prompt here..."></textarea>
<div class="button-group">
<button onclick="showSpinner(); location.href='/'">Back</button>
<button onclick="sendPrompt()">Send Prompt</button>
<button onclick="randomPrompt()">Random Prompt</button>
</div>
<select id="model-select">
<option value="" selected>Random</option>
<optgroup label="FLUX">
{% for m in models if 'flux' in m|lower %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
{% endfor %}
</optgroup>
<optgroup label="SDXL">
{% for m in models if 'flux' not in m|lower %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
{% endfor %}
</optgroup>
</select>
<select id="topic-select">
<option value="">No Topic</option>
<option value="random">Random</option>
<optgroup label="Topics">
{% for t in topics %}
<option value="{{ t }}">{{ t }}</option>
{% endfor %}
</optgroup>
</select>
<div class="model-selection">
<div class="model-group">
<label for="model-select">Image Model:</label>
<select id="model-select">
<option value="" selected>Random Image Model</option>
{% if flux_models %}
<optgroup label="FLUX">
{% for m in flux_models %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] if '.' in m else m }}</option>
{% endfor %}
</optgroup>
{% endif %}
{% if qwen_models %}
<optgroup label="Qwen">
{% for m in qwen_models %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] if '.' in m else m }}</option>
{% endfor %}
</optgroup>
{% endif %}
{% if sdxl_models %}
<optgroup label="SDXL">
{% for m in sdxl_models %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] if '.' in m else m }}</option>
{% endfor %}
</optgroup>
{% endif %}
</select>
</div>
<div class="model-group">
<label for="prompt-model-select">Prompt Model:</label>
<select id="prompt-model-select">
<option value="" selected>Random Prompt Model</option>
{% if openwebui_models %}
<optgroup label="OpenWebUI">
{% for m in openwebui_models %}
<option value="openwebui:{{ m }}">{{ m }}</option>
{% endfor %}
</optgroup>
{% endif %}
{% if openrouter_models %}
<optgroup label="OpenRouter">
{% for m in openrouter_models %}
<option value="openrouter:{{ m }}">{{ m }}</option>
{% endfor %}
</optgroup>
{% endif %}
</select>
</div>
<div class="model-group">
<label for="topic-select">Topic:</label>
<select id="topic-select">
<option value="">No Topic</option>
<option value="random">Random</option>
<optgroup label="Topics">
{% for t in topics %}
<option value="{{ t }}">{{ t }}</option>
{% endfor %}
</optgroup>
</select>
</div>
</div>
<div id="spinner-overlay">
@ -154,10 +281,12 @@
showSpinner();
const prompt = document.getElementById('prompt-box').value;
const model = document.getElementById('model-select').value;
const promptModel = document.getElementById('prompt-model-select').value;
const formData = new URLSearchParams();
formData.append('prompt', prompt);
formData.append('model', model);
formData.append('prompt_model', promptModel);
fetch('/create', {
method: 'POST',
@ -176,10 +305,12 @@
function randomPrompt() {
showSpinner();
const model = document.getElementById('model-select').value;
const promptModel = document.getElementById('prompt-model-select').value;
const topic = document.getElementById('topic-select').value;
const formData = new URLSearchParams();
formData.append('model', model);
formData.append('prompt_model', promptModel);
formData.append('topic', topic);
fetch('/create', {
@ -195,5 +326,59 @@
alert("Error requesting random prompt: " + error);
});
}
document.addEventListener('DOMContentLoaded', function() {
const queueBtn = document.getElementById('queue-btn');
const queueDropdown = document.getElementById('queue-dropdown');
const queueCountSpan = document.getElementById('queue-count');
// Toggle dropdown visibility
queueBtn.addEventListener('click', function(e) {
e.stopPropagation();
if (queueDropdown.style.display === 'block') {
queueDropdown.style.display = 'none';
} else {
fetchQueueDetails();
queueDropdown.style.display = 'block';
}
});
// Close dropdown when clicking outside
document.addEventListener('click', function() {
queueDropdown.style.display = 'none';
});
// Prevent dropdown from closing when clicking inside it
queueDropdown.addEventListener('click', function(e) {
e.stopPropagation();
});
function fetchQueueDetails() {
fetch('/api/queue')
.then(response => response.json())
.then(jobs => {
queueCountSpan.textContent = jobs.length;
const container = queueDropdown;
container.innerHTML = '';
if (jobs.length === 0) {
container.innerHTML = '<div class="queue-item">No jobs in queue</div>';
return;
}
jobs.forEach(job => {
const item = document.createElement('div');
item.className = 'queue-item';
item.innerHTML = `
<div class="prompt" data-model="${job.model}">${job.prompt}</div>
`;
container.appendChild(item);
});
})
.catch(error => {
console.error('Error fetching queue:', error);
queueDropdown.innerHTML = '<div class="queue-item">Error loading queue</div>';
});
}
});
</script>
{% endblock %}

View File

@ -24,4 +24,9 @@ models = flux1-dev-Q4_0.gguf,flux1-schnell-Q4_0.gguf
[openwebui]
base_url = https://openwebui
api_key = sk-
models = llama3:latest,cogito:14b,gemma3:12b
models = llama3:latest,cogito:14b,gemma3:12b
[openrouter]
enabled = False
api_key =
models = mistralai/mistral-7b-instruct:free,google/gemma-7b-it:free,meta-llama/llama-3.1-8b-instruct:free

View File

@ -1,12 +1,31 @@
{
"6": {
"inputs": {
"text": "Terminator endoskeleton riding a bmx bike",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"39",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Positive Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"62",
1
"13",
0
],
"vae": [
"73",
"41",
0
]
},
@ -15,298 +34,157 @@
"title": "VAE Decode"
}
},
"40": {
"9": {
"inputs": {
"int": 20
},
"class_type": "Int Literal (Image Saver)",
"_meta": {
"title": "Generation Steps"
}
},
"41": {
"inputs": {
"width": 720,
"height": 1080,
"aspect_ratio": "custom",
"swap_dimensions": "Off",
"upscale_factor": 2,
"prescale_factor": 1,
"batch_size": 1
},
"class_type": "CR Aspect Ratio",
"_meta": {
"title": "CR Aspect Ratio"
}
},
"42": {
"inputs": {
"filename": "THISFILE",
"path": "",
"extension": "png",
"steps": [
"40",
0
],
"cfg": [
"52",
0
],
"modelname": "flux1-dev-Q4_0.gguf",
"sampler_name": [
"50",
1
],
"scheduler_name": "normal",
"positive": [
"44",
0
],
"negative": [
"45",
0
],
"seed_value": [
"48",
0
],
"width": [
"41",
0
],
"height": [
"41",
1
],
"lossless_webp": true,
"quality_jpeg_or_webp": 100,
"optimize_png": false,
"counter": 0,
"denoise": [
"53",
0
],
"clip_skip": 0,
"time_format": "%Y-%m-%d-%H%M%S",
"save_workflow_as_json": true,
"embed_workflow": true,
"additional_hashes": "",
"download_civitai_data": true,
"easy_remix": true,
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"filename_prefix": "ComfyUI",
"images": [
"8",
"42",
0
]
},
"class_type": "Image Saver",
"class_type": "SaveImage",
"_meta": {
"title": "CivitAI Image Saver"
"title": "Save Image"
}
},
"44": {
"inputs": {
"text": "Yautja Predator wielding flamethrower in smoky, cyberpunk alleyway darkness",
"speak_and_recognation": {
"__value__": [
false,
true
]
}
},
"class_type": "ttN text",
"_meta": {
"title": "Positive Prompt T5"
}
},
"45": {
"inputs": {
"text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
"speak_and_recognation": {
"__value__": [
false,
true
]
}
},
"class_type": "ttN text",
"_meta": {
"title": "Negative Prompt"
}
},
"47": {
"inputs": {
"text": [
"44",
0
],
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"72",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "Prompt Encoder"
}
},
"48": {
"inputs": {
"seed": 47371998700984,
"increment": 1
},
"class_type": "Seed Generator (Image Saver)",
"_meta": {
"title": "Seed"
}
},
"49": {
"inputs": {
"scheduler": "beta"
},
"class_type": "Scheduler Selector (Comfy) (Image Saver)",
"_meta": {
"title": "Scheduler"
}
},
"50": {
"inputs": {
"sampler_name": "euler"
},
"class_type": "Sampler Selector (Image Saver)",
"_meta": {
"title": "Sampler"
}
},
"52": {
"inputs": {
"float": 3.500000000000001
},
"class_type": "Float Literal (Image Saver)",
"_meta": {
"title": "CFG Scale"
}
},
"53": {
"inputs": {
"float": 1.0000000000000002
},
"class_type": "Float Literal (Image Saver)",
"_meta": {
"title": "Denoise"
}
},
"62": {
"13": {
"inputs": {
"noise": [
"65",
"25",
0
],
"guider": [
"67",
"22",
0
],
"sampler": [
"63",
"16",
0
],
"sigmas": [
"64",
"17",
0
],
"latent_image": [
"41",
5
"27",
0
]
},
"class_type": "SamplerCustomAdvanced",
"_meta": {
"title": "Custom Sampler"
"title": "SamplerCustomAdvanced"
}
},
"63": {
"16": {
"inputs": {
"sampler_name": [
"50",
0
]
"sampler_name": "euler"
},
"class_type": "KSamplerSelect",
"_meta": {
"title": "KSampler Select"
"title": "KSamplerSelect"
}
},
"64": {
"17": {
"inputs": {
"scheduler": [
"49",
0
],
"steps": [
"40",
0
],
"denoise": [
"53",
0
],
"scheduler": "simple",
"steps": 20,
"denoise": 1,
"model": [
"35",
"30",
0
]
},
"class_type": "BasicScheduler",
"_meta": {
"title": "Sigma Generator"
"title": "BasicScheduler"
}
},
"65": {
"inputs": {
"noise_seed": [
"48",
0
]
},
"class_type": "RandomNoise",
"_meta": {
"title": "Noise Generator"
}
},
"67": {
"22": {
"inputs": {
"model": [
"35",
"30",
0
],
"conditioning": [
"47",
"26",
0
]
},
"class_type": "BasicGuider",
"_meta": {
"title": "Prompt Guider"
"title": "BasicGuider"
}
},
"72": {
"25": {
"inputs": {
"noise_seed": 707623342760804
},
"class_type": "RandomNoise",
"_meta": {
"title": "RandomNoise"
}
},
"26": {
"inputs": {
"guidance": 3.5,
"conditioning": [
"6",
0
]
},
"class_type": "FluxGuidance",
"_meta": {
"title": "FluxGuidance"
}
},
"27": {
"inputs": {
"width": 720,
"height": 1088,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage",
"_meta": {
"title": "CR Aspect Ratio"
}
},
"30": {
"inputs": {
"max_shift": 1.15,
"base_shift": 0.5,
"width": 720,
"height": 1088,
"model": [
"38",
0
]
},
"class_type": "ModelSamplingFlux",
"_meta": {
"title": "ModelSamplingFlux"
}
},
"38": {
"inputs": {
"unet_name": "flux1-dev-Q4_0.gguf",
"device": "cuda:1",
"virtual_vram_gb": 0,
"use_other_vram": true,
"expert_mode_allocations": ""
},
"class_type": "UnetLoaderGGUFDisTorchMultiGPU",
"_meta": {
"title": "UnetLoaderGGUFDisTorchMultiGPU"
}
},
"39": {
"inputs": {
"clip_name1": "t5-v1_1-xxl-encoder-Q4_K_M.gguf",
"clip_name2": "clip_l.safetensors",
"type": "flux",
"device": "cuda:0",
"virtual_vram_gb": 0,
"use_other_vram": false,
"use_other_vram": true,
"expert_mode_allocations": ""
},
"class_type": "DualCLIPLoaderGGUFDisTorchMultiGPU",
@ -314,7 +192,7 @@
"title": "DualCLIPLoaderGGUFDisTorchMultiGPU"
}
},
"73": {
"41": {
"inputs": {
"vae_name": "FLUX1/ae.safetensors",
"device": "cuda:0"
@ -324,20 +202,18 @@
"title": "VAELoaderMultiGPU"
}
},
"35": {
"42": {
"inputs": {
"unet_name": "flux1-dev-Q4_0.gguf",
"dequant_dtype": "default",
"patch_dtype": "default",
"patch_on_device": false,
"device": "cuda:1",
"virtual_vram_gb": 0,
"use_other_vram": false,
"expert_mode_allocations": ""
"offload_model": true,
"offload_cache": true,
"anything": [
"8",
0
]
},
"class_type": "UnetLoaderGGUFAdvancedDisTorchMultiGPU",
"class_type": "VRAMCleanup",
"_meta": {
"title": "UnetLoaderGGUFAdvancedDisTorchMultiGPU"
"title": "🎈VRAM-Cleanup"
}
}
}

161
workflow_qwen.json Normal file
View File

@ -0,0 +1,161 @@
{
"93": {
"inputs": {
"text": "jpeg compression",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"126",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"95": {
"inputs": {
"seed": 22,
"steps": 10,
"cfg": 4.5,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"127",
0
],
"positive": [
"100",
0
],
"negative": [
"93",
0
],
"latent_image": [
"97",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"97": {
"inputs": {
"width": 1280,
"height": 768,
"length": 1,
"batch_size": 1
},
"class_type": "EmptyHunyuanLatentVideo",
"_meta": {
"title": "EmptyHunyuanLatentVideo"
}
},
"98": {
"inputs": {
"samples": [
"95",
0
],
"vae": [
"128",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"100": {
"inputs": {
"text": "Terminator riding a push bike",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"126",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"102": {
"inputs": {
"images": [
"129",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"126": {
"inputs": {
"clip_name": "Qwen2.5-VL-7B-Instruct-Q3_K_M.gguf",
"type": "qwen_image",
"device": "cuda:1",
"virtual_vram_gb": 6,
"use_other_vram": true,
"expert_mode_allocations": ""
},
"class_type": "CLIPLoaderGGUFDisTorchMultiGPU",
"_meta": {
"title": "CLIPLoaderGGUFDisTorchMultiGPU"
}
},
"127": {
"inputs": {
"unet_name": "qwen-image-Q2_K.gguf",
"device": "cuda:0",
"virtual_vram_gb": 6,
"use_other_vram": true,
"expert_mode_allocations": ""
},
"class_type": "UnetLoaderGGUFDisTorchMultiGPU",
"_meta": {
"title": "UnetLoaderGGUFDisTorchMultiGPU"
}
},
"128": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors",
"device": "cuda:1"
},
"class_type": "VAELoaderMultiGPU",
"_meta": {
"title": "VAELoaderMultiGPU"
}
},
"129": {
"inputs": {
"offload_model": true,
"offload_cache": true,
"anything": [
"98",
0
]
},
"class_type": "VRAMCleanup",
"_meta": {
"title": "🎈VRAM-Cleanup"
}
}
}

View File

@ -52,6 +52,12 @@
"6": {
"inputs": {
"text": "A bustling cyberpunk street at night, filled with neon signs, rain-soaked pavement, and futuristic street vendors. High detail, vivid neon colors, and realistic reflections.",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"4",
1
@ -65,6 +71,12 @@
"7": {
"inputs": {
"text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"4",
1
@ -95,7 +107,7 @@
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
"10",
0
]
},
@ -103,5 +115,19 @@
"_meta": {
"title": "Save Image"
}
},
"10": {
"inputs": {
"offload_model": true,
"offload_cache": true,
"anything": [
"8",
0
]
},
"class_type": "VRAMCleanup",
"_meta": {
"title": "🎈VRAM-Cleanup"
}
}
}