Compare commits


19 Commits
0.3.4 ... main

Author SHA1 Message Date
086695d898 Bump version: 0.3.11 → 0.3.12 2025-09-04 08:58:28 +01:00
a63668cc93 fix(openrouter): handle rate limit errors with OpenWebUI fallback

When OpenRouter returns a 429 error due to rate limiting, the application now attempts to fall back to using an OpenWebUI model instead of returning a default prompt. This provides better resilience when external API limits are exceeded while maintaining functionality through local models.

The changes include:
- Adding RateLimitError import from openai
- Implementing fallback logic in create_prompt_on_openrouter function
- Using OpenWebUI as secondary source for prompts when rate limiting occurs
- Proper error handling and logging for both primary and fallback scenarios

This change improves the robustness of prompt generation: users still receive generated content when an external service is temporarily rate limited. The fallback prioritizes configured local models when available, with a static default prompt as the last resort.

The implementation follows the existing pattern of randomly selecting from the configured OpenWebUI models and handles both primary and fallback failures gracefully while maintaining backward compatibility.
2025-09-04 08:58:24 +01:00
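
Reduced to a hedged sketch, the fallback flow the message describes looks like the following; the ask_* callables are stand-ins for the project's OpenRouter and OpenWebUI helpers (the real implementation is in the diff below), and RateLimitError is the openai-python exception raised on HTTP 429:

import logging
import random

from openai import RateLimitError  # raised by openai-python on HTTP 429

FALLBACK_PROMPT = "A colorful abstract composition"  # final static fallback

def prompt_with_fallback(ask_openrouter, ask_openwebui, openwebui_models):
    """Stand-in helpers: ask_openrouter() queries OpenRouter, ask_openwebui(model)
    queries a local OpenWebUI model; openwebui_models is the configured list."""
    try:
        return ask_openrouter()
    except RateLimitError as exc:
        logging.warning("OpenRouter rate limited (429): %s", exc)
        if not openwebui_models:
            return FALLBACK_PROMPT
        try:
            # Mirror the existing pattern: random choice from configured models.
            return ask_openwebui(random.choice(openwebui_models))
        except Exception as exc2:
            logging.error("OpenWebUI fallback also failed: %s", exc2)
            return FALLBACK_PROMPT
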
06d3a64bb9 Bump version: 0.3.10 → 0.3.11 2025-09-02 13:41:43 +01:00
d7c25373bd clear vram on comfyui 2025-09-02 12:19:38 +01:00
006c88b084 Bump version: 0.3.9 → 0.3.10 2025-09-01 13:22:20 +01:00
e7df200f8c add new venv to gitignore 2025-09-01 13:22:17 +01:00
506dece377 refactor(comfyui.py): improve model selection logic
Refactor `get_available_models` to handle multiple models and improve error handling. Adjust `select_model` to support configurable workflows and fallbacks.
2025-09-01 13:19:28 +01:00
12af531718 Bump version: 0.3.8 → 0.3.9 2025-08-13 09:35:48 +01:00
efefdde70d fix flux 2025-08-13 09:35:42 +01:00
918e37e077 Bump version: 0.3.7 → 0.3.8 2025-08-12 15:21:26 +01:00
f5427d18ed revert blocking image gen 2025-08-12 15:21:23 +01:00
34f8a05035 Bump version: 0.3.6 → 0.3.7 2025-08-12 15:17:34 +01:00
82f29a4fde block generation if image in queue 2025-08-12 15:17:02 +01:00
ad814855ab Bump version: 0.3.5 → 0.3.6 2025-08-12 15:03:57 +01:00
1b75417360 update the queue 2025-08-12 15:03:50 +01:00
9f3cbf736a Bump version: 0.3.4 → 0.3.5 2025-08-12 14:45:40 +01:00
3e46b3363b working queue logic 2025-08-12 14:15:23 +01:00
ff5dfbcbce show queue count 2025-08-12 13:02:38 +01:00
14e69f7608 initial qwen support 2025-08-12 12:08:12 +01:00
12 changed files with 612 additions and 276 deletions

View File

@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.3.4"
current_version = "0.3.12"
parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
serialize = ["{major}.{minor}.{patch}"]
replace = "{new_version}"

.gitignore vendored
View File

@@ -12,3 +12,4 @@ test.py
.vscode/launch.json
favourites.json
.vscode/launch.json
venv/*

View File

@@ -4,7 +4,7 @@ FROM python:3.11-slim
# Set the working directory in the container
WORKDIR /app
# Set version label
ARG VERSION="0.3.4"
ARG VERSION="0.3.12"
LABEL version=$VERSION
# Copy project files into the container

View File

@@ -32,9 +32,17 @@ def get_available_models() -> list:
    response = requests.get(url)
    if response.status_code == 200:
        data = response.json()
        general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [])[0]
        flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [])[0]
        return general + flux
        # Get SDXL models from CheckpointLoaderSimple
        general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [[]])[0]
        # Get FLUX models from UnetLoaderGGUF
        flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [[]])[0]
        # Combine both lists, handling cases where one might be missing
        all_models = []
        if isinstance(general, list):
            all_models.extend(general)
        if isinstance(flux, list):
            all_models.extend(flux)
        return all_models
    else:
        print(f"Failed to fetch models: {response.status_code}")
        return []
@@ -122,20 +130,46 @@ def generate_image(
def select_model(model: str) -> tuple[str, str]:
    use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
    only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
    use_qwen = json.loads(user_config["comfyui"].get("Qwen", "false").lower())
    if model == "Random Image Model":
        selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
        # Create a list of available workflows based on configuration
        available_workflows = []
        if not only_flux:
            available_workflows.append("SDXL")
        if use_flux:
            available_workflows.append("FLUX")
        if use_qwen:
            available_workflows.append("Qwen")
        # If no workflows are available, default to SDXL
        if not available_workflows:
            available_workflows.append("SDXL")
        # Randomly select a workflow
        selected_workflow = random.choice(available_workflows)
    elif "flux" in model.lower():
        selected_workflow = "FLUX"
    elif "qwen" in model.lower():
        selected_workflow = "Qwen"
    else:
        selected_workflow = "SDXL"
    if model == "Random Image Model":
        if selected_workflow == "FLUX":
            valid_models = user_config["comfyui:flux"]["models"].split(",")
        elif selected_workflow == "Qwen":
            valid_models = user_config["comfyui:qwen"]["models"].split(",")
        else:  # SDXL
            available_model_list = user_config["comfyui"]["models"].split(",")
            valid_models = list(set(get_available_models()) & set(available_model_list))
            # If no valid models found, fall back to configured models
            if not valid_models:
                valid_models = available_model_list
        # Ensure we have at least one model to choose from
        if not valid_models:
            # Fallback to a default model
            valid_models = ["zavychromaxl_v100.safetensors"]
        model = random.choice(valid_models)
    return selected_workflow, model
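
The branching above has a subtle edge case: ONLY_FLUX suppresses SDXL, but if FLUX itself is disabled the pool would be empty, hence the final default. Isolated as a minimal sketch (flag names abbreviated; logic copied from the diff above, with asserts showing the edge cases):

def workflow_pool(use_flux: bool, only_flux: bool, use_qwen: bool) -> list:
    # Same pool-building rules as select_model() above, nothing else.
    pool = []
    if not only_flux:
        pool.append("SDXL")
    if use_flux:
        pool.append("FLUX")
    if use_qwen:
        pool.append("Qwen")
    return pool or ["SDXL"]  # empty pool falls back to SDXL

assert workflow_pool(True, True, False) == ["FLUX"]          # ONLY_FLUX wins
assert workflow_pool(False, False, True) == ["SDXL", "Qwen"]
assert workflow_pool(False, True, False) == ["SDXL"]         # empty-pool default
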
@@ -164,16 +198,95 @@ def create_image(prompt: str | None = None, model: str = "Random Image Model") -
            file_name="image",
            comfy_prompt=prompt,
            workflow_path="./workflow_flux.json",
            prompt_node="Positive Prompt T5",
            seed_node="Seed",
            seed_param="seed",
            save_node="CivitAI Image Saver",
            save_param="filename",
            model_node="UnetLoaderGGUFAdvancedDisTorchMultiGPU",
            prompt_node="CLIP Text Encode (Positive Prompt)",
            seed_node="RandomNoise",
            seed_param="noise_seed",
            save_node="Save Image",
            save_param="filename_prefix",
            model_node="UnetLoaderGGUFDisTorchMultiGPU",
            model_param="unet_name",
            model=model
        )
    elif selected_workflow == "Qwen":
        generate_image(
            file_name="image",
            comfy_prompt=prompt,
            workflow_path="./workflow_qwen.json",
            prompt_node="Positive",
            seed_node="KSampler",
            seed_param="seed",
            save_node="Save Image",
            save_param="filename_prefix",
            model_node="Load Checkpoint",
            model_param="ckpt_name",
            model=model
        )
    else:  # SDXL
        generate_image("image", comfy_prompt=prompt, model=model)
    logging.info(f"{selected_workflow} generation started with prompt: {prompt}")

def get_queue_count() -> int:
    """Fetches the current queue count from ComfyUI (pending + running jobs)."""
    url = user_config["comfyui"]["comfyui_url"] + "/queue"
    try:
        response = requests.get(url)
        response.raise_for_status()
        data = response.json()
        pending = len(data.get("queue_pending", []))
        running = len(data.get("queue_running", []))
        return pending + running
    except Exception as e:
        logging.error(f"Error fetching queue count: {e}")
        return 0

def get_queue_details() -> list:
    """Fetches detailed queue information including model names and prompts."""
    url = user_config["comfyui"]["comfyui_url"] + "/queue"
    try:
        response = requests.get(url)
        response.raise_for_status()
        data = response.json()
        jobs = []
        for job_list in [data.get("queue_running", []), data.get("queue_pending", [])]:
            for job in job_list:
                # Extract prompt data (format: [priority, time, prompt])
                prompt_data = job[2]
                model = "Unknown"
                prompt = "No prompt"
                # Find model loader node (works for SDXL/FLUX/Qwen workflows)
                for node in prompt_data.values():
                    if node.get("class_type") in ["CheckpointLoaderSimple", "UnetLoaderGGUFAdvancedDisTorchMultiGPU"]:
                        model = node["inputs"].get("ckpt_name", "Unknown")
                        break
                # Find prompt node using class_type pattern and title matching
                for node in prompt_data.values():
                    class_type = node.get("class_type", "")
                    if "CLIPTextEncode" in class_type and "text" in node["inputs"]:
                        meta = node.get('_meta', {})
                        title = meta.get('title', '').lower()
                        if 'positive' in title or 'prompt' in title:
                            prompt = node["inputs"]["text"]
                            break
                jobs.append({
                    "id": job[0],
                    "model": model.split(".")[0] if model != "Unknown" else model,
                    "prompt": prompt
                })
        return jobs
    except Exception as e:
        logging.error(f"Error fetching queue details: {e}")
        return []
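
For orientation, a hedged illustration of the /queue payload shape these helpers parse; the layout is inferred from the code above (job[0] used as the id, job[2] as the workflow graph) rather than from an official ComfyUI schema, and every node id, model name, and prompt below is made up:

# Hypothetical /queue response, shaped the way get_queue_details() reads it.
sample_queue = {
    "queue_running": [],
    "queue_pending": [
        [
            42,                    # job[0]: queue position, reported as "id"
            "some-prompt-uuid",    # job[1]: not used by the helpers above
            {                      # job[2]: workflow graph keyed by node id
                "4": {
                    "class_type": "CheckpointLoaderSimple",
                    "inputs": {"ckpt_name": "zavychromaxl_v100.safetensors"},
                },
                "6": {
                    "class_type": "CLIPTextEncode",
                    "inputs": {"text": "a lighthouse at dusk"},
                    "_meta": {"title": "Positive Prompt"},
                },
            },
        ]
    ],
}
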

View File

@@ -84,8 +84,8 @@ def get_details_from_png(path):
    try:
        # Flux workflow
        data = json.loads(img.info["prompt"])
        prompt = data['44']['inputs']['text']
        model = data['35']['inputs']['unet_name'].split(".")[0]
        prompt = data['6']['inputs']['text']
        model = data['38']['inputs']['unet_name'].split(".")[0]
    except KeyError:
        # SDXL workflow
        data = json.loads(img.info["prompt"])
@@ -113,11 +113,28 @@ def get_current_version():
    return "unknown"

def load_models_from_config():
    flux_models = load_config()["comfyui:flux"]["models"].split(",")
    sdxl_models = load_config()["comfyui"]["models"].split(",")
    config = load_config()
    # Only load FLUX models if FLUX feature is enabled
    use_flux = config["comfyui"].get("flux", "False").lower() == "true"
    if use_flux and "comfyui:flux" in config and "models" in config["comfyui:flux"]:
        flux_models = config["comfyui:flux"]["models"].split(",")
    else:
        flux_models = []
    sdxl_models = config["comfyui"]["models"].split(",")
    # Only load Qwen models if Qwen feature is enabled
    use_qwen = config["comfyui"].get("qwen", "False").lower() == "true"
    if use_qwen and "comfyui:qwen" in config and "models" in config["comfyui:qwen"]:
        qwen_models = config["comfyui:qwen"]["models"].split(",")
    else:
        qwen_models = []
    sorted_flux_models = sorted(flux_models, key=str.lower)
    sorted_sdxl_models = sorted(sdxl_models, key=str.lower)
    return sorted_sdxl_models, sorted_flux_models
    sorted_qwen_models = sorted(qwen_models, key=str.lower)
    return sorted_sdxl_models, sorted_flux_models, sorted_qwen_models

def load_topics_from_config():
@@ -223,7 +240,6 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
        logging.error(f"Error with OpenRouter: {e}")
        return "A colorful abstract composition"  # Default fallback prompt
    return "A colorful abstract composition"  # Default fallback prompt
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]
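
Note the signature change: load_models_from_config() now returns three lists, so every call site has to unpack a 3-tuple (the route changes further down do exactly this). A minimal usage sketch:

# Callers must unpack all three lists now; the Qwen list is empty when disabled.
sdxl_models, flux_models, qwen_models = load_models_from_config()
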

View File

@ -1,8 +1,9 @@
import random
import logging
from openai import OpenAI
from openai import OpenAI, RateLimitError
import nest_asyncio
from libs.generic import load_recent_prompts, load_config
from libs.openwebui import create_prompt_on_openwebui
import re
nest_asyncio.apply()
@@ -90,6 +91,20 @@ def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str =
            prompt = match.group(1)
        logging.debug(prompt)
        return prompt
    except RateLimitError as e:
        logging.warning(f"OpenRouter rate limit exceeded (429): {e}. Falling back to local OpenWebUI model.")
        # Try to use OpenWebUI as fallback
        openwebui_models = [m.strip() for m in user_config["openwebui"]["models"].split(",") if m.strip()] if "openwebui" in user_config and "models" in user_config["openwebui"] else []
        if openwebui_models:
            selected_model = random.choice(openwebui_models)
            try:
                return create_prompt_on_openwebui(user_content, topic, selected_model)
            except Exception as e2:
                logging.error(f"OpenWebUI fallback also failed: {e2}")
                return "A colorful abstract composition"  # Final fallback
        else:
            logging.error("No OpenWebUI models configured for fallback.")
            return "A colorful abstract composition"  # Final fallback
    except Exception as e:
        logging.error(f"Error generating prompt with OpenRouter: {e}")
        return ""

View File

@ -1,6 +1,6 @@
from flask import Blueprint, request, render_template, redirect, url_for, session
import threading
from libs.comfyui import create_image, select_model, get_available_models
from libs.comfyui import create_image, select_model, get_available_models, get_queue_count
from libs.openwebui import create_prompt_on_openwebui
from libs.generic import load_models_from_config, load_topics_from_config, load_openrouter_models_from_config, load_openwebui_models_from_config, create_prompt_with_random_model
import os
@@ -35,17 +35,20 @@ def create():
        threading.Thread(target=lambda: create_image(prompt, model)).start()
        return redirect(url_for("create_routes.image_queued", prompt=prompt, model=model.split(".")[0]))
    # Load all models (SDXL and FLUX only)
    sdxl_models, flux_models = load_models_from_config()
    # Load all models (SDXL, FLUX, and Qwen)
    sdxl_models, flux_models, qwen_models = load_models_from_config()
    openwebui_models = load_openwebui_models_from_config()
    openrouter_models = load_openrouter_models_from_config()
    queue_count = get_queue_count()
    return render_template("create_image.html",
                           sdxl_models=sdxl_models,
                           flux_models=flux_models,
                           qwen_models=qwen_models,
                           openwebui_models=openwebui_models,
                           openrouter_models=openrouter_models,
                           topics=load_topics_from_config())
                           topics=load_topics_from_config(),
                           queue_count=queue_count)
@bp.route("/image_queued")
def image_queued():
@@ -62,17 +65,20 @@ def create_image_page():
    if user_config["frame"]["create_requires_auth"] == "True" and not session.get("authenticated"):
        return redirect(url_for("auth_routes.login", next=request.path))
    # Load all models (SDXL and FLUX only)
    sdxl_models, flux_models = load_models_from_config()
    # Load all models (SDXL, FLUX, and Qwen)
    sdxl_models, flux_models, qwen_models = load_models_from_config()
    openwebui_models = load_openwebui_models_from_config()
    openrouter_models = load_openrouter_models_from_config()
    queue_count = get_queue_count()
    return render_template("create_image.html",
                           sdxl_models=sdxl_models,
                           flux_models=flux_models,
                           qwen_models=qwen_models,
                           openwebui_models=openwebui_models,
                           openrouter_models=openrouter_models,
                           topics=load_topics_from_config())
                           topics=load_topics_from_config(),
                           queue_count=queue_count)
def init_app(config):

View File

@@ -1,8 +1,12 @@
from flask import Blueprint
from libs.comfyui import cancel_current_job
from flask import Blueprint, jsonify
from libs.comfyui import cancel_current_job, get_queue_details

bp = Blueprint("job_routes", __name__)

@bp.route("/cancel", methods=["GET"])
def cancel_job():
    return cancel_current_job()

@bp.route("/api/queue", methods=["GET"])
def api_queue():
    return jsonify(get_queue_details())
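
A quick way to exercise the new endpoint; a sketch assuming a local dev server on port 5000 (host and port depend on the deployment):

import requests

# Returns the list of {"id": ..., "model": ..., "prompt": ...} dicts
# built by get_queue_details() above.
jobs = requests.get("http://localhost:5000/api/queue").json()
for job in jobs:
    print(job["id"], job["model"], job["prompt"])
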

View File

@@ -73,6 +73,7 @@
    background: #555;
}

#spinner-overlay {
    position: fixed;
    inset: 0;
@@ -131,10 +132,66 @@
        height: 150px;
    }
}

.queue-dropdown {
    position: absolute;
    top: 100%;
    right: 0;
    background: #222;
    border: 1px solid #444;
    border-radius: 5px;
    padding: 10px;
    z-index: 1001;
    display: none;
    max-height: 300px;
    overflow-y: auto;
    width: 400px;
}

.queue-item {
    margin-bottom: 5px;
    padding: 5px;
    border-bottom: 1px solid #333;
}

.queue-item:last-child {
    border-bottom: none;
}

.queue-item .prompt {
    font-size: 0.9em;
    color: #aaa;
    white-space: normal;
    word-wrap: break-word;
    position: relative;
    cursor: pointer;
}

.queue-item .prompt:hover::after {
    content: "Model: " attr(data-model);
    position: absolute;
    bottom: 100%;
    left: 0;
    background: #333;
    color: #00aaff;
    padding: 4px 8px;
    border-radius: 4px;
    font-size: 0.8em;
    white-space: nowrap;
    z-index: 1002;
    box-shadow: 0 2px 4px rgba(0,0,0,0.3);
}
</style>
{% endblock %}
{% block content %}
<div class="queue-container" style="position: fixed; top: 20px; right: 20px; z-index: 1000;">
<button id="queue-btn" style="background: #333; color: white; border: none; padding: 5px 10px; border-radius: 5px; cursor: pointer;">
Queue: <span id="queue-count">{{ queue_count | default(0) }}</span>
</button>
<div id="queue-dropdown" class="queue-dropdown">
<!-- Queue items will be populated here -->
</div>
</div>
<h1 style="margin-bottom: 20px;">Create An Image</h1>
<textarea id="prompt-box" placeholder="Enter your custom prompt here..."></textarea>
@@ -157,6 +214,13 @@
        {% endfor %}
    </optgroup>
{% endif %}
{% if qwen_models %}
    <optgroup label="Qwen">
        {% for m in qwen_models %}
            <option value="{{ m }}">{{ m.rsplit('.', 1)[0] if '.' in m else m }}</option>
        {% endfor %}
    </optgroup>
{% endif %}
{% if sdxl_models %}
    <optgroup label="SDXL">
        {% for m in sdxl_models %}
@@ -262,5 +326,59 @@
            alert("Error requesting random prompt: " + error);
        });
}

document.addEventListener('DOMContentLoaded', function() {
    const queueBtn = document.getElementById('queue-btn');
    const queueDropdown = document.getElementById('queue-dropdown');
    const queueCountSpan = document.getElementById('queue-count');

    // Toggle dropdown visibility
    queueBtn.addEventListener('click', function(e) {
        e.stopPropagation();
        if (queueDropdown.style.display === 'block') {
            queueDropdown.style.display = 'none';
        } else {
            fetchQueueDetails();
            queueDropdown.style.display = 'block';
        }
    });

    // Close dropdown when clicking outside
    document.addEventListener('click', function() {
        queueDropdown.style.display = 'none';
    });

    // Prevent dropdown from closing when clicking inside it
    queueDropdown.addEventListener('click', function(e) {
        e.stopPropagation();
    });

    function fetchQueueDetails() {
        fetch('/api/queue')
            .then(response => response.json())
            .then(jobs => {
                queueCountSpan.textContent = jobs.length;
                const container = queueDropdown;
                container.innerHTML = '';
                if (jobs.length === 0) {
                    container.innerHTML = '<div class="queue-item">No jobs in queue</div>';
                    return;
                }
                jobs.forEach(job => {
                    const item = document.createElement('div');
                    item.className = 'queue-item';
                    item.innerHTML = `
                        <div class="prompt" data-model="${job.model}">${job.prompt}</div>
                    `;
                    container.appendChild(item);
                });
            })
            .catch(error => {
                console.error('Error fetching queue:', error);
                queueDropdown.innerHTML = '<div class="queue-item">Error loading queue</div>';
            });
    }
});
</script>
{% endblock %}

View File

@@ -1,12 +1,31 @@
{
"6": {
"inputs": {
"text": "Terminator endoskeleton riding a bmx bike",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"39",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Positive Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"62",
1
"13",
0
],
"vae": [
"73",
"41",
0
]
},
@@ -15,298 +34,157 @@
"title": "VAE Decode"
}
},
"40": {
"9": {
"inputs": {
"int": 20
},
"class_type": "Int Literal (Image Saver)",
"_meta": {
"title": "Generation Steps"
}
},
"41": {
"inputs": {
"width": 720,
"height": 1080,
"aspect_ratio": "custom",
"swap_dimensions": "Off",
"upscale_factor": 2,
"prescale_factor": 1,
"batch_size": 1
},
"class_type": "CR Aspect Ratio",
"_meta": {
"title": "CR Aspect Ratio"
}
},
"42": {
"inputs": {
"filename": "THISFILE",
"path": "",
"extension": "png",
"steps": [
"40",
0
],
"cfg": [
"52",
0
],
"modelname": "flux1-dev-Q4_0.gguf",
"sampler_name": [
"50",
1
],
"scheduler_name": "normal",
"positive": [
"44",
0
],
"negative": [
"45",
0
],
"seed_value": [
"48",
0
],
"width": [
"41",
0
],
"height": [
"41",
1
],
"lossless_webp": true,
"quality_jpeg_or_webp": 100,
"optimize_png": false,
"counter": 0,
"denoise": [
"53",
0
],
"clip_skip": 0,
"time_format": "%Y-%m-%d-%H%M%S",
"save_workflow_as_json": true,
"embed_workflow": true,
"additional_hashes": "",
"download_civitai_data": true,
"easy_remix": true,
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"filename_prefix": "ComfyUI",
"images": [
"8",
"42",
0
]
},
"class_type": "Image Saver",
"class_type": "SaveImage",
"_meta": {
"title": "CivitAI Image Saver"
"title": "Save Image"
}
},
"44": {
"inputs": {
"text": "Yautja Predator wielding flamethrower in smoky, cyberpunk alleyway darkness",
"speak_and_recognation": {
"__value__": [
false,
true
]
}
},
"class_type": "ttN text",
"_meta": {
"title": "Positive Prompt T5"
}
},
"45": {
"inputs": {
"text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
"speak_and_recognation": {
"__value__": [
false,
true
]
}
},
"class_type": "ttN text",
"_meta": {
"title": "Negative Prompt"
}
},
"47": {
"inputs": {
"text": [
"44",
0
],
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"72",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "Prompt Encoder"
}
},
"48": {
"inputs": {
"seed": 47371998700984,
"increment": 1
},
"class_type": "Seed Generator (Image Saver)",
"_meta": {
"title": "Seed"
}
},
"49": {
"inputs": {
"scheduler": "beta"
},
"class_type": "Scheduler Selector (Comfy) (Image Saver)",
"_meta": {
"title": "Scheduler"
}
},
"50": {
"inputs": {
"sampler_name": "euler"
},
"class_type": "Sampler Selector (Image Saver)",
"_meta": {
"title": "Sampler"
}
},
"52": {
"inputs": {
"float": 3.500000000000001
},
"class_type": "Float Literal (Image Saver)",
"_meta": {
"title": "CFG Scale"
}
},
"53": {
"inputs": {
"float": 1.0000000000000002
},
"class_type": "Float Literal (Image Saver)",
"_meta": {
"title": "Denoise"
}
},
"62": {
"13": {
"inputs": {
"noise": [
"65",
"25",
0
],
"guider": [
"67",
"22",
0
],
"sampler": [
"63",
"16",
0
],
"sigmas": [
"64",
"17",
0
],
"latent_image": [
"41",
5
"27",
0
]
},
"class_type": "SamplerCustomAdvanced",
"_meta": {
"title": "Custom Sampler"
"title": "SamplerCustomAdvanced"
}
},
"63": {
"16": {
"inputs": {
"sampler_name": [
"50",
0
]
"sampler_name": "euler"
},
"class_type": "KSamplerSelect",
"_meta": {
"title": "KSampler Select"
"title": "KSamplerSelect"
}
},
"64": {
"17": {
"inputs": {
"scheduler": [
"49",
0
],
"steps": [
"40",
0
],
"denoise": [
"53",
0
],
"scheduler": "simple",
"steps": 20,
"denoise": 1,
"model": [
"35",
"30",
0
]
},
"class_type": "BasicScheduler",
"_meta": {
"title": "Sigma Generator"
"title": "BasicScheduler"
}
},
"65": {
"inputs": {
"noise_seed": [
"48",
0
]
},
"class_type": "RandomNoise",
"_meta": {
"title": "Noise Generator"
}
},
"67": {
"22": {
"inputs": {
"model": [
"35",
"30",
0
],
"conditioning": [
"47",
"26",
0
]
},
"class_type": "BasicGuider",
"_meta": {
"title": "Prompt Guider"
"title": "BasicGuider"
}
},
"72": {
"25": {
"inputs": {
"noise_seed": 707623342760804
},
"class_type": "RandomNoise",
"_meta": {
"title": "RandomNoise"
}
},
"26": {
"inputs": {
"guidance": 3.5,
"conditioning": [
"6",
0
]
},
"class_type": "FluxGuidance",
"_meta": {
"title": "FluxGuidance"
}
},
"27": {
"inputs": {
"width": 720,
"height": 1088,
"batch_size": 1
},
"class_type": "EmptySD3LatentImage",
"_meta": {
"title": "CR Aspect Ratio"
}
},
"30": {
"inputs": {
"max_shift": 1.15,
"base_shift": 0.5,
"width": 720,
"height": 1088,
"model": [
"38",
0
]
},
"class_type": "ModelSamplingFlux",
"_meta": {
"title": "ModelSamplingFlux"
}
},
"38": {
"inputs": {
"unet_name": "flux1-dev-Q4_0.gguf",
"device": "cuda:1",
"virtual_vram_gb": 0,
"use_other_vram": true,
"expert_mode_allocations": ""
},
"class_type": "UnetLoaderGGUFDisTorchMultiGPU",
"_meta": {
"title": "UnetLoaderGGUFDisTorchMultiGPU"
}
},
"39": {
"inputs": {
"clip_name1": "t5-v1_1-xxl-encoder-Q4_K_M.gguf",
"clip_name2": "clip_l.safetensors",
"type": "flux",
"device": "cuda:0",
"virtual_vram_gb": 0,
"use_other_vram": false,
"use_other_vram": true,
"expert_mode_allocations": ""
},
"class_type": "DualCLIPLoaderGGUFDisTorchMultiGPU",
@@ -314,7 +192,7 @@
"title": "DualCLIPLoaderGGUFDisTorchMultiGPU"
}
},
"73": {
"41": {
"inputs": {
"vae_name": "FLUX1/ae.safetensors",
"device": "cuda:0"
@@ -324,20 +202,18 @@
"title": "VAELoaderMultiGPU"
}
},
"35": {
"42": {
"inputs": {
"unet_name": "flux1-dev-Q4_0.gguf",
"dequant_dtype": "default",
"patch_dtype": "default",
"patch_on_device": false,
"device": "cuda:1",
"virtual_vram_gb": 0,
"use_other_vram": false,
"expert_mode_allocations": ""
"offload_model": true,
"offload_cache": true,
"anything": [
"8",
0
]
},
"class_type": "UnetLoaderGGUFAdvancedDisTorchMultiGPU",
"class_type": "VRAMCleanup",
"_meta": {
"title": "UnetLoaderGGUFAdvancedDisTorchMultiGPU"
"title": "🎈VRAM-Cleanup"
}
}
}

workflow_qwen.json Normal file
View File

@@ -0,0 +1,161 @@
{
"93": {
"inputs": {
"text": "jpeg compression",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"126",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"95": {
"inputs": {
"seed": 22,
"steps": 10,
"cfg": 4.5,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"127",
0
],
"positive": [
"100",
0
],
"negative": [
"93",
0
],
"latent_image": [
"97",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"97": {
"inputs": {
"width": 1280,
"height": 768,
"length": 1,
"batch_size": 1
},
"class_type": "EmptyHunyuanLatentVideo",
"_meta": {
"title": "EmptyHunyuanLatentVideo"
}
},
"98": {
"inputs": {
"samples": [
"95",
0
],
"vae": [
"128",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"100": {
"inputs": {
"text": "Terminator riding a push bike",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"126",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"102": {
"inputs": {
"images": [
"129",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"126": {
"inputs": {
"clip_name": "Qwen2.5-VL-7B-Instruct-Q3_K_M.gguf",
"type": "qwen_image",
"device": "cuda:1",
"virtual_vram_gb": 6,
"use_other_vram": true,
"expert_mode_allocations": ""
},
"class_type": "CLIPLoaderGGUFDisTorchMultiGPU",
"_meta": {
"title": "CLIPLoaderGGUFDisTorchMultiGPU"
}
},
"127": {
"inputs": {
"unet_name": "qwen-image-Q2_K.gguf",
"device": "cuda:0",
"virtual_vram_gb": 6,
"use_other_vram": true,
"expert_mode_allocations": ""
},
"class_type": "UnetLoaderGGUFDisTorchMultiGPU",
"_meta": {
"title": "UnetLoaderGGUFDisTorchMultiGPU"
}
},
"128": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors",
"device": "cuda:1"
},
"class_type": "VAELoaderMultiGPU",
"_meta": {
"title": "VAELoaderMultiGPU"
}
},
"129": {
"inputs": {
"offload_model": true,
"offload_cache": true,
"anything": [
"98",
0
]
},
"class_type": "VRAMCleanup",
"_meta": {
"title": "🎈VRAM-Cleanup"
}
}
}

View File

@@ -52,6 +52,12 @@
"6": {
"inputs": {
"text": "A bustling cyberpunk street at night, filled with neon signs, rain-soaked pavement, and futuristic street vendors. High detail, vivid neon colors, and realistic reflections.",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"4",
1
@@ -65,6 +71,12 @@
"7": {
"inputs": {
"text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"4",
1
@@ -95,7 +107,7 @@
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
"10",
0
]
},
@@ -103,5 +115,19 @@
"_meta": {
"title": "Save Image"
}
},
"10": {
"inputs": {
"offload_model": true,
"offload_cache": true,
"anything": [
"8",
0
]
},
"class_type": "VRAMCleanup",
"_meta": {
"title": "🎈VRAM-Cleanup"
}
}
}