Compare commits


22 Commits
0.2.20 ... main

SHA1 Message Date
918e37e077 Bump version: 0.3.7 → 0.3.8 2025-08-12 15:21:26 +01:00
f5427d18ed revert blocking image gen 2025-08-12 15:21:23 +01:00
34f8a05035 Bump version: 0.3.6 → 0.3.7 2025-08-12 15:17:34 +01:00
82f29a4fde block generation if image in queue 2025-08-12 15:17:02 +01:00
ad814855ab Bump version: 0.3.5 → 0.3.6 2025-08-12 15:03:57 +01:00
1b75417360 update the queue 2025-08-12 15:03:50 +01:00
9f3cbf736a Bump version: 0.3.4 → 0.3.5 2025-08-12 14:45:40 +01:00
3e46b3363b working queue logic 2025-08-12 14:15:23 +01:00
ff5dfbcbce show queue count 2025-08-12 13:02:38 +01:00
14e69f7608 initial qwen support 2025-08-12 12:08:12 +01:00
1468ac4bbe Bump version: 0.3.3 → 0.3.4 2025-08-09 09:38:52 +01:00
2e13ecfa2f feat(prompt): implement robust error handling and fallback mechanism
Add retry logic and fallback mechanism to prompt generation. When OpenWebUI
fails, the system now attempts a second try before falling back to OpenRouter.
Proper error handling and logging have been added throughout the prompt
generation flow to ensure more reliable operation.
2025-08-09 09:38:46 +01:00
fa59f3cfeb Bump version: 0.3.2 → 0.3.3 2025-07-30 09:22:31 +01:00
fdd2893255 pass the version to all templates 2025-07-30 09:22:28 +01:00
d40f6a95b0 Bump version: 0.3.1 → 0.3.2 2025-07-30 09:03:57 +01:00
f381fbc9c7 fix scheduled task 2025-07-30 09:03:46 +01:00
57bb0fed5b Bump version: 0.3.0 → 0.3.1 2025-07-29 14:31:05 +01:00
6e39c34a58 update the openapi folder name 2025-07-29 14:27:50 +01:00
e2acd2dcd6 openweb ui client rather than litellm and ollama 2025-07-29 14:25:13 +01:00
aa75646d5f Bump version: 0.2.22 → 0.3.0 2025-07-29 13:26:07 +01:00
ba2b943c0d Bump version: 0.2.21 → 0.2.22 2025-07-29 13:16:37 +01:00
5c45b8b832 Bump version: 0.2.20 → 0.2.21 2025-07-29 13:14:37 +01:00
13 changed files with 519 additions and 71 deletions

View File

@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.2.20"
current_version = "0.3.8"
parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
serialize = ["{major}.{minor}.{patch}"]
replace = "{new_version}"
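
With this configuration, each "Bump version" commit in the list above corresponds to one run of the bump-my-version CLI. A minimal sketch, assuming the standard CLI (whether it also commits and tags depends on options not shown in this hunk):

    bump-my-version bump patch   # e.g. 0.3.7 -> 0.3.8, rewrites current_version in place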

View File

@@ -4,7 +4,7 @@ FROM python:3.11-slim
# Set the working directory in the container
WORKDIR /app
# Set version label
ARG VERSION="0.2.20"
ARG VERSION="0.3.8"
LABEL version=$VERSION
# Copy project files into the container
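
Because VERSION is a build argument, the label can be pinned at build time; a hypothetical invocation (the image name and tag are illustrative):

    docker build --build-arg VERSION=0.3.8 -t ai-frame-image-server:0.3.8 .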

View File

@@ -18,6 +18,13 @@ user_config = load_config()
app = Flask(__name__)
app.secret_key = os.environ.get("SECRET_KEY")
# Make version available to all templates
from libs.generic import get_current_version
@app.context_processor
def inject_version():
version = get_current_version()
return dict(version=version)
# Inject config into routes that need it
create_routes.init_app(user_config)
auth_routes.init_app(user_config)
@@ -39,7 +46,16 @@ from libs.comfyui import create_image
def scheduled_task():
print(f"Executing scheduled task at {time.strftime('%Y-%m-%d %H:%M:%S')}")
create_image(None)
# Generate a random prompt using either OpenWebUI or OpenRouter
from libs.generic import create_prompt_with_random_model
prompt = create_prompt_with_random_model("Generate a random detailed prompt for stable diffusion.")
if prompt:
# Pass the sentinel; select_model in libs.comfyui resolves it to a concrete model
model = "Random Image Model"
create_image(prompt, model)
else:
print("Failed to generate a prompt for the scheduled task.")
if user_config["frame"]["auto_regen"] == "True":
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
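
With the inject_version context processor above, every rendered template can reference {{ version }} directly; this is what lets the index route later in this diff drop its explicit version= argument, and the base template render the version link on every page.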

View File

@@ -15,7 +15,7 @@ from tenacity import (
import nest_asyncio
from libs.generic import rename_image, load_config, save_prompt
from libs.create_thumbnail import generate_thumbnail
from libs.ollama import create_prompt_on_openwebui
from libs.openwebui import create_prompt_on_openwebui
nest_asyncio.apply()
logging.basicConfig(level=logging.INFO)
@@ -122,6 +122,7 @@ def generate_image(
def select_model(model: str) -> tuple[str, str]:
use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
use_qwen = json.loads(user_config["comfyui"].get("Qwen", "false").lower())
if model == "Random Image Model":
selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
@@ -133,6 +134,8 @@ def select_model(model: str) -> tuple[str, str]:
if model == "Random Image Model":
if selected_workflow == "FLUX":
valid_models = user_config["comfyui:flux"]["models"].split(",")
elif selected_workflow == "Qwen":
valid_models = user_config["comfyui:qwen"]["models"].split(",")
else: # SDXL
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(set(get_available_models()) & set(available_model_list))
@@ -145,7 +148,11 @@ def create_image(prompt: str | None = None, model: str = "Random Image Model") -
"""Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
if prompt is None:
logging.error("No prompt provided.")
# Generate a random prompt using either OpenWebUI or OpenRouter
from libs.generic import create_prompt_with_random_model
prompt = create_prompt_with_random_model("Generate a random detailed prompt for stable diffusion.")
if not prompt:
logging.error("Failed to generate a prompt.")
return
if not prompt:
@@ -169,7 +176,86 @@ def create_image(prompt: str | None = None, model: str = "Random Image Model") -
model_param="unet_name",
model=model
)
elif selected_workflow == "Qwen":
generate_image(
file_name="image",
comfy_prompt=prompt,
workflow_path="./workflow_qwen.json",
prompt_node="Positive",
seed_node="KSampler",
seed_param="seed",
save_node="Save Image",
save_param="filename_prefix",
model_node="Load Checkpoint",
model_param="ckpt_name",
model=model
)
else: # SDXL
generate_image("image", comfy_prompt=prompt, model=model)
logging.info(f"{selected_workflow} generation started with prompt: {prompt}")
def get_queue_count() -> int:
"""Fetches the current queue count from ComfyUI (pending + running jobs)."""
url = user_config["comfyui"]["comfyui_url"] + "/queue"
try:
response = requests.get(url)
response.raise_for_status()
data = response.json()
pending = len(data.get("queue_pending", []))
running = len(data.get("queue_running", []))
return pending + running
except Exception as e:
logging.error(f"Error fetching queue count: {e}")
return 0
def get_queue_details() -> list:
"""Fetches detailed queue information including model names and prompts."""
url = user_config["comfyui"]["comfyui_url"] + "/queue"
try:
response = requests.get(url)
response.raise_for_status()
data = response.json()
jobs = []
for job_list in [data.get("queue_running", []), data.get("queue_pending", [])]:
for job in job_list:
# Each queue entry is a list; index 2 holds the workflow graph (nodes keyed by id)
prompt_data = job[2]
model = "Unknown"
prompt = "No prompt"
# Find model loader node (works for SDXL/FLUX/Qwen workflows)
for node in prompt_data.values():
if node.get("class_type") in ["CheckpointLoaderSimple", "UnetLoaderGGUFAdvancedDisTorchMultiGPU"]:
model = node["inputs"].get("ckpt_name", "Unknown")
break
# Find prompt node using class_type pattern and title matching
for node in prompt_data.values():
class_type = node.get("class_type", "")
if "CLIPTextEncode" in class_type and "text" in node["inputs"]:
meta = node.get('_meta', {})
title = meta.get('title', '').lower()
if 'positive' in title or 'prompt' in title:
prompt = node["inputs"]["text"]
break
jobs.append({
"id": job[0],
"model": model.split(".")[0] if model != "Unknown" else model,
"prompt": prompt
})
return jobs
except Exception as e:
logging.error(f"Error fetching queue details: {e}")
return []
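Both helpers assume the payload ComfyUI serves from GET /queue: two arrays of job entries, where each entry is a list whose first element the code uses as an id and whose third element is the workflow graph. A hedged sketch of that shape (anything beyond the keys and indices the code actually touches is an assumption):

    # Illustrative /queue response, matching how get_queue_count and get_queue_details index it
    example = {
        "queue_running": [
            [0, "hypothetical-prompt-id", {   # job[0] -> id, job[2] -> workflow graph
                "95": {"class_type": "KSampler", "inputs": {"seed": 22}},
                "100": {"class_type": "CLIPTextEncode",
                        "inputs": {"text": "Terminator riding a push bike"},
                        "_meta": {"title": "CLIP Text Encode (Prompt)"}},
            }],
        ],
        "queue_pending": [],
    }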

View File

@@ -110,14 +110,31 @@ def get_current_version():
return version
except subprocess.CalledProcessError as e:
print("Error running bump-my-version:", e)
return None
return "unknown"
def load_models_from_config():
flux_models = load_config()["comfyui:flux"]["models"].split(",")
sdxl_models = load_config()["comfyui"]["models"].split(",")
config = load_config()
# Only load FLUX models if FLUX feature is enabled
use_flux = config["comfyui"].get("flux", "False").lower() == "true"
if use_flux and "comfyui:flux" in config and "models" in config["comfyui:flux"]:
flux_models = config["comfyui:flux"]["models"].split(",")
else:
flux_models = []
sdxl_models = config["comfyui"]["models"].split(",")
# Only load Qwen models if Qwen feature is enabled
use_qwen = config["comfyui"].get("qwen", "False").lower() == "true"
if use_qwen and "comfyui:qwen" in config and "models" in config["comfyui:qwen"]:
qwen_models = config["comfyui:qwen"]["models"].split(",")
else:
qwen_models = []
sorted_flux_models = sorted(flux_models, key=str.lower)
sorted_sdxl_models = sorted(sdxl_models, key=str.lower)
return sorted_sdxl_models, sorted_flux_models
sorted_qwen_models = sorted(qwen_models, key=str.lower)
return sorted_sdxl_models, sorted_flux_models, sorted_qwen_models
def load_topics_from_config():
@@ -158,7 +175,10 @@ def load_prompt_models_from_config():
def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
"""Create a prompt using a randomly selected model from OpenWebUI or OpenRouter."""
"""Create a prompt using a randomly selected model from OpenWebUI or OpenRouter.
If OpenWebUI fails, it will retry once. If it fails again, it will fallback to OpenRouter.
"""
prompt_models = load_prompt_models_from_config()
if not prompt_models:
@@ -168,16 +188,59 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
# Randomly select a model
service, model = random.choice(prompt_models)
if service == "openwebui":
# Import here to avoid circular imports
from libs.ollama import create_prompt_on_openwebui
return create_prompt_on_openwebui(base_prompt, topic)
elif service == "openrouter":
# Import here to avoid circular imports
from libs.openwebui import create_prompt_on_openwebui
from libs.openrouter import create_prompt_on_openrouter
return create_prompt_on_openrouter(base_prompt, topic)
return None
if service == "openwebui":
try:
# First attempt with OpenWebUI
logging.info(f"Attempting to generate prompt with OpenWebUI using model: {model}")
result = create_prompt_on_openwebui(base_prompt, topic, model)
if result:
return result
# If first attempt returns None, try again
logging.warning("First OpenWebUI attempt failed. Retrying...")
result = create_prompt_on_openwebui(base_prompt, topic, model)
if result:
return result
# If second attempt fails, fallback to OpenRouter
logging.warning("Second OpenWebUI attempt failed. Falling back to OpenRouter...")
openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]
if openrouter_models:
_, openrouter_model = random.choice(openrouter_models)
return create_prompt_on_openrouter(base_prompt, topic, openrouter_model)
else:
logging.error("No OpenRouter models configured for fallback.")
return "A colorful abstract composition" # Default fallback prompt
except Exception as e:
logging.error(f"Error with OpenWebUI: {e}")
# Fallback to OpenRouter on exception
logging.warning("OpenWebUI exception. Falling back to OpenRouter...")
openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]
if openrouter_models:
_, openrouter_model = random.choice(openrouter_models)
try:
return create_prompt_on_openrouter(base_prompt, topic, openrouter_model)
except Exception as e2:
logging.error(f"Error with OpenRouter fallback: {e2}")
return "A colorful abstract composition" # Default fallback prompt
else:
logging.error("No OpenRouter models configured for fallback.")
return "A colorful abstract composition" # Default fallback prompt
elif service == "openrouter":
try:
# Use OpenRouter
return create_prompt_on_openrouter(base_prompt, topic, model)
except Exception as e:
logging.error(f"Error with OpenRouter: {e}")
return "A colorful abstract composition" # Default fallback prompt
return "A colorful abstract composition" # Default fallback prompt
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]
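
create_prompt_with_random_model only relies on load_prompt_models_from_config returning (service, model) pairs, as the random.choice unpacking above shows. A sketch of the expected shape (the model ids are hypothetical):

    prompt_models = [
        ("openwebui", "llama3:latest"),         # hypothetical OpenWebUI model id
        ("openrouter", "openai/gpt-4o-mini"),   # hypothetical OpenRouter model id
    ]
    service, model = random.choice(prompt_models)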

View File

@@ -1,9 +1,11 @@
import random
import logging
import litellm
import nest_asyncio
from libs.generic import load_recent_prompts, load_config
import re
from openwebui_chat_client import OpenWebUIClient
from datetime import datetime
nest_asyncio.apply()
logging.basicConfig(level=logging.INFO)
@@ -33,23 +35,28 @@ def create_prompt_on_openwebui(prompt: str, topic: str = "random", model: str =
topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
user_content = (
"Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors."
+ topic_instruction
+ " Avoid prompts similar to the following:\n"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
)
if model:
# Use the specified model
model = model
else:
# Select a random model
model = random.choice(user_config["openwebui"]["models"].split(","))
response = litellm.completion(
api_base=user_config["openwebui"]["base_url"],
model="openai/" + model,
messages=[
model = random.choice(user_config["openwebui"]["models"].split(",")).strip()
# Create OpenWebUI client
client = OpenWebUIClient(
base_url=user_config["openwebui"]["base_url"],
token=user_config["openwebui"]["api_key"],
default_model_id=model
)
# Prepare messages for the chat
messages = [
{
"role": "system",
"content": (
@@ -63,23 +70,26 @@ def create_prompt_on_openwebui(prompt: str, topic: str = "random", model: str =
"role": "user",
"content": user_content,
},
],
api_key=user_config["openwebui"]["api_key"],
]
# Send the chat request
try:
result = client.chat(
question=user_content,
chat_title=datetime.now().strftime("%Y-%m-%d %H:%M"),
folder_name="ai-frame-image-server"
)
prompt = response["choices"][0]["message"]["content"].strip('"')
# response = litellm.completion(
# api_base=user_config["openwebui"]["base_url"],
# model="openai/brxce/stable-diffusion-prompt-generator:latest",
# messages=[
# {
# "role": "user",
# "content": prompt,
# },
# ],
# api_key=user_config["openwebui"]["api_key"],
# )
# prompt = response["choices"][0]["message"]["content"].strip('"')
if result:
prompt = result["response"].strip('"')
else:
# Return None if the request fails
logging.warning(f"OpenWebUI request failed with model: {model}")
return None
except Exception as e:
logging.error(f"Error in OpenWebUI request with model {model}: {e}")
return None
match = re.search(r'"([^"]+)"', prompt)
if not match:
match = re.search(r":\s*\n*\s*(.+)", prompt)
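
The two patterns above first try to pull a double-quoted phrase out of the model's reply, then fall back to whatever follows a colon. A self-contained illustration (the reply string is made up):

    import re

    reply = 'Sure! Here is an idea: "a rusty lighthouse at dawn"'
    match = re.search(r'"([^"]+)"', reply)
    if not match:
        match = re.search(r":\s*\n*\s*(.+)", reply)
    print(match.group(1))  # -> a rusty lighthouse at dawn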

Binary file not shown.

View File

@@ -1,7 +1,7 @@
from flask import Blueprint, request, render_template, redirect, url_for, session
import threading
from libs.comfyui import create_image, select_model, get_available_models
from libs.ollama import create_prompt_on_openwebui
from libs.comfyui import create_image, select_model, get_available_models, get_queue_count
from libs.openwebui import create_prompt_on_openwebui
from libs.generic import load_models_from_config, load_topics_from_config, load_openrouter_models_from_config, load_openwebui_models_from_config, create_prompt_with_random_model
import os
@@ -23,7 +23,7 @@ def create():
# Use the specified prompt model
service, service_model = prompt_model.split(":", 1) if ":" in prompt_model else (prompt_model, "")
if service == "openwebui":
from libs.ollama import create_prompt_on_openwebui
from libs.openwebui import create_prompt_on_openwebui
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic, service_model)
elif service == "openrouter":
from libs.openrouter import create_prompt_on_openrouter
@@ -35,17 +35,20 @@ def create():
threading.Thread(target=lambda: create_image(prompt, model)).start()
return redirect(url_for("create_routes.image_queued", prompt=prompt, model=model.split(".")[0]))
# Load all models (SDXL and FLUX only)
sdxl_models, flux_models = load_models_from_config()
# Load all models (SDXL, FLUX, and Qwen)
sdxl_models, flux_models, qwen_models = load_models_from_config()
openwebui_models = load_openwebui_models_from_config()
openrouter_models = load_openrouter_models_from_config()
queue_count = get_queue_count()
return render_template("create_image.html",
sdxl_models=sdxl_models,
flux_models=flux_models,
qwen_models=qwen_models,
openwebui_models=openwebui_models,
openrouter_models=openrouter_models,
topics=load_topics_from_config())
topics=load_topics_from_config(),
queue_count=queue_count)
@bp.route("/image_queued")
def image_queued():
@@ -62,17 +65,20 @@ def create_image_page():
if user_config["frame"]["create_requires_auth"] == "True" and not session.get("authenticated"):
return redirect(url_for("auth_routes.login", next=request.path))
# Load all models (SDXL and FLUX only)
sdxl_models, flux_models = load_models_from_config()
# Load all models (SDXL, FLUX, and Qwen)
sdxl_models, flux_models, qwen_models = load_models_from_config()
openwebui_models = load_openwebui_models_from_config()
openrouter_models = load_openrouter_models_from_config()
queue_count = get_queue_count()
return render_template("create_image.html",
sdxl_models=sdxl_models,
flux_models=flux_models,
qwen_models=qwen_models,
openwebui_models=openwebui_models,
openrouter_models=openrouter_models,
topics=load_topics_from_config())
topics=load_topics_from_config(),
queue_count=queue_count)
def init_app(config):

View File

@@ -11,12 +11,10 @@ def index():
image_filename = "./image.png"
image_path = os.path.join(image_folder, image_filename)
prompt = get_details_from_png(image_path)["p"]
version = get_current_version()
return render_template(
"index.html",
image=image_filename,
prompt=prompt,
reload_interval=user_config["frame"]["reload_interval"],
version=version,
)

View File

@@ -1,8 +1,12 @@
from flask import Blueprint
from libs.comfyui import cancel_current_job
from flask import Blueprint, jsonify
from libs.comfyui import cancel_current_job, get_queue_details
bp = Blueprint("job_routes", __name__)
@bp.route("/cancel", methods=["GET"])
def cancel_job():
return cancel_current_job()
@bp.route("/api/queue", methods=["GET"])
def api_queue():
return jsonify(get_queue_details())
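
Given get_queue_details earlier in this diff, the endpoint returns a JSON array of objects with id, model, and prompt keys (the values here are illustrative):

    [
      {"id": 0, "model": "some_sdxl_model", "prompt": "a rusty lighthouse at dawn"}
    ]

The queue dropdown in create_image.html fetches exactly this shape from /api/queue on click.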

View File

@@ -12,7 +12,7 @@
<!-- Version number at bottom right -->
<div class="version">
<a href="{{ url_for('settings_route.config_editor') }}">v{{ version }}</a>
<a href="{{ url_for('settings_route.config_editor') }}">{% if version and version != 'unknown' %}v{{ version }}{% else %}v?.?.?{% endif %}</a>
</div>
{% block scripts %}{% endblock %}

View File

@@ -73,6 +73,7 @@
background: #555;
}
#spinner-overlay {
position: fixed;
inset: 0;
@@ -131,10 +132,66 @@
height: 150px;
}
}
.queue-dropdown {
position: absolute;
top: 100%;
right: 0;
background: #222;
border: 1px solid #444;
border-radius: 5px;
padding: 10px;
z-index: 1001;
display: none;
max-height: 300px;
overflow-y: auto;
width: 400px;
}
.queue-item {
margin-bottom: 5px;
padding: 5px;
border-bottom: 1px solid #333;
}
.queue-item:last-child {
border-bottom: none;
}
.queue-item .prompt {
font-size: 0.9em;
color: #aaa;
white-space: normal;
word-wrap: break-word;
position: relative;
cursor: pointer;
}
.queue-item .prompt:hover::after {
content: "Model: " attr(data-model);
position: absolute;
bottom: 100%;
left: 0;
background: #333;
color: #00aaff;
padding: 4px 8px;
border-radius: 4px;
font-size: 0.8em;
white-space: nowrap;
z-index: 1002;
box-shadow: 0 2px 4px rgba(0,0,0,0.3);
}
</style>
{% endblock %}
{% block content %}
<div class="queue-container" style="position: fixed; top: 20px; right: 20px; z-index: 1000;">
<button id="queue-btn" style="background: #333; color: white; border: none; padding: 5px 10px; border-radius: 5px; cursor: pointer;">
Queue: <span id="queue-count">{{ queue_count | default(0) }}</span>
</button>
<div id="queue-dropdown" class="queue-dropdown">
<!-- Queue items will be populated here -->
</div>
</div>
<h1 style="margin-bottom: 20px;">Create An Image</h1>
<textarea id="prompt-box" placeholder="Enter your custom prompt here..."></textarea>
@@ -157,6 +214,13 @@
{% endfor %}
</optgroup>
{% endif %}
{% if qwen_models %}
<optgroup label="Qwen">
{% for m in qwen_models %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] if '.' in m else m }}</option>
{% endfor %}
</optgroup>
{% endif %}
{% if sdxl_models %}
<optgroup label="SDXL">
{% for m in sdxl_models %}
@@ -262,5 +326,59 @@
alert("Error requesting random prompt: " + error);
});
}
document.addEventListener('DOMContentLoaded', function() {
const queueBtn = document.getElementById('queue-btn');
const queueDropdown = document.getElementById('queue-dropdown');
const queueCountSpan = document.getElementById('queue-count');
// Toggle dropdown visibility
queueBtn.addEventListener('click', function(e) {
e.stopPropagation();
if (queueDropdown.style.display === 'block') {
queueDropdown.style.display = 'none';
} else {
fetchQueueDetails();
queueDropdown.style.display = 'block';
}
});
// Close dropdown when clicking outside
document.addEventListener('click', function() {
queueDropdown.style.display = 'none';
});
// Prevent dropdown from closing when clicking inside it
queueDropdown.addEventListener('click', function(e) {
e.stopPropagation();
});
function fetchQueueDetails() {
fetch('/api/queue')
.then(response => response.json())
.then(jobs => {
queueCountSpan.textContent = jobs.length;
const container = queueDropdown;
container.innerHTML = '';
if (jobs.length === 0) {
container.innerHTML = '<div class="queue-item">No jobs in queue</div>';
return;
}
jobs.forEach(job => {
const item = document.createElement('div');
item.className = 'queue-item';
const promptDiv = document.createElement('div');
promptDiv.className = 'prompt';
promptDiv.dataset.model = job.model;
// textContent (not innerHTML) so prompt text cannot inject markup
promptDiv.textContent = job.prompt;
item.appendChild(promptDiv);
container.appendChild(item);
});
})
.catch(error => {
console.error('Error fetching queue:', error);
queueDropdown.innerHTML = '<div class="queue-item">Error loading queue</div>';
});
}
});
</script>
{% endblock %}

workflow_qwen.json (new file, 147 lines)
View File

@@ -0,0 +1,147 @@
{
"93": {
"inputs": {
"text": "jpeg compression",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"126",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"95": {
"inputs": {
"seed": 22,
"steps": 10,
"cfg": 4.5,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"127",
0
],
"positive": [
"100",
0
],
"negative": [
"93",
0
],
"latent_image": [
"97",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"97": {
"inputs": {
"width": 1280,
"height": 768,
"length": 1,
"batch_size": 1
},
"class_type": "EmptyHunyuanLatentVideo",
"_meta": {
"title": "EmptyHunyuanLatentVideo"
}
},
"98": {
"inputs": {
"samples": [
"95",
0
],
"vae": [
"128",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"100": {
"inputs": {
"text": "Terminator riding a push bike",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"126",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"102": {
"inputs": {
"images": [
"98",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"126": {
"inputs": {
"clip_name": "Qwen2.5-VL-7B-Instruct-Q3_K_M.gguf",
"type": "qwen_image",
"device": "cuda:1",
"virtual_vram_gb": 6,
"use_other_vram": true,
"expert_mode_allocations": ""
},
"class_type": "CLIPLoaderGGUFDisTorchMultiGPU",
"_meta": {
"title": "CLIPLoaderGGUFDisTorchMultiGPU"
}
},
"127": {
"inputs": {
"unet_name": "qwen-image-Q2_K.gguf",
"device": "cuda:0",
"virtual_vram_gb": 6,
"use_other_vram": true,
"expert_mode_allocations": ""
},
"class_type": "UnetLoaderGGUFDisTorchMultiGPU",
"_meta": {
"title": "UnetLoaderGGUFDisTorchMultiGPU"
}
},
"128": {
"inputs": {
"vae_name": "qwen_image_vae.safetensors",
"device": "cuda:1"
},
"class_type": "VAELoaderMultiGPU",
"_meta": {
"title": "VAELoaderMultiGPU"
}
}
}
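
create_image drives this file through generate_image with prompt_node="Positive", seed_node="KSampler", save_node="Save Image", and model_node="Load Checkpoint". Assuming generate_image patches the graph before POSTing it to ComfyUI (the helper itself is not shown in this diff), the per-job edits amount to roughly this sketch, using the node ids visible above:

    import json
    import random

    with open("./workflow_qwen.json") as f:
        wf = json.load(f)

    # Node "100" is the positive CLIPTextEncode, node "95" the KSampler (see the file above)
    wf["100"]["inputs"]["text"] = "a rusty lighthouse at dawn"  # the generated prompt
    wf["95"]["inputs"]["seed"] = random.randint(0, 2**32 - 1)   # fresh seed per job
    # The patched graph is what later appears as job[2] in the /queue entries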