Mirror of https://github.com/karl0ss/ai_image_frame_server.git (synced 2025-09-08 15:33:16 +01:00)

Compare commits: "main" vs "0.3.7"
No commits in common. "main" and "0.3.7" have entirely different histories.
In the hunks below, the "-" side is main (version 0.3.12) and the "+" side is the 0.3.7 tag.
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.3.12"
+current_version = "0.3.7"
 parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
 serialize = ["{major}.{minor}.{patch}"]
 replace = "{new_version}"
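Note: the bumpversion tooling reads the [tool.bumpversion] settings above to find and rewrite the version string. A minimal sketch of that parse/serialize round trip, using the same regex and format (the bump_patch helper below is illustrative only, not code from this repo):

import re

# Same pattern and format as the "parse" / "serialize" settings above.
PARSE = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
SERIALIZE = "{major}.{minor}.{patch}"

def bump_patch(version: str) -> str:
    """Split a version with the configured regex and rebuild it with the patch part bumped."""
    parts = re.fullmatch(PARSE, version).groupdict()
    parts["patch"] = str(int(parts["patch"]) + 1)
    return SERIALIZE.format(**parts)

print(bump_patch("0.3.7"))   # -> 0.3.8
print(bump_patch("0.3.12"))  # -> 0.3.13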
.gitignore (vendored): 1 line changed
@@ -12,4 +12,3 @@ test.py
 .vscode/launch.json
 favourites.json
 .vscode/launch.json
-venv/*
@@ -4,7 +4,7 @@ FROM python:3.11-slim
 # Set the working directory in the container
 WORKDIR /app
 # Set version label
-ARG VERSION="0.3.12"
+ARG VERSION="0.3.7"
 LABEL version=$VERSION

 # Copy project files into the container
@@ -32,17 +32,9 @@ def get_available_models() -> list:
     response = requests.get(url)
     if response.status_code == 200:
         data = response.json()
-        # Get SDXL models from CheckpointLoaderSimple
-        general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [[]])[0]
-        # Get FLUX models from UnetLoaderGGUF
-        flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [[]])[0]
-        # Combine both lists, handling cases where one might be missing
-        all_models = []
-        if isinstance(general, list):
-            all_models.extend(general)
-        if isinstance(flux, list):
-            all_models.extend(flux)
-        return all_models
+        general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [])[0]
+        flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [])[0]
+        return general + flux
     else:
         print(f"Failed to fetch models: {response.status_code}")
         return []
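Note: the "-" (main) version above defends against either loader type being absent from the ComfyUI response, while the "+" (0.3.7) version assumes both exist. A short sketch of why, assuming the URL points at ComfyUI's /object_info endpoint (the url variable itself is outside this hunk): ckpt_name and unet_name arrive as a one-element list whose first item is the list of model file names, so the [[]] default plus isinstance checks avoid an IndexError when a node pack is not installed.

# Illustrative only: a trimmed /object_info-style payload (assumed shape).
data = {
    "CheckpointLoaderSimple": {
        "input": {"required": {"ckpt_name": [["sdxl_a.safetensors", "sdxl_b.safetensors"]]}}
    }
    # "UnetLoaderGGUF" deliberately missing, e.g. the GGUF node pack is not installed.
}

general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [[]])[0]
flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [[]])[0]

all_models = []
if isinstance(general, list):
    all_models.extend(general)
if isinstance(flux, list):
    all_models.extend(flux)
print(all_models)  # ['sdxl_a.safetensors', 'sdxl_b.safetensors'] even with the FLUX loader absent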
@@ -133,25 +125,9 @@ def select_model(model: str) -> tuple[str, str]:
     use_qwen = json.loads(user_config["comfyui"].get("Qwen", "false").lower())

     if model == "Random Image Model":
-        # Create a list of available workflows based on configuration
-        available_workflows = []
-        if not only_flux:
-            available_workflows.append("SDXL")
-        if use_flux:
-            available_workflows.append("FLUX")
-        if use_qwen:
-            available_workflows.append("Qwen")
-
-        # If no workflows are available, default to SDXL
-        if not available_workflows:
-            available_workflows.append("SDXL")
-
-        # Randomly select a workflow
-        selected_workflow = random.choice(available_workflows)
+        selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
     elif "flux" in model.lower():
         selected_workflow = "FLUX"
-    elif "qwen" in model.lower():
-        selected_workflow = "Qwen"
     else:
         selected_workflow = "SDXL"

@@ -163,13 +139,6 @@ def select_model(model: str) -> tuple[str, str]:
     else:  # SDXL
         available_model_list = user_config["comfyui"]["models"].split(",")
         valid_models = list(set(get_available_models()) & set(available_model_list))
-        # If no valid models found, fall back to configured models
-        if not valid_models:
-            valid_models = available_model_list
-        # Ensure we have at least one model to choose from
-        if not valid_models:
-            # Fallback to a default model
-            valid_models = ["zavychromaxl_v100.safetensors"]
         model = random.choice(valid_models)

     return selected_workflow, model
@@ -198,12 +167,12 @@ def create_image(prompt: str | None = None, model: str = "Random Image Model") -
             file_name="image",
             comfy_prompt=prompt,
             workflow_path="./workflow_flux.json",
-            prompt_node="CLIP Text Encode (Positive Prompt)",
-            seed_node="RandomNoise",
-            seed_param="noise_seed",
-            save_node="Save Image",
-            save_param="filename_prefix",
-            model_node="UnetLoaderGGUFDisTorchMultiGPU",
+            prompt_node="Positive Prompt T5",
+            seed_node="Seed",
+            seed_param="seed",
+            save_node="CivitAI Image Saver",
+            save_param="filename",
+            model_node="UnetLoaderGGUFAdvancedDisTorchMultiGPU",
             model_param="unet_name",
             model=model
         )
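Note: the keyword arguments above name node titles and input keys inside workflow_flux.json; the helper that receives them is not shown in this compare. A hedged sketch of the patch-by-title pattern such a helper typically applies to an API-format ComfyUI workflow (the function name and structure below are assumptions, not code from the repo):

import json
import random

def patch_workflow(workflow_path: str, comfy_prompt: str, file_name: str, model: str,
                   prompt_node: str, seed_node: str, seed_param: str,
                   save_node: str, save_param: str,
                   model_node: str, model_param: str) -> dict:
    """Load an API-format workflow and rewrite inputs on nodes looked up by their _meta title."""
    with open(workflow_path) as f:
        workflow = json.load(f)

    def find(title: str) -> dict:
        # Nodes are keyed by numeric id; match on the human-readable title instead.
        for node in workflow.values():
            if node.get("_meta", {}).get("title") == title:
                return node
        raise KeyError(f"node titled {title!r} not found")

    find(prompt_node)["inputs"]["text"] = comfy_prompt
    find(seed_node)["inputs"][seed_param] = random.getrandbits(48)
    find(save_node)["inputs"][save_param] = file_name
    find(model_node)["inputs"][model_param] = model
    return workflow  # ready to submit to ComfyUI

With the "+" values above this resolves to the "Positive Prompt T5" (44), "Seed" (48), "CivitAI Image Saver" (42) and UNet loader (35) nodes of the updated FLUX workflow shown later in this compare.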
@@ -84,8 +84,8 @@ def get_details_from_png(path):
     try:
         # Flux workflow
         data = json.loads(img.info["prompt"])
-        prompt = data['6']['inputs']['text']
-        model = data['38']['inputs']['unet_name'].split(".")[0]
+        prompt = data['44']['inputs']['text']
+        model = data['35']['inputs']['unet_name'].split(".")[0]
     except KeyError:
         # SDXL workflow
         data = json.loads(img.info["prompt"])
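Note: the node ids above track the reworked FLUX workflow later in this compare: the prompt text moves from the old CLIPTextEncode node '6' to the 'Positive Prompt T5' node '44', and the UNet name from loader node '38' to '35'. A small sketch of the read path, assuming Pillow supplies the img object (the line that opens the file sits outside this hunk):

import json
from PIL import Image

def read_flux_details(path: str) -> tuple[str, str]:
    """Pull the prompt text and model name out of the workflow metadata embedded in a generated PNG."""
    img = Image.open(path)
    data = json.loads(img.info["prompt"])                    # API-format workflow stored as a PNG text chunk
    prompt = data["44"]["inputs"]["text"]                    # "Positive Prompt T5" node
    model = data["35"]["inputs"]["unet_name"].split(".")[0]  # GGUF UNet loader node
    return prompt, model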
@@ -240,6 +240,7 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
         logging.error(f"Error with OpenRouter: {e}")
         return "A colorful abstract composition" # Default fallback prompt

+        return "A colorful abstract composition" # Default fallback prompt

 user_config = load_config()
 output_folder = user_config["comfyui"]["output_dir"]
@@ -1,9 +1,8 @@
 import random
 import logging
-from openai import OpenAI, RateLimitError
+from openai import OpenAI
 import nest_asyncio
 from libs.generic import load_recent_prompts, load_config
-from libs.openwebui import create_prompt_on_openwebui
 import re
 nest_asyncio.apply()

@@ -91,20 +90,6 @@ def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str =
             prompt = match.group(1)
         logging.debug(prompt)
         return prompt
-    except RateLimitError as e:
-        logging.warning(f"OpenRouter rate limit exceeded (429): {e}. Falling back to local OpenWebUI model.")
-        # Try to use OpenWebUI as fallback
-        openwebui_models = [m.strip() for m in user_config["openwebui"]["models"].split(",") if m.strip()] if "openwebui" in user_config and "models" in user_config["openwebui"] else []
-        if openwebui_models:
-            selected_model = random.choice(openwebui_models)
-            try:
-                return create_prompt_on_openwebui(user_content, topic, selected_model)
-            except Exception as e2:
-                logging.error(f"OpenWebUI fallback also failed: {e2}")
-                return "A colorful abstract composition"  # Final fallback
-        else:
-            logging.error("No OpenWebUI models configured for fallback.")
-            return "A colorful abstract composition"  # Final fallback
     except Exception as e:
         logging.error(f"Error generating prompt with OpenRouter: {e}")
         return ""
@@ -73,6 +73,19 @@
       background: #555;
     }

+    button:disabled {
+      background: #555;
+      cursor: not-allowed;
+      opacity: 0.6;
+    }
+
+    .queue-message {
+      color: #ffcc00;
+      font-size: 14px;
+      margin-top: 10px;
+      text-align: center;
+      min-height: 20px;
+    }
+
     #spinner-overlay {
       position: fixed;
@@ -198,9 +211,10 @@

   <div class="button-group">
     <button onclick="showSpinner(); location.href='/'">Back</button>
-    <button onclick="sendPrompt()">Send Prompt</button>
-    <button onclick="randomPrompt()">Random Prompt</button>
+    <button id="send-prompt-btn" onclick="sendPrompt()">Send Prompt</button>
+    <button id="random-prompt-btn" onclick="randomPrompt()">Random Prompt</button>
   </div>
+  <div id="queue-message" class="queue-message"></div>

   <div class="model-selection">
     <div class="model-group">
@@ -274,10 +288,31 @@
 {% block scripts %}
 <script>
   const overlay = document.getElementById('spinner-overlay');
+  const sendPromptBtn = document.getElementById('send-prompt-btn');
+  const randomPromptBtn = document.getElementById('random-prompt-btn');
+  const queueMessage = document.getElementById('queue-message');

   function showSpinner() { overlay.style.visibility = 'visible'; }

+  function updateButtonStates(queueCount) {
+    if (queueCount > 0) {
+      sendPromptBtn.disabled = true;
+      randomPromptBtn.disabled = true;
+      queueMessage.textContent = "Please wait until the current image is processed...";
+    } else {
+      sendPromptBtn.disabled = false;
+      randomPromptBtn.disabled = false;
+      queueMessage.textContent = "";
+    }
+  }
+
   function sendPrompt() {
+    // Check if buttons are disabled
+    if (sendPromptBtn.disabled) {
+      alert("Please wait until the current image is processed.");
+      return;
+    }
+
     showSpinner();
     const prompt = document.getElementById('prompt-box').value;
     const model = document.getElementById('model-select').value;
@@ -303,6 +338,12 @@
   }

   function randomPrompt() {
+    // Check if buttons are disabled
+    if (randomPromptBtn.disabled) {
+      alert("Please wait until the current image is processed.");
+      return;
+    }
+
     showSpinner();
     const model = document.getElementById('model-select').value;
     const promptModel = document.getElementById('prompt-model-select').value;
@@ -331,6 +372,10 @@
   const queueDropdown = document.getElementById('queue-dropdown');
   const queueCountSpan = document.getElementById('queue-count');

+  // Check initial queue count and update button states
+  const initialQueueCount = parseInt(queueCountSpan.textContent) || 0;
+  updateButtonStates(initialQueueCount);
+
   // Toggle dropdown visibility
   queueBtn.addEventListener('click', function(e) {
     e.stopPropagation();
@@ -356,7 +401,10 @@
     fetch('/api/queue')
       .then(response => response.json())
       .then(jobs => {
-        queueCountSpan.textContent = jobs.length;
+        const queueCount = jobs.length;
+        queueCountSpan.textContent = queueCount;
+        updateButtonStates(queueCount);
+
         const container = queueDropdown;
         container.innerHTML = '';

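Note: the polling code above expects /api/queue to return a JSON array of pending jobs and only uses its length; the server-side route is not part of this compare. A hedged sketch of a matching endpoint, assuming a Flask app (which the Jinja templates suggest) and an in-memory job list purely for illustration:

from flask import Flask, jsonify

app = Flask(__name__)

# Illustrative stand-in; the real app presumably tracks queued ComfyUI jobs elsewhere.
pending_jobs: list[dict] = []

@app.route("/api/queue")
def api_queue():
    # The front-end only relies on the array length, so any JSON list of job objects works.
    return jsonify(pending_jobs)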
@@ -1,31 +1,12 @@
 {
-  "6": {
-    "inputs": {
-      "text": "Terminator endoskeleton riding a bmx bike",
-      "speak_and_recognation": {
-        "__value__": [
-          false,
-          true
-        ]
-      },
-      "clip": [
-        "39",
-        0
-      ]
-    },
-    "class_type": "CLIPTextEncode",
-    "_meta": {
-      "title": "CLIP Text Encode (Positive Prompt)"
-    }
-  },
   "8": {
     "inputs": {
       "samples": [
-        "13",
-        0
+        "62",
+        1
       ],
       "vae": [
-        "41",
+        "73",
         0
       ]
     },
@@ -34,157 +15,298 @@
       "title": "VAE Decode"
     }
   },
-  "9": {
+  "40": {
     "inputs": {
-      "filename_prefix": "ComfyUI",
+      "int": 20
+    },
+    "class_type": "Int Literal (Image Saver)",
+    "_meta": {
+      "title": "Generation Steps"
+    }
+  },
+  "41": {
+    "inputs": {
+      "width": 720,
+      "height": 1080,
+      "aspect_ratio": "custom",
+      "swap_dimensions": "Off",
+      "upscale_factor": 2,
+      "prescale_factor": 1,
+      "batch_size": 1
+    },
+    "class_type": "CR Aspect Ratio",
+    "_meta": {
+      "title": "CR Aspect Ratio"
+    }
+  },
+  "42": {
+    "inputs": {
+      "filename": "THISFILE",
+      "path": "",
+      "extension": "png",
+      "steps": [
+        "40",
+        0
+      ],
+      "cfg": [
+        "52",
+        0
+      ],
+      "modelname": "flux1-dev-Q4_0.gguf",
+      "sampler_name": [
+        "50",
+        1
+      ],
+      "scheduler_name": "normal",
+      "positive": [
+        "44",
+        0
+      ],
+      "negative": [
+        "45",
+        0
+      ],
+      "seed_value": [
+        "48",
+        0
+      ],
+      "width": [
+        "41",
+        0
+      ],
+      "height": [
+        "41",
+        1
+      ],
+      "lossless_webp": true,
+      "quality_jpeg_or_webp": 100,
+      "optimize_png": false,
+      "counter": 0,
+      "denoise": [
+        "53",
+        0
+      ],
+      "clip_skip": 0,
+      "time_format": "%Y-%m-%d-%H%M%S",
+      "save_workflow_as_json": true,
+      "embed_workflow": true,
+      "additional_hashes": "",
+      "download_civitai_data": true,
+      "easy_remix": true,
+      "speak_and_recognation": {
+        "__value__": [
+          false,
+          true
+        ]
+      },
"images": [
|
"images": [
|
||||||
"42",
|
"8",
|
||||||
0
|
0
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"class_type": "SaveImage",
|
"class_type": "Image Saver",
|
||||||
"_meta": {
|
"_meta": {
|
||||||
"title": "Save Image"
|
"title": "CivitAI Image Saver"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"13": {
|
"44": {
|
||||||
|
"inputs": {
|
||||||
|
"text": "Yautja Predator wielding flamethrower in smoky, cyberpunk alleyway darkness",
|
||||||
|
"speak_and_recognation": {
|
||||||
|
"__value__": [
|
||||||
|
false,
|
||||||
|
true
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"class_type": "ttN text",
|
||||||
|
"_meta": {
|
||||||
|
"title": "Positive Prompt T5"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"45": {
|
||||||
|
"inputs": {
|
||||||
|
"text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
|
||||||
|
"speak_and_recognation": {
|
||||||
|
"__value__": [
|
||||||
|
false,
|
||||||
|
true
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"class_type": "ttN text",
|
||||||
|
"_meta": {
|
||||||
|
"title": "Negative Prompt"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"47": {
|
||||||
|
"inputs": {
|
||||||
|
"text": [
|
||||||
|
"44",
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"speak_and_recognation": {
|
||||||
|
"__value__": [
|
||||||
|
false,
|
||||||
|
true
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"clip": [
|
||||||
|
"72",
|
||||||
|
0
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"class_type": "CLIPTextEncode",
|
||||||
|
"_meta": {
|
||||||
|
"title": "Prompt Encoder"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"48": {
|
||||||
|
"inputs": {
|
||||||
|
"seed": 47371998700984,
|
||||||
|
"increment": 1
|
||||||
|
},
|
||||||
|
"class_type": "Seed Generator (Image Saver)",
|
||||||
|
"_meta": {
|
||||||
|
"title": "Seed"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"49": {
|
||||||
|
"inputs": {
|
||||||
|
"scheduler": "beta"
|
||||||
|
},
|
||||||
|
"class_type": "Scheduler Selector (Comfy) (Image Saver)",
|
||||||
|
"_meta": {
|
||||||
|
"title": "Scheduler"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"50": {
|
||||||
|
"inputs": {
|
||||||
|
"sampler_name": "euler"
|
||||||
|
},
|
||||||
|
"class_type": "Sampler Selector (Image Saver)",
|
||||||
|
"_meta": {
|
||||||
|
"title": "Sampler"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"52": {
|
||||||
|
"inputs": {
|
||||||
|
"float": 3.500000000000001
|
||||||
|
},
|
||||||
|
"class_type": "Float Literal (Image Saver)",
|
||||||
|
"_meta": {
|
||||||
|
"title": "CFG Scale"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"53": {
|
||||||
|
"inputs": {
|
||||||
|
"float": 1.0000000000000002
|
||||||
|
},
|
||||||
|
"class_type": "Float Literal (Image Saver)",
|
||||||
|
"_meta": {
|
||||||
|
"title": "Denoise"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"62": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"noise": [
|
"noise": [
|
||||||
"25",
|
"65",
|
||||||
0
|
0
|
||||||
],
|
],
|
||||||
"guider": [
|
"guider": [
|
||||||
"22",
|
"67",
|
||||||
0
|
0
|
||||||
],
|
],
|
||||||
"sampler": [
|
"sampler": [
|
||||||
"16",
|
"63",
|
||||||
0
|
0
|
||||||
],
|
],
|
||||||
"sigmas": [
|
"sigmas": [
|
||||||
"17",
|
"64",
|
||||||
0
|
0
|
||||||
],
|
],
|
||||||
"latent_image": [
|
"latent_image": [
|
||||||
"27",
|
"41",
|
||||||
0
|
5
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"class_type": "SamplerCustomAdvanced",
|
"class_type": "SamplerCustomAdvanced",
|
||||||
"_meta": {
|
"_meta": {
|
||||||
"title": "SamplerCustomAdvanced"
|
"title": "Custom Sampler"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"16": {
|
"63": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"sampler_name": "euler"
|
"sampler_name": [
|
||||||
|
"50",
|
||||||
|
0
|
||||||
|
]
|
||||||
},
|
},
|
||||||
"class_type": "KSamplerSelect",
|
"class_type": "KSamplerSelect",
|
||||||
"_meta": {
|
"_meta": {
|
||||||
"title": "KSampler Select"
|
"title": "KSampler Select"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"17": {
|
"64": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"scheduler": "simple",
|
"scheduler": [
|
||||||
"steps": 20,
|
"49",
|
||||||
"denoise": 1,
|
0
|
||||||
|
],
|
||||||
|
"steps": [
|
||||||
|
"40",
|
||||||
|
0
|
||||||
|
],
|
||||||
|
"denoise": [
|
||||||
|
"53",
|
||||||
|
0
|
||||||
|
],
|
||||||
"model": [
|
"model": [
|
||||||
"30",
|
"35",
|
||||||
0
|
0
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"class_type": "BasicScheduler",
|
"class_type": "BasicScheduler",
|
||||||
"_meta": {
|
"_meta": {
|
||||||
"title": "BasicScheduler"
|
"title": "Sigma Generator"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"22": {
|
"65": {
|
||||||
|
"inputs": {
|
||||||
|
"noise_seed": [
|
||||||
|
"48",
|
||||||
|
0
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"class_type": "RandomNoise",
|
||||||
|
"_meta": {
|
||||||
|
"title": "Noise Generator"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"67": {
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"model": [
|
"model": [
|
||||||
"30",
|
"35",
|
||||||
0
|
0
|
||||||
],
|
],
|
||||||
"conditioning": [
|
"conditioning": [
|
||||||
"26",
|
"47",
|
||||||
0
|
0
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
"class_type": "BasicGuider",
|
"class_type": "BasicGuider",
|
||||||
"_meta": {
|
"_meta": {
|
||||||
"title": "BasicGuider"
|
"title": "Prompt Guider"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"25": {
|
"72": {
|
||||||
"inputs": {
|
|
||||||
"noise_seed": 707623342760804
|
|
||||||
},
|
|
||||||
"class_type": "RandomNoise",
|
|
||||||
"_meta": {
|
|
||||||
"title": "RandomNoise"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"26": {
|
|
||||||
"inputs": {
|
|
||||||
"guidance": 3.5,
|
|
||||||
"conditioning": [
|
|
||||||
"6",
|
|
||||||
0
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"class_type": "FluxGuidance",
|
|
||||||
"_meta": {
|
|
||||||
"title": "FluxGuidance"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"27": {
|
|
||||||
"inputs": {
|
|
||||||
"width": 720,
|
|
||||||
"height": 1088,
|
|
||||||
"batch_size": 1
|
|
||||||
},
|
|
||||||
"class_type": "EmptySD3LatentImage",
|
|
||||||
"_meta": {
|
|
||||||
"title": "CR Aspect Ratio"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"30": {
|
|
||||||
"inputs": {
|
|
||||||
"max_shift": 1.15,
|
|
||||||
"base_shift": 0.5,
|
|
||||||
"width": 720,
|
|
||||||
"height": 1088,
|
|
||||||
"model": [
|
|
||||||
"38",
|
|
||||||
0
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"class_type": "ModelSamplingFlux",
|
|
||||||
"_meta": {
|
|
||||||
"title": "ModelSamplingFlux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"38": {
|
|
||||||
"inputs": {
|
|
||||||
"unet_name": "flux1-dev-Q4_0.gguf",
|
|
||||||
"device": "cuda:1",
|
|
||||||
"virtual_vram_gb": 0,
|
|
||||||
"use_other_vram": true,
|
|
||||||
"expert_mode_allocations": ""
|
|
||||||
},
|
|
||||||
"class_type": "UnetLoaderGGUFDisTorchMultiGPU",
|
|
||||||
"_meta": {
|
|
||||||
"title": "UnetLoaderGGUFDisTorchMultiGPU"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"39": {
|
|
||||||
"inputs": {
|
"inputs": {
|
||||||
"clip_name1": "t5-v1_1-xxl-encoder-Q4_K_M.gguf",
|
"clip_name1": "t5-v1_1-xxl-encoder-Q4_K_M.gguf",
|
||||||
"clip_name2": "clip_l.safetensors",
|
"clip_name2": "clip_l.safetensors",
|
||||||
"type": "flux",
|
"type": "flux",
|
||||||
"device": "cuda:0",
|
"device": "cuda:0",
|
||||||
"virtual_vram_gb": 0,
|
"virtual_vram_gb": 0,
|
||||||
"use_other_vram": true,
|
"use_other_vram": false,
|
||||||
"expert_mode_allocations": ""
|
"expert_mode_allocations": ""
|
||||||
},
|
},
|
||||||
"class_type": "DualCLIPLoaderGGUFDisTorchMultiGPU",
|
"class_type": "DualCLIPLoaderGGUFDisTorchMultiGPU",
|
||||||
@@ -192,7 +314,7 @@
       "title": "DualCLIPLoaderGGUFDisTorchMultiGPU"
     }
   },
-  "41": {
+  "73": {
     "inputs": {
       "vae_name": "FLUX1/ae.safetensors",
       "device": "cuda:0"
@@ -202,18 +324,20 @@
       "title": "VAELoaderMultiGPU"
     }
   },
-  "42": {
+  "35": {
     "inputs": {
-      "offload_model": true,
-      "offload_cache": true,
-      "anything": [
-        "8",
-        0
-      ]
+      "unet_name": "flux1-dev-Q4_0.gguf",
+      "dequant_dtype": "default",
+      "patch_dtype": "default",
+      "patch_on_device": false,
+      "device": "cuda:1",
+      "virtual_vram_gb": 0,
+      "use_other_vram": false,
+      "expert_mode_allocations": ""
     },
-    "class_type": "VRAMCleanup",
+    "class_type": "UnetLoaderGGUFAdvancedDisTorchMultiGPU",
     "_meta": {
-      "title": "🎈VRAM-Cleanup"
+      "title": "UnetLoaderGGUFAdvancedDisTorchMultiGPU"
     }
   }
 }
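Note: the FLUX workflow is renumbered and re-plumbed above (SaveImage 9 becomes the CivitAI Image Saver 42, SamplerCustomAdvanced 13 becomes 62, the UNet loader 38 becomes 35, and so on), which is what drives the node-id and node-title changes in create_image and get_details_from_png earlier in this compare. A small, generic sketch for sanity-checking an edited API-format workflow so that no input still points at a removed node id (the helper is illustrative, not from the repo):

import json

def check_workflow_links(path: str) -> list[str]:
    """Report node inputs that reference a node id missing from the workflow."""
    with open(path) as f:
        workflow = json.load(f)
    problems = []
    for node_id, node in workflow.items():
        for name, value in node.get("inputs", {}).items():
            # Links between nodes are encoded as [source_node_id, output_index].
            if isinstance(value, list) and len(value) == 2 and isinstance(value[0], str):
                if value[0] not in workflow:
                    problems.append(f"node {node_id} input {name!r} points at missing node {value[0]}")
    return problems

print(check_workflow_links("./workflow_flux.json") or "all node links resolve")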
@@ -98,7 +98,7 @@
   "102": {
     "inputs": {
       "images": [
-        "129",
+        "98",
         0
       ]
     },
@@ -143,19 +143,5 @@
     "_meta": {
       "title": "VAELoaderMultiGPU"
     }
-  },
-  "129": {
-    "inputs": {
-      "offload_model": true,
-      "offload_cache": true,
-      "anything": [
-        "98",
-        0
-      ]
-    },
-    "class_type": "VRAMCleanup",
-    "_meta": {
-      "title": "🎈VRAM-Cleanup"
-    }
   }
 }
@@ -52,12 +52,6 @@
   "6": {
     "inputs": {
       "text": "A bustling cyberpunk street at night, filled with neon signs, rain-soaked pavement, and futuristic street vendors. High detail, vivid neon colors, and realistic reflections.",
-      "speak_and_recognation": {
-        "__value__": [
-          false,
-          true
-        ]
-      },
       "clip": [
         "4",
         1
@@ -71,12 +65,6 @@
   "7": {
     "inputs": {
       "text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
-      "speak_and_recognation": {
-        "__value__": [
-          false,
-          true
-        ]
-      },
       "clip": [
         "4",
         1
@@ -107,7 +95,7 @@
     "inputs": {
       "filename_prefix": "ComfyUI",
       "images": [
-        "10",
+        "8",
         0
       ]
     },
@@ -115,19 +103,5 @@
     "_meta": {
       "title": "Save Image"
     }
-  },
-  "10": {
-    "inputs": {
-      "offload_model": true,
-      "offload_cache": true,
-      "anything": [
-        "8",
-        0
-      ]
-    },
-    "class_type": "VRAMCleanup",
-    "_meta": {
-      "title": "🎈VRAM-Cleanup"
-    }
   }
 }