mirror of https://github.com/karl0ss/ai_image_frame_server.git (synced 2025-11-12 21:09:41 +00:00)
feat: add Qwen workflow support and enhance model validation
- Add extraction logic for Qwen workflow metadata in PNG files
- Improve OpenRouter model selection with validation and fallback to free/configured models
- Remove outdated Flux workflow file
- Update Qwen workflow configuration with new parameters and simplified structure
This commit is contained in:
parent 5c09bcd9e8
commit f15c83ebaa
@@ -81,17 +81,20 @@ def get_details_from_png(path):
     try:
         date = datetime.fromtimestamp(os.path.getctime(path)).strftime("%d-%m-%Y")
         with Image.open(path) as img:
             try:
                 data = json.loads(img.info["prompt"])
                 prompt = data['6']['inputs']['text']
-                model = data['38']['inputs']['unet_name'].split(".")[0]
-            except KeyError:
-                data = json.loads(img.info["prompt"])
-                prompt = data['6']['inputs']['text']
-                model = data['4']['inputs']['ckpt_name']
-            return {"p":prompt,"m":model,"d":date} or {"p":"","m":"","c":""}
+                if '38' in data and 'unet_name' in data['38']['inputs']:
+                    # Flux workflow
+                    model = data['38']['inputs']['unet_name'].split(".")[0]
+                elif '4' in data and 'ckpt_name' in data['4']['inputs']:
+                    # SDXL workflow
+                    model = data['4']['inputs']['ckpt_name']
+                elif '80' in data and 'unet_name' in data['80']['inputs']:
+                    # Qwen workflow
+                    model = data['80']['inputs']['unet_name'].split(".")[0]
+                else:
+                    model = "unknown"
+                return {"p":prompt,"m":model,"d":date}
             except Exception as e:
                 print(f"Error reading metadata from {path}: {e}")
                 return ""
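For reference, the new dispatch order can be exercised in isolation. A minimal sketch (classify_workflow is a hypothetical helper, not part of this commit; the node IDs are the ones the diff checks):

import json

def classify_workflow(data: dict) -> str:
    # Mirrors get_details_from_png: Flux first, then SDXL, then Qwen.
    if '38' in data and 'unet_name' in data['38']['inputs']:
        return data['38']['inputs']['unet_name'].split(".")[0]  # Flux
    elif '4' in data and 'ckpt_name' in data['4']['inputs']:
        return data['4']['inputs']['ckpt_name']  # SDXL
    elif '80' in data and 'unet_name' in data['80']['inputs']:
        return data['80']['inputs']['unet_name'].split(".")[0]  # Qwen
    return "unknown"

# A Qwen-style dict, as embedded under the PNG's "prompt" metadata key:
qwen_data = json.loads('{"80": {"inputs": {"unet_name": "qwen-image-Q4_K_S.gguf"}}}')
assert classify_workflow(qwen_data) == "qwen-image-Q4_K_S"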
@@ -45,18 +45,50 @@ def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str =
         + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
     )
 
-    # Use the specified model or select a random model from the configured OpenRouter models
+    # Load configured models
+    configured_models = [m.strip() for m in user_config["openrouter"]["models"].split(",") if m.strip()]
+    if not configured_models:
+        logging.error("No OpenRouter models configured.")
+        return ""
+
+    # Create client early for model checking
+    client = OpenAI(
+        base_url="https://openrouter.ai/api/v1",
+        api_key=user_config["openrouter"]["api_key"],
+    )
+
+    # Select model
     if model:
-        # Use the specified model
-        model = model
+        original_model = model
+        # Always check if model exists on OpenRouter
+        try:
+            all_models_response = client.models.list()
+            all_models = [m.id for m in all_models_response.data]
+            if model not in all_models:
+                # Fallback to random free model from all OpenRouter models
+                free_models = [m for m in all_models if "free" in m.lower()]
+                if free_models:
+                    model = random.choice(free_models)
+                    logging.info(f"Specified model '{original_model}' not found on OpenRouter, falling back to free model: {model}")
+                else:
+                    # No free models, fallback to random configured model
+                    model = random.choice(configured_models)
+                    logging.warning(f"Specified model '{original_model}' not found, no free models available on OpenRouter, using random configured model: {model}")
+            # else model exists, use it
+        except Exception as e:
+            logging.warning(f"Failed to fetch OpenRouter models for validation: {e}. Falling back to configured models.")
+            if model not in configured_models:
+                # Fallback to random free from configured
+                free_models = [m for m in configured_models if "free" in m.lower()]
+                if free_models:
+                    model = random.choice(free_models)
+                    logging.info(f"Specified model '{original_model}' not found, falling back to free configured model: {model}")
+                else:
+                    model = random.choice(configured_models)
+                    logging.warning(f"Specified model '{original_model}' not found, no free configured models available, using random configured model: {model}")
+            # else use the specified model
     else:
         # Select a random model from the configured OpenRouter models
-        models = [m.strip() for m in user_config["openrouter"]["models"].split(",") if m.strip()]
-        if not models:
-            logging.error("No OpenRouter models configured.")
-            return ""
-
-        model = random.choice(models)
+        model = random.choice(configured_models)
 
     try:
-        client = OpenAI(
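The selection policy above boils down to a small pure function. A hedged sketch of the same fallback order (pick_model and its arguments are illustrative names, not part of this commit):

import random

def pick_model(requested: str | None, available: list[str], configured: list[str]) -> str:
    # No explicit request: pick randomly from the configured models.
    if not requested:
        return random.choice(configured)
    # The requested model exists on OpenRouter: use it unchanged.
    if requested in available:
        return requested
    # Otherwise prefer a random free model, then any configured model.
    free = [m for m in available if "free" in m.lower()]
    return random.choice(free or configured)

# Example: an unknown model falls back to the only free model on offer.
print(pick_model("bad/model", ["a/x", "b/y:free"], ["c/z"]))  # -> "b/y:free"

Factoring the policy out this way would also make the fallback order unit-testable without a live OpenRouter client.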
@@ -1,433 +0,0 @@
{
  "8": {
    "inputs": {
      "samples": ["62", 1],
      "vae": ["27", 0]
    },
    "class_type": "VAEDecode",
    "_meta": { "title": "VAE Decode" }
  },
  "22": {
    "inputs": {
      "clip_name1": "t5/t5xxl_fp8_e4m3fn.safetensors",
      "clip_name2": "clip_l.safetensors",
      "type": "flux",
      "device": "default"
    },
    "class_type": "DualCLIPLoader",
    "_meta": { "title": "DualCLIPLoader" }
  },
  "27": {
    "inputs": { "vae_name": "FLUX1/ae.safetensors" },
    "class_type": "VAELoader",
    "_meta": { "title": "Load VAE" }
  },
  "32": {
    "inputs": {
      "upscale_model": ["33", 0],
      "image": ["8", 0]
    },
    "class_type": "ImageUpscaleWithModel",
    "_meta": { "title": "Upscale Image (using Model)" }
  },
  "33": {
    "inputs": { "model_name": "4x-UltraSharp.pth" },
    "class_type": "UpscaleModelLoader",
    "_meta": { "title": "Load Upscale Model" }
  },
  "34": {
    "inputs": {
      "upscale_method": "lanczos",
      "scale_by": 0.5,
      "image": ["32", 0]
    },
    "class_type": "ImageScaleBy",
    "_meta": { "title": "Half size" }
  },
  "35": {
    "inputs": { "unet_name": "flux1-dev-Q4_0.gguf" },
    "class_type": "UnetLoaderGGUF",
    "_meta": { "title": "Unet Loader (GGUF)" }
  },
  "40": {
    "inputs": { "int": 20 },
    "class_type": "Int Literal (Image Saver)",
    "_meta": { "title": "Generation Steps" }
  },
  "41": {
    "inputs": {
      "width": 720,
      "height": 1080,
      "aspect_ratio": "custom",
      "swap_dimensions": "Off",
      "upscale_factor": 2,
      "prescale_factor": 1,
      "batch_size": 1
    },
    "class_type": "CR Aspect Ratio",
    "_meta": { "title": "CR Aspect Ratio" }
  },
  "42": {
    "inputs": {
      "filename": "THISFILE",
      "path": "",
      "extension": "png",
      "steps": ["40", 0],
      "cfg": ["52", 0],
      "modelname": "flux1-dev-Q4_0.gguf",
      "sampler_name": ["50", 1],
      "positive": ["44", 0],
      "negative": ["45", 0],
      "seed_value": ["48", 0],
      "width": ["41", 0],
      "height": ["41", 1],
      "lossless_webp": true,
      "quality_jpeg_or_webp": 100,
      "optimize_png": false,
      "counter": 0,
      "denoise": ["53", 0],
      "clip_skip": 0,
      "time_format": "%Y-%m-%d-%H%M%S",
      "save_workflow_as_json": true,
      "embed_workflow": true,
      "additional_hashes": "",
      "download_civitai_data": true,
      "easy_remix": true,
      "speak_and_recognation": { "__value__": [false, true] },
      "images": ["34", 0]
    },
    "class_type": "Image Saver",
    "_meta": { "title": "CivitAI Image Saver" }
  },
  "44": {
    "inputs": {
      "text": "",
      "speak_and_recognation": { "__value__": [false, true] }
    },
    "class_type": "ttN text",
    "_meta": { "title": "Positive Prompt T5" }
  },
  "45": {
    "inputs": {
      "text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
      "speak_and_recognation": { "__value__": [false, true] }
    },
    "class_type": "ttN text",
    "_meta": { "title": "Negative Prompt" }
  },
  "47": {
    "inputs": {
      "text": ["44", 0],
      "speak_and_recognation": { "__value__": [false, true] },
      "clip": ["68", 1]
    },
    "class_type": "CLIPTextEncode",
    "_meta": { "title": "CLIP Text Encode (Prompt)" }
  },
  "48": {
    "inputs": { "seed": 903006749445372, "increment": 1 },
    "class_type": "Seed Generator (Image Saver)",
    "_meta": { "title": "Seed" }
  },
  "49": {
    "inputs": { "scheduler": "beta" },
    "class_type": "Scheduler Selector (Comfy) (Image Saver)",
    "_meta": { "title": "Scheduler Selector" }
  },
  "50": {
    "inputs": { "sampler_name": "euler" },
    "class_type": "Sampler Selector (Image Saver)",
    "_meta": { "title": "Sampler Selector (Image Saver)" }
  },
  "51": {
    "inputs": { "images": ["8", 0] },
    "class_type": "PreviewImage",
    "_meta": { "title": "Preview Image" }
  },
  "52": {
    "inputs": { "float": 3.5 },
    "class_type": "Float Literal (Image Saver)",
    "_meta": { "title": "CFG" }
  },
  "53": {
    "inputs": { "float": 1 },
    "class_type": "Float Literal (Image Saver)",
    "_meta": { "title": "Denoise" }
  },
  "60": {
    "inputs": {
      "clip_l": "",
      "t5xxl": ["44", 0],
      "guidance": ["52", 0],
      "speak_and_recognation": { "__value__": [false, true] },
      "clip": ["68", 1]
    },
    "class_type": "CLIPTextEncodeFlux",
    "_meta": { "title": "CLIPTextEncodeFlux" }
  },
  "62": {
    "inputs": {
      "noise": ["65", 0],
      "guider": ["67", 0],
      "sampler": ["63", 0],
      "sigmas": ["64", 0],
      "latent_image": ["41", 5]
    },
    "class_type": "SamplerCustomAdvanced",
    "_meta": { "title": "SamplerCustomAdvanced" }
  },
  "63": {
    "inputs": { "sampler_name": ["50", 0] },
    "class_type": "KSamplerSelect",
    "_meta": { "title": "KSamplerSelect" }
  },
  "64": {
    "inputs": {
      "scheduler": ["49", 0],
      "steps": ["40", 0],
      "denoise": ["53", 0],
      "model": ["68", 0]
    },
    "class_type": "BasicScheduler",
    "_meta": { "title": "BasicScheduler" }
  },
  "65": {
    "inputs": { "noise_seed": ["48", 0] },
    "class_type": "RandomNoise",
    "_meta": { "title": "RandomNoise" }
  },
  "67": {
    "inputs": {
      "model": ["68", 0],
      "conditioning": ["47", 0]
    },
    "class_type": "BasicGuider",
    "_meta": { "title": "BasicGuider" }
  },
  "68": {
    "inputs": {
      "lora_01": "None",
      "strength_01": 1,
      "lora_02": "None",
      "strength_02": 1,
      "lora_03": "None",
      "strength_03": 1,
      "lora_04": "None",
      "strength_04": 1,
      "model": ["35", 0],
      "clip": ["22", 0]
    },
    "class_type": "Lora Loader Stack (rgthree)",
    "_meta": { "title": "Lora Loader Stack (rgthree)" }
  }
}
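For context, workflow files like the deleted one are queued against ComfyUI over HTTP. A minimal sketch, assuming a local instance on ComfyUI's default port 8188 (queue_workflow is illustrative, not part of this commit):

import json
import urllib.request

def queue_workflow(path: str, host: str = "http://127.0.0.1:8188") -> dict:
    # Load an API-format workflow export and POST it to ComfyUI's /prompt endpoint.
    with open(path) as f:
        workflow = json.load(f)
    req = urllib.request.Request(
        f"{host}/prompt",
        data=json.dumps({"prompt": workflow}).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)  # the response carries the queued prompt_id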
@@ -1,45 +1,26 @@
 {
-  "93": {
-    "inputs": {
-      "text": "jpeg compression",
-      "speak_and_recognation": {
-        "__value__": [
-          false,
-          true
-        ]
-      },
-      "clip": [
-        "126",
-        0
-      ]
-    },
-    "class_type": "CLIPTextEncode",
-    "_meta": {
-      "title": "CLIP Text Encode (Prompt)"
-    }
-  },
-  "95": {
+  "3": {
     "inputs": {
-      "seed": 22,
-      "steps": 10,
-      "cfg": 4.5,
+      "seed": 367723847870487,
+      "steps": 8,
+      "cfg": 2.5,
       "sampler_name": "euler",
-      "scheduler": "normal",
+      "scheduler": "simple",
       "denoise": 1,
       "model": [
-        "127",
+        "66",
         0
       ],
       "positive": [
-        "100",
+        "6",
         0
       ],
       "negative": [
-        "93",
+        "7",
         0
       ],
       "latent_image": [
-        "97",
+        "58",
         0
       ]
     },
@@ -48,26 +29,40 @@
       "title": "KSampler"
     }
   },
-  "97": {
+  "6": {
     "inputs": {
-      "width": 1280,
-      "height": 768,
-      "length": 1,
-      "batch_size": 1
+      "text": "Cat sitting at desk, surrounded by taxidermied dinosaurs",
+      "clip": [
+        "38",
+        0
+      ]
     },
-    "class_type": "EmptyHunyuanLatentVideo",
+    "class_type": "CLIPTextEncode",
     "_meta": {
-      "title": "EmptyHunyuanLatentVideo"
+      "title": "Positive"
     }
   },
-  "98": {
+  "7": {
+    "inputs": {
+      "text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
+      "clip": [
+        "38",
+        0
+      ]
+    },
+    "class_type": "CLIPTextEncode",
+    "_meta": {
+      "title": "Negative"
+    }
+  },
+  "8": {
     "inputs": {
       "samples": [
-        "95",
+        "3",
         0
       ],
       "vae": [
-        "128",
+        "39",
         0
       ]
     },
@@ -76,86 +71,84 @@
       "title": "VAE Decode"
     }
   },
-  "100": {
-    "inputs": {
-      "text": "Terminator riding a push bike",
-      "speak_and_recognation": {
-        "__value__": [
-          false,
-          true
-        ]
-      },
-      "clip": [
-        "126",
-        0
-      ]
-    },
-    "class_type": "CLIPTextEncode",
-    "_meta": {
-      "title": "CLIP Text Encode (Prompt)"
-    }
-  },
-  "102": {
-    "inputs": {
-      "images": [
-        "129",
-        0
-      ]
-    },
-    "class_type": "PreviewImage",
-    "_meta": {
-      "title": "Preview Image"
-    }
-  },
-  "126": {
+  "38": {
     "inputs": {
-      "clip_name": "Qwen2.5-VL-7B-Instruct-Q3_K_M.gguf",
+      "clip_name": "qwen_2.5_vl_7b_fp8_scaled.safetensors",
       "type": "qwen_image",
-      "device": "cuda:1",
-      "virtual_vram_gb": 6,
-      "use_other_vram": true,
-      "expert_mode_allocations": ""
+      "device": "default"
     },
-    "class_type": "CLIPLoaderGGUFDisTorchMultiGPU",
+    "class_type": "CLIPLoader",
     "_meta": {
-      "title": "CLIPLoaderGGUFDisTorchMultiGPU"
+      "title": "Load CLIP"
     }
   },
-  "127": {
+  "39": {
     "inputs": {
-      "unet_name": "qwen-image-Q2_K.gguf",
-      "device": "cuda:0",
-      "virtual_vram_gb": 6,
-      "use_other_vram": true,
-      "expert_mode_allocations": ""
+      "vae_name": "qwen_image_vae.safetensors"
     },
-    "class_type": "UnetLoaderGGUFDisTorchMultiGPU",
+    "class_type": "VAELoader",
     "_meta": {
-      "title": "UnetLoaderGGUFDisTorchMultiGPU"
+      "title": "Load VAE"
     }
   },
-  "128": {
+  "58": {
     "inputs": {
-      "vae_name": "qwen_image_vae.safetensors",
-      "device": "cuda:1"
+      "width": 720,
+      "height": 1088,
+      "batch_size": 1
     },
-    "class_type": "VAELoaderMultiGPU",
+    "class_type": "EmptySD3LatentImage",
    "_meta": {
-      "title": "VAELoaderMultiGPU"
+      "title": "CR Aspect Ratio"
     }
   },
-  "129": {
+  "60": {
     "inputs": {
-      "offload_model": true,
-      "offload_cache": true,
-      "anything": [
-        "98",
+      "filename_prefix": "ComfyUI",
+      "images": [
+        "8",
         0
       ]
     },
-    "class_type": "VRAMCleanup",
+    "class_type": "SaveImage",
     "_meta": {
-      "title": "🎈VRAM-Cleanup"
+      "title": "Save Image"
     }
   },
+  "66": {
+    "inputs": {
+      "shift": 3.1000000000000005,
+      "model": [
+        "73",
+        0
+      ]
+    },
+    "class_type": "ModelSamplingAuraFlow",
+    "_meta": {
+      "title": "ModelSamplingAuraFlow"
+    }
+  },
+  "73": {
+    "inputs": {
+      "lora_name": "Qwen-Image-Lightning-8steps-V1.0.safetensors",
+      "strength_model": 1,
+      "model": [
+        "80",
+        0
+      ]
+    },
+    "class_type": "LoraLoaderModelOnly",
+    "_meta": {
+      "title": "LoraLoaderModelOnly"
+    }
+  },
+  "80": {
+    "inputs": {
+      "unet_name": "qwen-image-Q4_K_S.gguf"
+    },
+    "class_type": "UnetLoaderGGUF",
+    "_meta": {
+      "title": "Load Checkpoint"
+    }
+  }
 }
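A hedged sketch of how the simplified workflow might be patched per request before queueing (prepare_qwen_workflow is illustrative, not part of this commit; node "6" is the positive prompt and node "3" the KSampler in the JSON above):

import json
import random

def prepare_qwen_workflow(path: str, prompt_text: str) -> dict:
    with open(path) as f:
        wf = json.load(f)
    wf["6"]["inputs"]["text"] = prompt_text  # positive prompt (node 6)
    wf["3"]["inputs"]["seed"] = random.getrandbits(48)  # fresh seed for the KSampler
    return wf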