Compare commits


7 Commits
0.3.9 ... main

Author SHA1 Message Date
086695d898 Bump version: 0.3.11 → 0.3.12 2025-09-04 08:58:28 +01:00
a63668cc93 fix(openrouter): handle rate limit errors with OpenWebUI fallback

When OpenRouter returns a 429 rate-limit error, the application now falls back to an OpenWebUI model instead of immediately returning a default prompt. This keeps prompt generation working through local models when external API limits are exceeded.

The changes include:
- Adding RateLimitError import from openai
- Implementing fallback logic in create_prompt_on_openrouter function
- Using OpenWebUI as secondary source for prompts when rate limiting occurs
- Proper error handling and logging for both primary and fallback scenarios

This change improves the robustness of prompt generation: users still receive generated content when OpenRouter is temporarily unavailable due to rate limits. The fallback follows the existing pattern of randomly selecting from the configured OpenWebUI models, preferring local models when available, and only returns the static default prompt if the secondary call also fails. Errors in both the primary and fallback paths are logged, and the change is backward compatible.
2025-09-04 08:58:24 +01:00
06d3a64bb9 Bump version: 0.3.10 → 0.3.11 2025-09-02 13:41:43 +01:00
d7c25373bd clear vram on comfyui 2025-09-02 12:19:38 +01:00
006c88b084 Bump version: 0.3.9 → 0.3.10 2025-09-01 13:22:20 +01:00
e7df200f8c add new venv to gitignore 2025-09-01 13:22:17 +01:00
506dece377 refactor(comfyui.py): improve model selection logic
Refactor `get_available_models` to handle multiple models and improve error handling. Adjust `select_model` to support configurable workflows and fallbacks.
2025-09-01 13:19:28 +01:00
9 changed files with 111 additions and 11 deletions

View File

@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.3.9"
current_version = "0.3.12"
parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
serialize = ["{major}.{minor}.{patch}"]
replace = "{new_version}"

.gitignore vendored
View File

@@ -12,3 +12,4 @@ test.py
.vscode/launch.json
favourites.json
.vscode/launch.json
venv/*

View File

@@ -4,7 +4,7 @@ FROM python:3.11-slim
# Set the working directory in the container
WORKDIR /app
# Set version label
ARG VERSION="0.3.9"
ARG VERSION="0.3.12"
LABEL version=$VERSION
# Copy project files into the container

View File

@@ -32,9 +32,17 @@ def get_available_models() -> list:
response = requests.get(url)
if response.status_code == 200:
data = response.json()
general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [])[0]
flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [])[0]
return general + flux
# Get SDXL models from CheckpointLoaderSimple
general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [[]])[0]
# Get FLUX models from UnetLoaderGGUF
flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [[]])[0]
# Combine both lists, handling cases where one might be missing
all_models = []
if isinstance(general, list):
all_models.extend(general)
if isinstance(flux, list):
all_models.extend(flux)
return all_models
else:
print(f"Failed to fetch models: {response.status_code}")
return []
@@ -125,9 +133,25 @@ def select_model(model: str) -> tuple[str, str]:
use_qwen = json.loads(user_config["comfyui"].get("Qwen", "false").lower())
if model == "Random Image Model":
selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
# Create a list of available workflows based on configuration
available_workflows = []
if not only_flux:
available_workflows.append("SDXL")
if use_flux:
available_workflows.append("FLUX")
if use_qwen:
available_workflows.append("Qwen")
# If no workflows are available, default to SDXL
if not available_workflows:
available_workflows.append("SDXL")
# Randomly select a workflow
selected_workflow = random.choice(available_workflows)
elif "flux" in model.lower():
selected_workflow = "FLUX"
elif "qwen" in model.lower():
selected_workflow = "Qwen"
else:
selected_workflow = "SDXL"
@@ -139,6 +163,13 @@ def select_model(model: str) -> tuple[str, str]:
else: # SDXL
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(set(get_available_models()) & set(available_model_list))
# If no valid models found, fall back to configured models
if not valid_models:
valid_models = available_model_list
# Ensure we have at least one model to choose from
if not valid_models:
# Fallback to a default model
valid_models = ["zavychromaxl_v100.safetensors"]
model = random.choice(valid_models)
return selected_workflow, model

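For context on the indexing in get_available_models above: ComfyUI's /object_info endpoint nests a combo input's selectable values one level deep, which is why the code takes element [0] of ckpt_name / unet_name and why [[]] is a safer default than []. A minimal sketch of the expected shape, assuming the standard /object_info response format (model names other than zavychromaxl_v100.safetensors are placeholders):

# Illustrative /object_info excerpt; the first element under "ckpt_name" / "unet_name"
# is the list of selectable model names.
object_info = {
    "CheckpointLoaderSimple": {
        "input": {"required": {"ckpt_name": [["zavychromaxl_v100.safetensors", "exampleXL_v1.safetensors"]]}}
    },
    "UnetLoaderGGUF": {
        "input": {"required": {"unet_name": [["flux1-dev-Q4_K_S.gguf"]]}}
    },
}
general = object_info.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [[]])[0]
flux = object_info.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [[]])[0]
print(general + flux)  # all SDXL and FLUX model names in one flat list
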
View File

@@ -240,7 +240,6 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
logging.error(f"Error with OpenRouter: {e}")
return "A colorful abstract composition" # Default fallback prompt
return "A colorful abstract composition" # Default fallback prompt
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]

View File

@@ -1,8 +1,9 @@
import random
import logging
from openai import OpenAI
from openai import OpenAI, RateLimitError
import nest_asyncio
from libs.generic import load_recent_prompts, load_config
from libs.openwebui import create_prompt_on_openwebui
import re
nest_asyncio.apply()
@@ -90,6 +91,20 @@ def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str =
prompt = match.group(1)
logging.debug(prompt)
return prompt
except RateLimitError as e:
logging.warning(f"OpenRouter rate limit exceeded (429): {e}. Falling back to local OpenWebUI model.")
# Try to use OpenWebUI as fallback
openwebui_models = [m.strip() for m in user_config["openwebui"]["models"].split(",") if m.strip()] if "openwebui" in user_config and "models" in user_config["openwebui"] else []
if openwebui_models:
selected_model = random.choice(openwebui_models)
try:
return create_prompt_on_openwebui(user_content, topic, selected_model)
except Exception as e2:
logging.error(f"OpenWebUI fallback also failed: {e2}")
return "A colorful abstract composition" # Final fallback
else:
logging.error("No OpenWebUI models configured for fallback.")
return "A colorful abstract composition" # Final fallback
except Exception as e:
logging.error(f"Error generating prompt with OpenRouter: {e}")
return ""

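To illustrate the fallback path added above, a minimal usage sketch; the module path libs.openrouter is an assumption mirroring the libs.openwebui import visible in the diff, and the prompt and topic values are arbitrary:

# Hypothetical caller; on a 429 from OpenRouter the function now returns a prompt
# from a randomly chosen configured OpenWebUI model, and only falls back to the
# static default string if that secondary call also fails.
from libs.openrouter import create_prompt_on_openrouter

prompt = create_prompt_on_openrouter("Describe a surreal landscape", topic="nature")
print(prompt)  # generated prompt, or "A colorful abstract composition" after a double failure
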
View File

@@ -38,7 +38,7 @@
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
"42",
0
]
},
@@ -201,5 +201,19 @@
"_meta": {
"title": "VAELoaderMultiGPU"
}
},
"42": {
"inputs": {
"offload_model": true,
"offload_cache": true,
"anything": [
"8",
0
]
},
"class_type": "VRAMCleanup",
"_meta": {
"title": "🎈VRAM-Cleanup"
}
}
}

View File

@@ -98,7 +98,7 @@
"102": {
"inputs": {
"images": [
"98",
"129",
0
]
},
@@ -143,5 +143,19 @@
"_meta": {
"title": "VAELoaderMultiGPU"
}
},
"129": {
"inputs": {
"offload_model": true,
"offload_cache": true,
"anything": [
"98",
0
]
},
"class_type": "VRAMCleanup",
"_meta": {
"title": "🎈VRAM-Cleanup"
}
}
}

View File

@@ -52,6 +52,12 @@
"6": {
"inputs": {
"text": "A bustling cyberpunk street at night, filled with neon signs, rain-soaked pavement, and futuristic street vendors. High detail, vivid neon colors, and realistic reflections.",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"4",
1
@@ -65,6 +71,12 @@
"7": {
"inputs": {
"text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"4",
1
@@ -95,7 +107,7 @@
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
"10",
0
]
},
@@ -103,5 +115,19 @@
"_meta": {
"title": "Save Image"
}
},
"10": {
"inputs": {
"offload_model": true,
"offload_cache": true,
"anything": [
"8",
0
]
},
"class_type": "VRAMCleanup",
"_meta": {
"title": "🎈VRAM-Cleanup"
}
}
}
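
The three workflow changes above insert a VRAMCleanup pass-through between the generated images and the node that saves or consumes them, so VRAM is offloaded as part of each queued generation. A minimal sketch of queuing one of these workflow graphs, assuming ComfyUI's standard HTTP API on its default port and a placeholder file path:

import json
import requests

# Load one of the modified workflow graphs (path is a placeholder).
with open("workflows/sdxl_workflow.json") as f:
    workflow = json.load(f)

# ComfyUI's /prompt endpoint accepts the node graph under the "prompt" key;
# the VRAMCleanup node runs once its "anything" input (the image batch) is available.
response = requests.post("http://127.0.0.1:8188/prompt", json={"prompt": workflow})
response.raise_for_status()
print(response.json())  # e.g. {"prompt_id": "...", "number": 1, ...}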