Compare commits


No commits in common. "main" and "0.3.9" have entirely different histories.
main ... 0.3.9

9 changed files with 11 additions and 111 deletions

View File

@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.3.12"
+current_version = "0.3.9"
 parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
 serialize = ["{major}.{minor}.{patch}"]
 replace = "{new_version}"
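
The parse/serialize pair above is what drives bump-my-version: the regex splits the current version into named fields, and the serialize format writes them back out. A minimal sketch of that round trip, using only the regex and format string from this config (the bump_patch helper is illustrative, not part of the repo):

import re

# Regex copied from the [tool.bumpversion] parse setting above.
PARSE = re.compile(r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)")

def bump_patch(version: str) -> str:
    # Parse the current version, increment the patch field,
    # and re-serialize with the "{major}.{minor}.{patch}" format.
    parts = PARSE.match(version).groupdict()
    return "{major}.{minor}.{patch}".format(
        major=parts["major"],
        minor=parts["minor"],
        patch=int(parts["patch"]) + 1,
    )

print(bump_patch("0.3.9"))  # -> 0.3.10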

.gitignore
View File

@@ -12,4 +12,3 @@ test.py
 .vscode/launch.json
 favourites.json
-.vscode/launch.json
 venv/*

View File

@@ -4,7 +4,7 @@ FROM python:3.11-slim
 # Set the working directory in the container
 WORKDIR /app
 # Set version label
-ARG VERSION="0.3.12"
+ARG VERSION="0.3.9"
 LABEL version=$VERSION
 # Copy project files into the container
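
Because the version is declared with ARG, it can also be overridden at build time without editing the Dockerfile; the pinned value is only the default recorded by the LABEL. For example (image tag is hypothetical):

docker build --build-arg VERSION=0.3.9 -t ai-image-server:0.3.9 .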

View File

@@ -32,17 +32,9 @@ def get_available_models() -> list:
     response = requests.get(url)
     if response.status_code == 200:
         data = response.json()
-        # Get SDXL models from CheckpointLoaderSimple
-        general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [[]])[0]
-        # Get FLUX models from UnetLoaderGGUF
-        flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [[]])[0]
-        # Combine both lists, handling cases where one might be missing
-        all_models = []
-        if isinstance(general, list):
-            all_models.extend(general)
-        if isinstance(flux, list):
-            all_models.extend(flux)
-        return all_models
+        general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [])[0]
+        flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [])[0]
+        return general + flux
     else:
         print(f"Failed to fetch models: {response.status_code}")
         return []
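
The two sides differ in how a missing loader node is handled. With the `[[]]` default on the main side, index `[0]` of the default is an empty list, so a ComfyUI instance without, say, UnetLoaderGGUF still returns cleanly; with the bare `[]` default on the 0.3.9 side, `[0]` raises IndexError. A minimal standalone sketch of the difference, with a stubbed /object_info payload (hypothetical data for illustration):

# Stubbed /object_info response from a ComfyUI server that has no
# UnetLoaderGGUF node installed.
data = {
    "CheckpointLoaderSimple": {
        "input": {"required": {"ckpt_name": [["sdxl_base.safetensors"]]}}
    }
}

# main-side pattern: default [[]] means [0] is always safe.
flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [[]])[0]
print(flux)  # -> [] (empty list, no crash)

# 0.3.9-side pattern: default [] makes [0] raise IndexError.
try:
    flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [])[0]
except IndexError:
    print("IndexError: the node is missing from /object_info")
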
@@ -133,25 +125,9 @@ def select_model(model: str) -> tuple[str, str]:
-    use_qwen = json.loads(user_config["comfyui"].get("Qwen", "false").lower())
     if model == "Random Image Model":
-        # Create a list of available workflows based on configuration
-        available_workflows = []
-        if not only_flux:
-            available_workflows.append("SDXL")
-        if use_flux:
-            available_workflows.append("FLUX")
-        if use_qwen:
-            available_workflows.append("Qwen")
-        # If no workflows are available, default to SDXL
-        if not available_workflows:
-            available_workflows.append("SDXL")
-        # Randomly select a workflow
-        selected_workflow = random.choice(available_workflows)
+        selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
     elif "flux" in model.lower():
         selected_workflow = "FLUX"
-    elif "qwen" in model.lower():
-        selected_workflow = "Qwen"
     else:
         selected_workflow = "SDXL"
@@ -163,13 +139,6 @@ def select_model(model: str) -> tuple[str, str]:
     else:  # SDXL
         available_model_list = user_config["comfyui"]["models"].split(",")
         valid_models = list(set(get_available_models()) & set(available_model_list))
-        # If no valid models found, fall back to configured models
-        if not valid_models:
-            valid_models = available_model_list
-        # Ensure we have at least one model to choose from
-        if not valid_models:
-            # Fallback to a default model
-            valid_models = ["zavychromaxl_v100.safetensors"]
         model = random.choice(valid_models)
     return selected_workflow, model
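
The deleted guards matter when the ComfyUI server reports none of the configured models: the set intersection then comes back empty and `random.choice([])` raises IndexError. A small standalone illustration of that failure mode (model names are hypothetical):

import random

configured = ["modelA.safetensors", "modelB.safetensors"]
on_server = ["modelC.safetensors"]  # server has neither configured model

valid_models = list(set(configured) & set(on_server))
print(valid_models)  # -> []

try:
    random.choice(valid_models)
except IndexError:
    # This is the crash the removed fallback chain prevented on main.
    print("random.choice([]) raises IndexError")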

View File

@@ -240,6 +240,7 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
         logging.error(f"Error with OpenRouter: {e}")
         return "A colorful abstract composition"  # Default fallback prompt
+    return "A colorful abstract composition"  # Default fallback prompt
 user_config = load_config()
 output_folder = user_config["comfyui"]["output_dir"]
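
The single added line is a second fallback return at function scope. A minimal sketch of why both returns are useful, assuming the try/except structure implied by the context lines (hypothetical skeleton, not the full function):

import logging

def create_prompt(base_prompt: str) -> str:
    try:
        ...  # ask OpenRouter for a prompt; returns early on success
    except Exception as e:
        logging.error(f"Error with OpenRouter: {e}")
        return "A colorful abstract composition"  # except-branch fallback
    # The line added on the 0.3.9 side: a function-level fallback, so the
    # caller still gets a usable prompt if the try block falls through.
    return "A colorful abstract composition"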

View File

@ -1,9 +1,8 @@
import random
import logging
from openai import OpenAI, RateLimitError
from openai import OpenAI
import nest_asyncio
from libs.generic import load_recent_prompts, load_config
from libs.openwebui import create_prompt_on_openwebui
import re
nest_asyncio.apply()
@@ -91,20 +90,6 @@ def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str =
             prompt = match.group(1)
         logging.debug(prompt)
         return prompt
-    except RateLimitError as e:
-        logging.warning(f"OpenRouter rate limit exceeded (429): {e}. Falling back to local OpenWebUI model.")
-        # Try to use OpenWebUI as fallback
-        openwebui_models = [m.strip() for m in user_config["openwebui"]["models"].split(",") if m.strip()] if "openwebui" in user_config and "models" in user_config["openwebui"] else []
-        if openwebui_models:
-            selected_model = random.choice(openwebui_models)
-            try:
-                return create_prompt_on_openwebui(user_content, topic, selected_model)
-            except Exception as e2:
-                logging.error(f"OpenWebUI fallback also failed: {e2}")
-                return "A colorful abstract composition"  # Final fallback
-        else:
-            logging.error("No OpenWebUI models configured for fallback.")
-            return "A colorful abstract composition"  # Final fallback
     except Exception as e:
         logging.error(f"Error generating prompt with OpenRouter: {e}")
         return ""

View File

@@ -38,7 +38,7 @@
     "inputs": {
       "filename_prefix": "ComfyUI",
       "images": [
-        "42",
+        "8",
         0
       ]
     },
@@ -201,19 +201,5 @@
     "_meta": {
       "title": "VAELoaderMultiGPU"
     }
-  },
-  "42": {
-    "inputs": {
-      "offload_model": true,
-      "offload_cache": true,
-      "anything": [
-        "8",
-        0
-      ]
-    },
-    "class_type": "VRAMCleanup",
-    "_meta": {
-      "title": "🎈VRAM-Cleanup"
-    }
   }
 }
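
In ComfyUI's API-format JSON, a two-element array such as ["8", 0] is a link: output 0 of node "8". The VRAMCleanup node sat between node "8" and the save node, so deleting it also requires rewiring the save node's "images" input back to ["8", 0], which is exactly what the first hunk does. A sketch of that rewiring done programmatically (node ids taken from this file; the helper is illustrative, not part of the repo):

import json

def remove_passthrough_node(workflow: dict, node_id: str) -> dict:
    # VRAMCleanup forwards its "anything" input unchanged, so every link
    # that points at it can point at its upstream source instead.
    upstream = workflow[node_id]["inputs"]["anything"]  # e.g. ["8", 0]
    for node in workflow.values():
        for key, value in node.get("inputs", {}).items():
            if isinstance(value, list) and len(value) == 2 and value[0] == node_id:
                node["inputs"][key] = upstream
    del workflow[node_id]
    return workflow

# Applied to this file, removing node "42" rewires the save node's
# "images" link from ["42", 0] back to ["8", 0], matching the 0.3.9 side:
# with open("workflow.json") as f:
#     workflow = remove_passthrough_node(json.load(f), "42")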

View File

@@ -98,7 +98,7 @@
   "102": {
     "inputs": {
       "images": [
-        "129",
+        "98",
         0
       ]
     },
@@ -143,19 +143,5 @@
     "_meta": {
       "title": "VAELoaderMultiGPU"
     }
-  },
-  "129": {
-    "inputs": {
-      "offload_model": true,
-      "offload_cache": true,
-      "anything": [
-        "98",
-        0
-      ]
-    },
-    "class_type": "VRAMCleanup",
-    "_meta": {
-      "title": "🎈VRAM-Cleanup"
-    }
   }
 }

View File

@@ -52,12 +52,6 @@
   "6": {
     "inputs": {
       "text": "A bustling cyberpunk street at night, filled with neon signs, rain-soaked pavement, and futuristic street vendors. High detail, vivid neon colors, and realistic reflections.",
-      "speak_and_recognation": {
-        "__value__": [
-          false,
-          true
-        ]
-      },
       "clip": [
         "4",
         1
@@ -71,12 +65,6 @@
   "7": {
     "inputs": {
       "text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
-      "speak_and_recognation": {
-        "__value__": [
-          false,
-          true
-        ]
-      },
       "clip": [
         "4",
         1
@@ -107,7 +95,7 @@
     "inputs": {
       "filename_prefix": "ComfyUI",
       "images": [
-        "10",
+        "8",
         0
       ]
     },
@@ -115,19 +103,5 @@
     "_meta": {
       "title": "Save Image"
     }
-  },
-  "10": {
-    "inputs": {
-      "offload_model": true,
-      "offload_cache": true,
-      "anything": [
-        "8",
-        0
-      ]
-    },
-    "class_type": "VRAMCleanup",
-    "_meta": {
-      "title": "🎈VRAM-Cleanup"
-    }
   }
 }