Mirror of https://github.com/karl0ss/ai_image_frame_server.git (synced 2025-09-06 23:03:15 +01:00)
Compare commits
7 Commits
SHA1: 086695d898, a63668cc93, 06d3a64bb9, d7c25373bd, 006c88b084, e7df200f8c, 506dece377
```diff
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.3.9"
+current_version = "0.3.12"
 parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
 serialize = ["{major}.{minor}.{patch}"]
 replace = "{new_version}"
```
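For context, the parse regex and serialize template above define how the version string is taken apart and reassembled on each bump. A minimal sketch of that round trip in plain Python (regex and template copied from the config; the bump itself is illustrative):

```python
import re

# Same pattern as the [tool.bumpversion] "parse" setting
PARSE = re.compile(r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)")

parts = PARSE.match("0.3.12").groupdict()
parts["patch"] = str(int(parts["patch"]) + 1)  # a patch bump, conceptually
print("{major}.{minor}.{patch}".format(**parts))  # -> 0.3.13
```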
.gitignore (vendored):

```diff
@@ -12,3 +12,4 @@ test.py
 .vscode/launch.json
 favourites.json
 .vscode/launch.json
+venv/*
```
```diff
@@ -4,7 +4,7 @@ FROM python:3.11-slim
 # Set the working directory in the container
 WORKDIR /app
 # Set version label
-ARG VERSION="0.3.9"
+ARG VERSION="0.3.12"
 LABEL version=$VERSION

 # Copy project files into the container
```
```diff
@@ -32,9 +32,17 @@ def get_available_models() -> list:
     response = requests.get(url)
     if response.status_code == 200:
         data = response.json()
-        general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [])[0]
-        flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [])[0]
-        return general + flux
+        # Get SDXL models from CheckpointLoaderSimple
+        general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [[]])[0]
+        # Get FLUX models from UnetLoaderGGUF
+        flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [[]])[0]
+        # Combine both lists, handling cases where one might be missing
+        all_models = []
+        if isinstance(general, list):
+            all_models.extend(general)
+        if isinstance(flux, list):
+            all_models.extend(flux)
+        return all_models
     else:
         print(f"Failed to fetch models: {response.status_code}")
         return []
```
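The change from a `[]` default to `[[]]` is the substantive fix here: ComfyUI's `/object_info` response (assumed shape, inferred from the keys used above) nests each input's choices one level deep, so the trailing `[0]` raised `IndexError` whenever a loader class was missing. A minimal sketch of the failure mode and the new guards:

```python
# /object_info (assumed shape) nests the model list one level deep, e.g.
# {"CheckpointLoaderSimple": {"input": {"required": {"ckpt_name": [["a.safetensors"]]}}}}
data = {}  # simulate a server without the UnetLoaderGGUF custom node installed

# Old default: .get("unet_name", [])[0] raised IndexError for a missing loader.
# New default: [[]] makes the trailing [0] yield an empty list instead.
flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [[]])[0]
assert flux == []

# The isinstance() checks then make combining the lists safe either way.
all_models = []
if isinstance(flux, list):
    all_models.extend(flux)
```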
```diff
@@ -125,9 +133,25 @@ def select_model(model: str) -> tuple[str, str]:
     use_qwen = json.loads(user_config["comfyui"].get("Qwen", "false").lower())

     if model == "Random Image Model":
-        selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
+        # Create a list of available workflows based on configuration
+        available_workflows = []
+        if not only_flux:
+            available_workflows.append("SDXL")
+        if use_flux:
+            available_workflows.append("FLUX")
+        if use_qwen:
+            available_workflows.append("Qwen")
+
+        # If no workflows are available, default to SDXL
+        if not available_workflows:
+            available_workflows.append("SDXL")
+
+        # Randomly select a workflow
+        selected_workflow = random.choice(available_workflows)
     elif "flux" in model.lower():
         selected_workflow = "FLUX"
+    elif "qwen" in model.lower():
+        selected_workflow = "Qwen"
     else:
         selected_workflow = "SDXL"

```
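The replaced one-liner could only ever return FLUX or SDXL, so Qwen was unreachable, and an empty candidate set was never handled. A condensed sketch of the new behavior (flags assumed already parsed to booleans, as the `json.loads` calls above do):

```python
import random

def pick_workflow(only_flux: bool, use_flux: bool, use_qwen: bool) -> str:
    # Build the candidate pool from the config flags, then pick uniformly.
    pool = []
    if not only_flux:
        pool.append("SDXL")
    if use_flux:
        pool.append("FLUX")
    if use_qwen:
        pool.append("Qwen")
    return random.choice(pool or ["SDXL"])  # an empty pool falls back to SDXL

# A contradictory config (only_flux=True but use_flux=False) leaves the pool
# empty; the guard falls back to SDXL instead of letting random.choice([])
# raise an IndexError.
print(pick_workflow(only_flux=True, use_flux=False, use_qwen=False))  # SDXL
```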
```diff
@@ -139,6 +163,13 @@ def select_model(model: str) -> tuple[str, str]:
     else: # SDXL
         available_model_list = user_config["comfyui"]["models"].split(",")
         valid_models = list(set(get_available_models()) & set(available_model_list))
+        # If no valid models found, fall back to configured models
+        if not valid_models:
+            valid_models = available_model_list
+        # Ensure we have at least one model to choose from
+        if not valid_models:
+            # Fallback to a default model
+            valid_models = ["zavychromaxl_v100.safetensors"]
         model = random.choice(valid_models)

     return selected_workflow, model
```
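The SDXL branch now degrades in three steps: intersect the configured list with what the server actually reports, fall back to the configured list when the intersection is empty (for example when the server is unreachable and `get_available_models()` returns `[]`), and finally to a hard-coded default. A sketch of that chain in isolation (the function name is a hypothetical stand-in):

```python
import random

def choose_sdxl_model(configured: list[str], on_server: list[str]) -> str:
    # Step 1: only models that are both configured and actually installed.
    valid = list(set(on_server) & set(configured))
    # Step 2: empty intersection -> trust the configured list.
    if not valid:
        valid = configured
    # Step 3: nothing configured either -> the default named in the diff.
    if not valid:
        valid = ["zavychromaxl_v100.safetensors"]
    return random.choice(valid)

print(choose_sdxl_model(["a.safetensors"], []))  # -> a.safetensors
```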
```diff
@@ -240,7 +240,6 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
         logging.error(f"Error with OpenRouter: {e}")
         return "A colorful abstract composition" # Default fallback prompt

-    return "A colorful abstract composition" # Default fallback prompt

 user_config = load_config()
 output_folder = user_config["comfyui"]["output_dir"]
```
```diff
@@ -1,8 +1,9 @@
 import random
 import logging
-from openai import OpenAI
+from openai import OpenAI, RateLimitError
 import nest_asyncio
 from libs.generic import load_recent_prompts, load_config
+from libs.openwebui import create_prompt_on_openwebui
 import re

 nest_asyncio.apply()
```
```diff
@@ -90,6 +91,20 @@ def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str =
             prompt = match.group(1)
         logging.debug(prompt)
         return prompt
+    except RateLimitError as e:
+        logging.warning(f"OpenRouter rate limit exceeded (429): {e}. Falling back to local OpenWebUI model.")
+        # Try to use OpenWebUI as fallback
+        openwebui_models = [m.strip() for m in user_config["openwebui"]["models"].split(",") if m.strip()] if "openwebui" in user_config and "models" in user_config["openwebui"] else []
+        if openwebui_models:
+            selected_model = random.choice(openwebui_models)
+            try:
+                return create_prompt_on_openwebui(user_content, topic, selected_model)
+            except Exception as e2:
+                logging.error(f"OpenWebUI fallback also failed: {e2}")
+                return "A colorful abstract composition"  # Final fallback
+        else:
+            logging.error("No OpenWebUI models configured for fallback.")
+            return "A colorful abstract composition"  # Final fallback
     except Exception as e:
         logging.error(f"Error generating prompt with OpenRouter: {e}")
         return ""
```
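`RateLimitError` is what the v1 `openai` client raises on an HTTP 429, and since it subclasses `Exception` it has to be caught before the generic handler, exactly as ordered above. A stripped-down sketch of the same primary-then-fallback pattern (the base URL, key, model id, and `local_fallback` helper are placeholders, not the project's actual values):

```python
import logging
from openai import OpenAI, RateLimitError

client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key="placeholder")

def local_fallback(user_content: str) -> str:
    # Hypothetical stand-in for create_prompt_on_openwebui(...)
    raise RuntimeError("local backend unavailable")

def generate_prompt(user_content: str) -> str:
    try:
        resp = client.chat.completions.create(
            model="some/model",  # placeholder model id
            messages=[{"role": "user", "content": user_content}],
        )
        return resp.choices[0].message.content
    except RateLimitError:
        # 429 from the remote API: hand off to a local backend before giving up.
        try:
            return local_fallback(user_content)
        except Exception as e2:
            logging.error(f"Fallback also failed: {e2}")
            return "A colorful abstract composition"  # final fallback, as in the diff
```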
```diff
@@ -38,7 +38,7 @@
     "inputs": {
       "filename_prefix": "ComfyUI",
       "images": [
-        "8",
+        "42",
         0
       ]
     },
```
```diff
@@ -201,5 +201,19 @@
     "_meta": {
       "title": "VAELoaderMultiGPU"
     }
+  },
+  "42": {
+    "inputs": {
+      "offload_model": true,
+      "offload_cache": true,
+      "anything": [
+        "8",
+        0
+      ]
+    },
+    "class_type": "VRAMCleanup",
+    "_meta": {
+      "title": "🎈VRAM-Cleanup"
+    }
   }
 }
```
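Taken together with the earlier `"8" -> "42"` rewire in the SaveImage node, this splices the cleanup between VAE decode (node `"8"`) and saving: the `anything` input appears to act as a passthrough for node `"8"`'s first output, with the VRAM offload as a side effect. A sketch of performing that splice on a loaded workflow dict (the helper name is hypothetical, and the consumer id in the usage line is a guess):

```python
def splice_vram_cleanup(workflow: dict, consumer_id: str, producer_id: str, new_id: str) -> None:
    """Insert a VRAMCleanup passthrough between a producer and a consumer node."""
    workflow[new_id] = {
        "inputs": {
            "offload_model": True,
            "offload_cache": True,
            # 'anything' forwards the producer's first output unchanged
            "anything": [producer_id, 0],
        },
        "class_type": "VRAMCleanup",
        "_meta": {"title": "🎈VRAM-Cleanup"},
    }
    # Repoint the consumer at the cleanup node instead of the producer.
    workflow[consumer_id]["inputs"]["images"] = [new_id, 0]

# e.g. splice_vram_cleanup(flux_workflow, consumer_id="9", producer_id="8", new_id="42")
```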
```diff
@@ -98,7 +98,7 @@
   "102": {
     "inputs": {
       "images": [
-        "98",
+        "129",
         0
       ]
     },
```
```diff
@@ -143,5 +143,19 @@
     "_meta": {
       "title": "VAELoaderMultiGPU"
     }
+  },
+  "129": {
+    "inputs": {
+      "offload_model": true,
+      "offload_cache": true,
+      "anything": [
+        "98",
+        0
+      ]
+    },
+    "class_type": "VRAMCleanup",
+    "_meta": {
+      "title": "🎈VRAM-Cleanup"
+    }
   }
 }
```
```diff
@@ -52,6 +52,12 @@
   "6": {
     "inputs": {
       "text": "A bustling cyberpunk street at night, filled with neon signs, rain-soaked pavement, and futuristic street vendors. High detail, vivid neon colors, and realistic reflections.",
+      "speak_and_recognation": {
+        "__value__": [
+          false,
+          true
+        ]
+      },
       "clip": [
         "4",
         1
```
```diff
@@ -65,6 +71,12 @@
   "7": {
     "inputs": {
       "text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
+      "speak_and_recognation": {
+        "__value__": [
+          false,
+          true
+        ]
+      },
       "clip": [
         "4",
         1
```
```diff
@@ -95,7 +107,7 @@
     "inputs": {
       "filename_prefix": "ComfyUI",
       "images": [
-        "8",
+        "10",
         0
       ]
     },
```
```diff
@@ -103,5 +115,19 @@
     "_meta": {
       "title": "Save Image"
     }
+  },
+  "10": {
+    "inputs": {
+      "offload_model": true,
+      "offload_cache": true,
+      "anything": [
+        "8",
+        0
+      ]
+    },
+    "class_type": "VRAMCleanup",
+    "_meta": {
+      "title": "🎈VRAM-Cleanup"
+    }
   }
 }
```