diff --git a/libs/comfyui.py b/libs/comfyui.py
index 9c05052..52aa6c0 100644
--- a/libs/comfyui.py
+++ b/libs/comfyui.py
@@ -145,7 +145,8 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
"""Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
if prompt is None:
- prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
+ logging.error("No prompt provided.")
+ return
if not prompt:
logging.error("No prompt generated.")
@@ -153,7 +154,7 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
save_prompt(prompt)
selected_workflow, model = select_model(model)
-
+
if selected_workflow == "FLUX":
generate_image(
file_name="image",
diff --git a/libs/generic.py b/libs/generic.py
index 299528c..ca7a0e8 100644
--- a/libs/generic.py
+++ b/libs/generic.py
@@ -124,5 +124,72 @@ def load_topics_from_config():
sorted_topics = sorted(topics, key=str.lower)
return sorted_topics
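+
+
+# The loaders below assume an "openrouter" section in whatever config file
+# load_config() reads; a hypothetical INI layout (key names taken from this diff):
+#
+#   [openrouter]
+#   enabled = True
+#   api_key = sk-or-...
+#   models = openai/gpt-4o-mini, anthropic/claude-3.5-haiku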
+
+
+def load_openrouter_models_from_config():
+    """Return the sorted list of configured OpenRouter models, or [] if OpenRouter is disabled."""
+    config = load_config()
+    if "openrouter" in config and config["openrouter"].get("enabled", "False").lower() == "true":
+        models = config["openrouter"].get("models", "").split(",")
+        return sorted([model.strip() for model in models if model.strip()], key=str.lower)
+    return []
+
+
+def load_prompt_models_from_config():
+    """Return the available prompt-generation models as (service, model) tuples,
+    covering both OpenWebUI and OpenRouter."""
+ config = load_config()
+ prompt_models = []
+
+ # Add OpenWebUI models if configured
+ if "openwebui" in config and "models" in config["openwebui"]:
+ openwebui_models = config["openwebui"]["models"].split(",")
+ prompt_models.extend([("openwebui", model.strip()) for model in openwebui_models if model.strip()])
+
+ # Add OpenRouter models if enabled and configured
+    if "openrouter" in config and config["openrouter"].get("enabled", "False").lower() == "true" and "models" in config["openrouter"]:
+ openrouter_models = config["openrouter"]["models"].split(",")
+ prompt_models.extend([("openrouter", model.strip()) for model in openrouter_models if model.strip()])
+
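+    # e.g. [("openwebui", "llama3"), ("openrouter", "openai/gpt-4o-mini")] -- names are illustrative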
+ return prompt_models
+
+
+def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
+ """Create a prompt using a randomly selected model from OpenWebUI or OpenRouter."""
+ prompt_models = load_prompt_models_from_config()
+
+ if not prompt_models:
+ logging.warning("No prompt generation models configured.")
+ return None
+
+ # Randomly select a model
+ service, model = random.choice(prompt_models)
+
+ if service == "openwebui":
+ # Import here to avoid circular imports
+ from libs.ollama import create_prompt_on_openwebui
+ return create_prompt_on_openwebui(base_prompt, topic)
+ elif service == "openrouter":
+ # Import here to avoid circular imports
+ from libs.openrouter import create_prompt_on_openrouter
+ return create_prompt_on_openrouter(base_prompt, topic)
+
+ return None
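+
+# A minimal usage sketch (mirroring the call in routes/create_routes.py):
+#   prompt = create_prompt_with_random_model(user_config["comfyui"]["prompt"], topic)
+#   if prompt:
+#       create_image(prompt)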
+
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]
\ No newline at end of file
diff --git a/libs/openrouter.py b/libs/openrouter.py
new file mode 100644
index 0000000..dd2ed55
--- /dev/null
+++ b/libs/openrouter.py
@@ -0,0 +1,95 @@
+import random
+import logging
+from openai import OpenAI
+import nest_asyncio
+from libs.generic import load_recent_prompts, load_config
+import re
+nest_asyncio.apply()
+
+logging.basicConfig(level=logging.INFO)
+
+LOG_FILE = "./prompts_log.jsonl"
+
+user_config = load_config()
+output_folder = user_config["comfyui"]["output_dir"]
+
+def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str | None = None) -> str:
+ """Sends prompt to OpenRouter and returns the generated response."""
+ # Check if OpenRouter is enabled
+    if "openrouter" not in user_config or user_config["openrouter"].get("enabled", "False").lower() != "true":
+ logging.warning("OpenRouter is not enabled in the configuration.")
+ return ""
+
+ topic_instruction = ""
+ selected_topic = ""
+ # Unique list of recent prompts
+ recent_prompts = list(set(load_recent_prompts()))
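+    # topic == "random" always picks a configured topic; an explicit topic is
+    # used as-is; an empty topic gives a 30% chance of including a random one.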
+ if topic == "random":
+ topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
+ selected_topic = random.choice(topics) if topics else ""
+ elif topic != "":
+ selected_topic = topic
+ else:
+ # Decide on whether to include a topic (e.g., 30% chance to include)
+ topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
+ if random.random() < 0.3 and topics:
+ selected_topic = random.choice(topics)
+ if selected_topic != "":
+ topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
+
+    user_content = (
+        "Can you generate me a really random image idea? Do not exceed 10 words. "
+        "Use clear language, not poetic metaphors."
+        + topic_instruction
+        + " Avoid prompts similar to the following:\n"
+        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+    )
+
+    # Use the caller-specified model, or fall back to a random configured model
+    if not model:
+        models = [m.strip() for m in user_config["openrouter"]["models"].split(",") if m.strip()]
+        if not models:
+            logging.error("No OpenRouter models configured.")
+            return ""
+        model = random.choice(models)
+
+ try:
+ client = OpenAI(
+ base_url="https://openrouter.ai/api/v1",
+ api_key=user_config["openrouter"]["api_key"],
+ )
+
+ completion = client.chat.completions.create(
+ model=model,
+ messages=[
+ {
+ "role": "system",
+ "content": (
+ "You are a prompt generator for Stable Diffusion. "
+ "Generate a detailed and imaginative prompt with a strong visual theme. "
+ "Focus on lighting, atmosphere, and artistic style. "
+ "Keep the prompt concise, no extra commentary or formatting."
+ ),
+ },
+ {
+ "role": "user",
+ "content": user_content,
+ },
+ ]
+ )
+
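+        # Models sometimes wrap the idea in quotes or prefix it with a label;
+        # try to pull out just the bare prompt text.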
+        prompt = (completion.choices[0].message.content or "").strip('"')
+ match = re.search(r'"([^"]+)"', prompt)
+ if not match:
+ match = re.search(r":\s*\n*\s*(.+)", prompt)
+ if match:
+ prompt = match.group(1)
+ logging.debug(prompt)
+ return prompt
+ except Exception as e:
+ logging.error(f"Error generating prompt with OpenRouter: {e}")
+ return ""
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index ffcaa4e..8d8f8f8 100644
Binary files a/requirements.txt and b/requirements.txt differ
diff --git a/routes/create_routes.py b/routes/create_routes.py
index b92476d..5bdaa51 100644
--- a/routes/create_routes.py
+++ b/routes/create_routes.py
@@ -2,7 +2,7 @@ from flask import Blueprint, request, render_template, redirect, url_for, sessio
import threading
from libs.comfyui import create_image, select_model, get_available_models
from libs.ollama import create_prompt_on_openwebui
-from libs.generic import load_models_from_config, load_topics_from_config
+from libs.generic import load_models_from_config, load_topics_from_config, load_openrouter_models_from_config, create_prompt_with_random_model
import os
bp = Blueprint("create_routes", __name__)
@@ -16,12 +16,18 @@ def create():
topic = request.form.get("topic")
if not prompt:
- prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic)
+ prompt = create_prompt_with_random_model(user_config["comfyui"]["prompt"], topic)
threading.Thread(target=lambda: create_image(prompt, model)).start()
return redirect(url_for("create_routes.image_queued", prompt=prompt, model=model.split(".")[0]))
- return render_template("create_image.html", models=load_models_from_config()[0]+load_models_from_config()[1], topics=load_topics_from_config())
+    # Load the configured SDXL and FLUX model lists
+ sdxl_models, flux_models = load_models_from_config()
+
+ return render_template("create_image.html",
+ sdxl_models=sdxl_models,
+ flux_models=flux_models,
+ topics=load_topics_from_config())
@bp.route("/image_queued")
def image_queued():
@@ -33,7 +39,14 @@ def image_queued():
def create_image_page():
if user_config["frame"]["create_requires_auth"] == "True" and not session.get("authenticated"):
return redirect(url_for("auth_routes.login", next=request.path))
- return render_template("create_image.html", models=load_models_from_config()[0]+load_models_from_config()[1], topics=load_topics_from_config())
+
+    # Load the configured SDXL and FLUX model lists
+ sdxl_models, flux_models = load_models_from_config()
+
+ return render_template("create_image.html",
+ sdxl_models=sdxl_models,
+ flux_models=flux_models,
+ topics=load_topics_from_config())
def init_app(config):
diff --git a/templates/create_image.html b/templates/create_image.html
index f779476..6c39375 100644
--- a/templates/create_image.html
+++ b/templates/create_image.html
@@ -116,16 +116,20 @@