mirror of https://github.com/karl0ss/ai_image_frame_server.git
synced 2025-08-05 09:58:28 +01:00

initial openrouter support

This commit is contained in:
parent d80cf9473a
commit 76e33ea523
@@ -145,7 +145,8 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
     """Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
 
     if prompt is None:
-        prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
+        logging.error("No prompt provided.")
+        return
 
     if not prompt:
         logging.error("No prompt generated.")
@@ -124,5 +124,51 @@ def load_topics_from_config():
     sorted_topics = sorted(topics, key=str.lower)
     return sorted_topics
 
+
+def load_openrouter_models_from_config():
+    config = load_config()
+    # Guard against a missing [openrouter] section before reading it
+    if "openrouter" in config and config["openrouter"].get("enabled", "False").lower() == "true":
+        models = config["openrouter"]["models"].split(",")
+        return sorted([model.strip() for model in models if model.strip()], key=str.lower)
+    return []
+
+
+def load_prompt_models_from_config():
+    """Load and return a list of available prompt generation models (both OpenWebUI and OpenRouter)."""
+    config = load_config()
+    prompt_models = []
+
+    # Add OpenWebUI models if configured
+    if "openwebui" in config and "models" in config["openwebui"]:
+        openwebui_models = config["openwebui"]["models"].split(",")
+        prompt_models.extend([("openwebui", model.strip()) for model in openwebui_models if model.strip()])
+
+    # Add OpenRouter models if enabled and configured
+    if "openrouter" in config and config["openrouter"].get("enabled", "False").lower() == "true" and "models" in config["openrouter"]:
+        openrouter_models = config["openrouter"]["models"].split(",")
+        prompt_models.extend([("openrouter", model.strip()) for model in openrouter_models if model.strip()])
+
+    return prompt_models
+
+
+def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
+    """Create a prompt using a randomly selected model from OpenWebUI or OpenRouter."""
+    prompt_models = load_prompt_models_from_config()
+
+    if not prompt_models:
+        logging.warning("No prompt generation models configured.")
+        return None
+
+    # Randomly select a (service, model) pair
+    service, model = random.choice(prompt_models)
+
+    if service == "openwebui":
+        # Import here to avoid circular imports
+        from libs.ollama import create_prompt_on_openwebui
+        return create_prompt_on_openwebui(base_prompt, topic)
+    elif service == "openrouter":
+        # Import here to avoid circular imports
+        from libs.openrouter import create_prompt_on_openrouter
+        return create_prompt_on_openrouter(base_prompt, topic)
+
+    return None
+
+
 user_config = load_config()
 output_folder = user_config["comfyui"]["output_dir"]
95 libs/openrouter.py Normal file

@@ -0,0 +1,95 @@
+import random
+import logging
+from openai import OpenAI
+import nest_asyncio
+from libs.generic import load_recent_prompts, load_config
+import re
+
+nest_asyncio.apply()
+
+logging.basicConfig(level=logging.INFO)
+
+LOG_FILE = "./prompts_log.jsonl"
+
+user_config = load_config()
+output_folder = user_config["comfyui"]["output_dir"]
+
+
+def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str = None) -> str:
+    """Sends the prompt to OpenRouter and returns the generated response."""
+    # Check if OpenRouter is enabled
+    if user_config["openrouter"].get("enabled", "False").lower() != "true":
+        logging.warning("OpenRouter is not enabled in the configuration.")
+        return ""
+
+    topic_instruction = ""
+    selected_topic = ""
+    # Unique list of recent prompts
+    recent_prompts = list(set(load_recent_prompts()))
+    if topic == "random":
+        topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
+        selected_topic = random.choice(topics) if topics else ""
+    elif topic != "":
+        selected_topic = topic
+    else:
+        # Decide whether to include a topic (30% chance)
+        topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
+        if random.random() < 0.3 and topics:
+            selected_topic = random.choice(topics)
+    if selected_topic != "":
+        topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
+
+    user_content = (
+        "Can you generate me a really random image idea? Do not exceed 10 words. Use clear language, not poetic metaphors."
+        + topic_instruction
+        + " Avoid prompts similar to the following:\n"
+        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+    )
+
+    # Use the specified model, or select a random one from the configured OpenRouter models
+    if not model:
+        models = [m.strip() for m in user_config["openrouter"]["models"].split(",") if m.strip()]
+        if not models:
+            logging.error("No OpenRouter models configured.")
+            return ""
+        model = random.choice(models)
+
+    try:
+        client = OpenAI(
+            base_url="https://openrouter.ai/api/v1",
+            api_key=user_config["openrouter"]["api_key"],
+        )
+
+        completion = client.chat.completions.create(
+            model=model,
+            messages=[
+                {
+                    "role": "system",
+                    "content": (
+                        "You are a prompt generator for Stable Diffusion. "
+                        "Generate a detailed and imaginative prompt with a strong visual theme. "
+                        "Focus on lighting, atmosphere, and artistic style. "
+                        "Keep the prompt concise, no extra commentary or formatting."
+                    ),
+                },
+                {
+                    "role": "user",
+                    "content": user_content,
+                },
+            ],
+        )
+
+        prompt = completion.choices[0].message.content.strip('"')
+        # Prefer a quoted prompt if the model returned one; otherwise take the text after a colon
+        match = re.search(r'"([^"]+)"', prompt)
+        if not match:
+            match = re.search(r":\s*\n*\s*(.+)", prompt)
+        if match:
+            prompt = match.group(1)
+        logging.debug(prompt)
+        return prompt
+    except Exception as e:
+        logging.error(f"Error generating prompt with OpenRouter: {e}")
+        return ""
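A minimal, hypothetical call of the new helper (not part of this commit), assuming [openrouter] is enabled and an api_key is set in the user config; the topic and model values here are illustrative:

    from libs.openrouter import create_prompt_on_openrouter

    prompt = create_prompt_on_openrouter(
        "Generate a random image prompt",
        topic="random",
        model="mistralai/mistral-7b-instruct:free",  # any model listed in [openrouter] works
    )
    print(prompt or "OpenRouter disabled or request failed")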
BIN requirements.txt
Binary file not shown.
@@ -2,7 +2,7 @@ from flask import Blueprint, request, render_template, redirect, url_for, session
 import threading
 from libs.comfyui import create_image, select_model, get_available_models
 from libs.ollama import create_prompt_on_openwebui
-from libs.generic import load_models_from_config, load_topics_from_config
+from libs.generic import load_models_from_config, load_topics_from_config, load_openrouter_models_from_config, create_prompt_with_random_model
 import os
 
 bp = Blueprint("create_routes", __name__)
@@ -16,12 +16,18 @@ def create():
         topic = request.form.get("topic")
 
         if not prompt:
-            prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic)
+            prompt = create_prompt_with_random_model(user_config["comfyui"]["prompt"], topic)
 
         threading.Thread(target=lambda: create_image(prompt, model)).start()
         return redirect(url_for("create_routes.image_queued", prompt=prompt, model=model.split(".")[0]))
 
-    return render_template("create_image.html", models=load_models_from_config()[0]+load_models_from_config()[1], topics=load_topics_from_config())
+    # Load all models (SDXL and FLUX only)
+    sdxl_models, flux_models = load_models_from_config()
+
+    return render_template("create_image.html",
+                           sdxl_models=sdxl_models,
+                           flux_models=flux_models,
+                           topics=load_topics_from_config())
 
 @bp.route("/image_queued")
 def image_queued():
@@ -33,7 +39,14 @@ def image_queued():
 def create_image_page():
     if user_config["frame"]["create_requires_auth"] == "True" and not session.get("authenticated"):
         return redirect(url_for("auth_routes.login", next=request.path))
-    return render_template("create_image.html", models=load_models_from_config()[0]+load_models_from_config()[1], topics=load_topics_from_config())
+
+    # Load all models (SDXL and FLUX only)
+    sdxl_models, flux_models = load_models_from_config()
+
+    return render_template("create_image.html",
+                           sdxl_models=sdxl_models,
+                           flux_models=flux_models,
+                           topics=load_topics_from_config())
 
 
 def init_app(config):
@@ -116,16 +116,20 @@
 
 <select id="model-select">
     <option value="" selected>Random</option>
+    {% if flux_models %}
     <optgroup label="FLUX">
-        {% for m in models if 'flux' in m|lower %}
-        <option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
+        {% for m in flux_models %}
+        <option value="{{ m }}">{{ m.rsplit('.', 1)[0] if '.' in m else m }}</option>
         {% endfor %}
     </optgroup>
+    {% endif %}
+    {% if sdxl_models %}
     <optgroup label="SDXL">
-        {% for m in models if 'flux' not in m|lower %}
-        <option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
+        {% for m in sdxl_models %}
+        <option value="{{ m }}">{{ m.rsplit('.', 1)[0] if '.' in m else m }}</option>
         {% endfor %}
     </optgroup>
+    {% endif %}
 </select>
 
 <select id="topic-select">
|
@ -25,3 +25,8 @@ models = flux1-dev-Q4_0.gguf,flux1-schnell-Q4_0.gguf
|
|||||||
base_url = https://openwebui
|
base_url = https://openwebui
|
||||||
api_key = sk-
|
api_key = sk-
|
||||||
models = llama3:latest,cogito:14b,gemma3:12b
|
models = llama3:latest,cogito:14b,gemma3:12b
|
||||||
|
|
||||||
|
[openrouter]
|
||||||
|
enabled = False
|
||||||
|
api_key =
|
||||||
|
models = mistralai/mistral-7b-instruct:free,google/gemma-7b-it:free,meta-llama/llama-3.1-8b-instruct:free
|
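A sketch of how this new [openrouter] section gets read (an assumption, not part of this commit): the string-based enabled checks above suggest load_config() wraps configparser, which returns all values as strings:

    import configparser

    config = configparser.ConfigParser()
    config.read("user_config.cfg")  # assumed config file name

    enabled = config["openrouter"].get("enabled", "False").lower() == "true"
    models = [m.strip() for m in config["openrouter"]["models"].split(",") if m.strip()]
    print(enabled, models)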