feat(openrouter): add support for listing and using free OpenRouter models

Add a new configuration option `list_all_free_models` to enable fetching and displaying free models from OpenRouter. Enhance model loading functions to include free models when enabled, and implement fallback logic in prompt generation to try alternative models if the primary one fails. Update the UI to display free models in a separate optgroup.
Karl 2025-10-30 17:15:59 +00:00
parent f15c83ebaa
commit bb4adbff2c
5 changed files with 130 additions and 28 deletions


@@ -149,8 +149,13 @@ def load_openrouter_models_from_config():
     config = load_config()
     if config["openrouter"].get("enabled", "False").lower() == "true":
         models = config["openrouter"]["models"].split(",")
-        return sorted([model.strip() for model in models if model.strip()], key=str.lower)
-    return []
+        configured_models = sorted([model.strip() for model in models if model.strip()], key=str.lower)
+        free_models = []
+        if config["openrouter"].get("list_all_free_models", "False").lower() == "true":
+            from libs.openrouter import get_free_models
+            free_models = get_free_models()
+        return configured_models, free_models
+    return [], []
 
 def load_openwebui_models_from_config():
     config = load_config()
@@ -163,17 +168,22 @@ def load_prompt_models_from_config():
     """Load and return a list of available prompt generation models (both OpenWebUI and OpenRouter)."""
     config = load_config()
     prompt_models = []
 
     # Add OpenWebUI models if configured
     if "openwebui" in config and "models" in config["openwebui"]:
         openwebui_models = config["openwebui"]["models"].split(",")
         prompt_models.extend([("openwebui", model.strip()) for model in openwebui_models if model.strip()])
 
     # Add OpenRouter models if enabled and configured
     if config["openrouter"].get("enabled", "False").lower() == "true" and "models" in config["openrouter"]:
         openrouter_models = config["openrouter"]["models"].split(",")
         prompt_models.extend([("openrouter", model.strip()) for model in openrouter_models if model.strip()])
 
+    # Add free models if flag is set
+    if config["openrouter"].get("list_all_free_models", "False").lower() == "true":
+        from libs.openrouter import get_free_models
+        free_models = get_free_models()
+        prompt_models.extend([("openrouter", model) for model in free_models])
+
     return prompt_models
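
For callers, load_openrouter_models_from_config now returns a pair of lists rather than one, and load_prompt_models_from_config returns (provider, model) tuples. A minimal sketch of the new contracts, with illustrative model names taken from the sample config at the end of this commit:

    configured, free = load_openrouter_models_from_config()
    # configured -> models listed under [openrouter] models, e.g. ["google/gemma-7b-it:free"]
    # free       -> [] unless both enabled and list_all_free_models are true

    prompt_models = load_prompt_models_from_config()
    # e.g. [("openwebui", "llama3:latest"), ("openrouter", "google/gemma-7b-it:free")]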


@@ -14,6 +14,23 @@ LOG_FILE = "./prompts_log.jsonl"
 user_config = load_config()
 output_folder = user_config["comfyui"]["output_dir"]
 
+def get_free_models():
+    """Fetch all free models from OpenRouter."""
+    if user_config["openrouter"].get("enabled", "False").lower() != "true":
+        return []
+    try:
+        client = OpenAI(
+            base_url="https://openrouter.ai/api/v1",
+            api_key=user_config["openrouter"]["api_key"],
+        )
+        all_models_response = client.models.list()
+        all_models = [m.id for m in all_models_response.data]
+        free_models = [m for m in all_models if "free" in m.lower()]
+        return sorted(free_models, key=str.lower)
+    except Exception as e:
+        logging.warning(f"Failed to fetch free models from OpenRouter: {e}")
+        return []
+
 def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str = None) -> str:
     """Sends prompt to OpenRouter and returns the generated response."""
     # Check if OpenRouter is enabled
@@ -96,25 +113,50 @@ def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str = None) -> str:
         api_key=user_config["openrouter"]["api_key"],
     )
-    completion = client.chat.completions.create(
-        model=model,
-        messages=[
-            {
-                "role": "system",
-                "content": (
-                    "You are a prompt generator for Stable Diffusion. "
-                    "Generate a detailed and imaginative prompt with a strong visual theme. "
-                    "Focus on lighting, atmosphere, and artistic style. "
-                    "Keep the prompt concise, no extra commentary or formatting."
-                ),
-            },
-            {
-                "role": "user",
-                "content": user_content,
-            },
-        ]
-    )
+
+    system_content = (
+        "You are a prompt generator for Stable Diffusion. "
+        "Generate a detailed and imaginative prompt with a strong visual theme. "
+        "Focus on lighting, atmosphere, and artistic style. "
+        "Keep the prompt concise, no extra commentary or formatting."
+    )
+
+    # Try the specified model first
+    try:
+        completion = client.chat.completions.create(
+            model=model,
+            messages=[
+                {
+                    "role": "system",
+                    "content": system_content,
+                },
+                {
+                    "role": "user",
+                    "content": user_content,
+                },
+            ]
+        )
+    except Exception as e:
+        # If the system message fails (e.g., model doesn't support developer instructions),
+        # retry with the instructions included in the user message
+        if "developer instruction" in str(e).lower() or "system message" in str(e).lower():
+            logging.info(f"Model {model} doesn't support system messages, retrying with instructions in user message")
+            combined_content = f"{system_content}\n\n{user_content}"
+            completion = client.chat.completions.create(
+                model=model,
+                messages=[
+                    {
+                        "role": "user",
+                        "content": combined_content,
+                    },
+                ]
+            )
+        else:
+            # Any other error: re-raise so the outer handler can try fallback models
+            logging.warning(f"Error with model {model}: {e}. Trying fallback models.")
+            raise e
+
+    # If we get here, the completion was successful
     prompt = completion.choices[0].message.content.strip('"')
     match = re.search(r'"([^"]+)"', prompt)
     if not match:
@@ -138,5 +180,45 @@ def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str = None) -> str:
             logging.error("No OpenWebUI models configured for fallback.")
             return "A colorful abstract composition"  # Final fallback
     except Exception as e:
-        logging.error(f"Error generating prompt with OpenRouter: {e}")
+        # If the specified model fails, try fallback models
+        logging.warning(f"Primary model {model} failed: {e}. Trying fallback models.")
+
+        # Get all available models for fallback
+        configured_models = [m.strip() for m in user_config["openrouter"]["models"].split(",") if m.strip()]
+        free_models = get_free_models()
+
+        # Combine configured and free models, excluding the failed one
+        all_models = configured_models + free_models
+        fallback_models = [m for m in all_models if m != model]
+
+        if not fallback_models:
+            logging.error("No fallback models available.")
+            return ""
+
+        # Try up to 3 fallback models
+        for fallback_model in fallback_models[:3]:
+            try:
+                logging.info(f"Trying fallback model: {fallback_model}")
+                completion = client.chat.completions.create(
+                    model=fallback_model,
+                    messages=[
+                        {
+                            "role": "user",
+                            "content": f"{system_content}\n\n{user_content}",
+                        },
+                    ]
+                )
+                prompt = completion.choices[0].message.content.strip('"')
+                match = re.search(r'"([^"]+)"', prompt)
+                if not match:
+                    match = re.search(r":\s*\n*\s*(.+)", prompt)
+                if match:
+                    prompt = match.group(1)
+                logging.info(f"Successfully generated prompt with fallback model: {fallback_model}")
+                return prompt
+            except Exception as fallback_e:
+                logging.warning(f"Fallback model {fallback_model} also failed: {fallback_e}")
+                continue
+
+        logging.error("All models failed, including fallbacks.")
+        return ""


@@ -38,8 +38,8 @@ def create():
     # Load all models (SDXL, FLUX, and Qwen)
     sdxl_models, flux_models, qwen_models = load_models_from_config()
     openwebui_models = load_openwebui_models_from_config()
-    openrouter_models = load_openrouter_models_from_config()
+    openrouter_models, openrouter_free_models = load_openrouter_models_from_config()
     queue_count = get_queue_count()
 
     return render_template("create_image.html",
                            sdxl_models=sdxl_models,
@@ -47,6 +47,7 @@
                            qwen_models=qwen_models,
                            openwebui_models=openwebui_models,
                            openrouter_models=openrouter_models,
+                           openrouter_free_models=openrouter_free_models,
                            topics=load_topics_from_config(),
                            queue_count=queue_count)
@@ -68,8 +69,8 @@ def create_image_page():
     # Load all models (SDXL, FLUX, and Qwen)
     sdxl_models, flux_models, qwen_models = load_models_from_config()
     openwebui_models = load_openwebui_models_from_config()
-    openrouter_models = load_openrouter_models_from_config()
+    openrouter_models, openrouter_free_models = load_openrouter_models_from_config()
     queue_count = get_queue_count()
 
     return render_template("create_image.html",
                            sdxl_models=sdxl_models,
@@ -77,6 +78,7 @@
                            qwen_models=qwen_models,
                            openwebui_models=openwebui_models,
                            openrouter_models=openrouter_models,
+                           openrouter_free_models=openrouter_free_models,
                            topics=load_topics_from_config(),
                            queue_count=queue_count)


@@ -249,6 +249,13 @@
                 {% endfor %}
             </optgroup>
             {% endif %}
+            {% if openrouter_free_models %}
+            <optgroup label="OpenRouter Free">
+                {% for m in openrouter_free_models %}
+                <option value="openrouter:{{ m }}">{{ m }}</option>
+                {% endfor %}
+            </optgroup>
+            {% endif %}
         </select>
     </div>
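
The option value encodes provider and model id as openrouter:<id>. Since the free model ids themselves end in a colon segment (e.g. mistralai/mistral-7b-instruct:free), any server-side handling of this value presumably splits on the first colon only; a sketch of that assumption:

    value = "openrouter:mistralai/mistral-7b-instruct:free"
    provider, model_id = value.split(":", 1)  # maxsplit=1: model ids may contain ":free"
    # provider -> "openrouter", model_id -> "mistralai/mistral-7b-instruct:free"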


@@ -29,4 +29,5 @@ models = llama3:latest,cogito:14b,gemma3:12b
 [openrouter]
 enabled = False
 api_key =
-models = mistralai/mistral-7b-instruct:free,google/gemma-7b-it:free,meta-llama/llama-3.1-8b-instruct:free
+models = mistralai/mistral-7b-instruct:free,google/gemma-7b-it:free,meta-llama/llama-3.1-8b-instruct:free
+list_all_free_models = False
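
Both flags must be set for free models to be fetched, since the loaders check enabled before list_all_free_models. An illustrative config enabling the feature (the api_key value is a placeholder):

    [openrouter]
    enabled = True
    api_key = sk-or-placeholder
    models = mistralai/mistral-7b-instruct:free
    list_all_free_models = True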