From a63668cc93acdb2ea8a04177cc912d62c61d9ffa Mon Sep 17 00:00:00 2001 From: Karl Date: Thu, 4 Sep 2025 08:58:24 +0100 Subject: [PATCH] fix(openrouter): handle rate limit errors with OpenWebUI fallback When OpenRouter returns a 429 error due to rate limiting, the application now attempts to fall back to using an OpenWebUI model instead of returning a default prompt. This provides better resilience when external API limits are exceeded while maintaining functionality through local models. The changes include: - Adding RateLimitError import from openai - Implementing fallback logic in create_prompt_on_openrouter function - Using OpenWebUI as secondary source for prompts when rate limiting occurs - Proper error handling and logging for both primary and fallback scenarios This change improves the robustness of prompt generation by ensuring that users receive generated content even when external services are temporarily unavailable due to rate limits. The fallback mechanism prioritizes configured local models if available, with a final default prompt as backup. The implementation follows the existing pattern of using random selection from configured OpenWebUI models and includes comprehensive error handling for both primary and fallback operations. This ensures that all failure modes are gracefully handled while maintaining backward compatibility. 
--- libs/generic.py | 1 - libs/openrouter.py | 17 ++++++++++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/libs/generic.py b/libs/generic.py index bdbc27f..b56a68c 100644 --- a/libs/generic.py +++ b/libs/generic.py @@ -240,7 +240,6 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"): logging.error(f"Error with OpenRouter: {e}") return "A colorful abstract composition" # Default fallback prompt - return "A colorful abstract composition" # Default fallback prompt user_config = load_config() output_folder = user_config["comfyui"]["output_dir"] \ No newline at end of file diff --git a/libs/openrouter.py b/libs/openrouter.py index dd2ed55..5de3aa7 100644 --- a/libs/openrouter.py +++ b/libs/openrouter.py @@ -1,8 +1,9 @@ import random import logging -from openai import OpenAI +from openai import OpenAI, RateLimitError import nest_asyncio from libs.generic import load_recent_prompts, load_config +from libs.openwebui import create_prompt_on_openwebui import re nest_asyncio.apply() @@ -90,6 +91,20 @@ def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str = prompt = match.group(1) logging.debug(prompt) return prompt + except RateLimitError as e: + logging.warning(f"OpenRouter rate limit exceeded (429): {e}. Falling back to local OpenWebUI model.") + # Try to use OpenWebUI as fallback + openwebui_models = [m.strip() for m in user_config["openwebui"]["models"].split(",") if m.strip()] if "openwebui" in user_config and "models" in user_config["openwebui"] else [] + if openwebui_models: + selected_model = random.choice(openwebui_models) + try: + return create_prompt_on_openwebui(user_content, topic, selected_model) + except Exception as e2: + logging.error(f"OpenWebUI fallback also failed: {e2}") + return "A colorful abstract composition" # Final fallback + else: + logging.error("No OpenWebUI models configured for fallback.") + return "A colorful abstract composition" # Final fallback except Exception as e: logging.error(f"Error generating prompt with OpenRouter: {e}") return ""