fix(openrouter): handle rate limit errors with OpenWebUI fallback

When OpenRouter returns a 429 rate-limit error, the application now
attempts to fall back to an OpenWebUI model instead of immediately
returning a default prompt. This provides better resilience when external
API limits are exceeded while keeping prompt generation working through
local models.

The changes include:
- Adding the RateLimitError import from openai
- Implementing fallback logic in the create_prompt_on_openrouter function
- Using OpenWebUI as a secondary source for prompts when rate limiting occurs
- Error handling and logging for both the primary and fallback paths

This makes prompt generation more robust: users still receive generated
content even when the external service is temporarily unavailable due to
rate limits. The fallback prefers configured local models when available,
with a final default prompt as a last resort. The implementation follows
the existing pattern of picking a random model from the configured
OpenWebUI list, handles failures in both the primary and fallback paths,
and preserves backward compatibility.
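The essence of the change is a tiered fallback keyed on openai's RateLimitError. Below is a minimal, self-contained sketch of that pattern, assuming the openai v1 client; call_local_model, generate_prompt, and the model id are hypothetical stand-ins for illustration, not names from this repository:

    import logging
    import random

    from openai import OpenAI, RateLimitError

    DEFAULT_PROMPT = "A colorful abstract composition"

    def call_local_model(model: str) -> str:
        """Hypothetical stand-in for a local OpenWebUI call."""
        return f"(prompt generated locally by {model})"

    def generate_prompt(client: OpenAI, local_models: list[str]) -> str:
        """Try the remote API first; on a 429, fall back to a random local model."""
        try:
            response = client.chat.completions.create(
                model="openrouter/auto",  # placeholder model id
                messages=[{"role": "user", "content": "Write an image prompt."}],
            )
            return response.choices[0].message.content
        except RateLimitError as exc:
            logging.warning("Rate limited (429): %s; trying local fallback.", exc)
            if not local_models:
                return DEFAULT_PROMPT  # no local models configured
            try:
                return call_local_model(random.choice(local_models))
            except Exception as exc2:
                logging.error("Local fallback failed: %s", exc2)
                return DEFAULT_PROMPT
        except Exception as exc:
            logging.error("OpenRouter call failed: %s", exc)
            return ""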
This commit is contained in:
parent 06d3a64bb9
commit a63668cc93
@@ -240,7 +240,6 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
         logging.error(f"Error with OpenRouter: {e}")
         return "A colorful abstract composition"  # Default fallback prompt
 
-    return "A colorful abstract composition"  # Default fallback prompt
 
 user_config = load_config()
 output_folder = user_config["comfyui"]["output_dir"]
@@ -1,8 +1,9 @@
 import random
 import logging
-from openai import OpenAI
+from openai import OpenAI, RateLimitError
 import nest_asyncio
 from libs.generic import load_recent_prompts, load_config
+from libs.openwebui import create_prompt_on_openwebui
 import re
 
 nest_asyncio.apply()
@@ -90,6 +91,20 @@ def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str =
             prompt = match.group(1)
         logging.debug(prompt)
         return prompt
+    except RateLimitError as e:
+        logging.warning(f"OpenRouter rate limit exceeded (429): {e}. Falling back to local OpenWebUI model.")
+        # Try to use OpenWebUI as fallback
+        openwebui_models = [m.strip() for m in user_config["openwebui"]["models"].split(",") if m.strip()] if "openwebui" in user_config and "models" in user_config["openwebui"] else []
+        if openwebui_models:
+            selected_model = random.choice(openwebui_models)
+            try:
+                return create_prompt_on_openwebui(user_content, topic, selected_model)
+            except Exception as e2:
+                logging.error(f"OpenWebUI fallback also failed: {e2}")
+                return "A colorful abstract composition"  # Final fallback
+        else:
+            logging.error("No OpenWebUI models configured for fallback.")
+            return "A colorful abstract composition"  # Final fallback
     except Exception as e:
         logging.error(f"Error generating prompt with OpenRouter: {e}")
         return ""
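The fallback reads a comma-separated model list from the [openwebui] section of the user config. A minimal sketch of how that string is parsed, matching the list comprehension in the diff above; the model names are hypothetical:

    user_config = {"openwebui": {"models": "llama3:8b, mistral:7b, "}}  # hypothetical values

    models = [
        m.strip()
        for m in user_config["openwebui"]["models"].split(",")
        if m.strip()  # drops empty entries left by trailing commas or whitespace
    ]
    assert models == ["llama3:8b", "mistral:7b"]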