Compare commits


No commits in common. "main" and "0.3.0" have entirely different histories.
main ... 0.3.0

10 changed files with 56 additions and 130 deletions

View File

@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.3.4"
+current_version = "0.3.0"
 parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
 serialize = ["{major}.{minor}.{patch}"]
 replace = "{new_version}"

View File

@@ -4,7 +4,7 @@ FROM python:3.11-slim
 # Set the working directory in the container
 WORKDIR /app
 
 # Set version label
-ARG VERSION="0.3.4"
+ARG VERSION="0.3.0"
 LABEL version=$VERSION
 # Copy project files into the container
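For reference: an `ARG` default like this can be overridden at build time without editing the Dockerfile, e.g. `docker build --build-arg VERSION=0.3.0 .` (tag flags omitted here), and the `LABEL` then records whichever value the build used.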

View File

@@ -18,13 +18,6 @@ user_config = load_config()
 app = Flask(__name__)
 app.secret_key = os.environ.get("SECRET_KEY")
-# Make version available to all templates
-from libs.generic import get_current_version
-
-@app.context_processor
-def inject_version():
-    version = get_current_version()
-    return dict(version=version)
 
 # Inject config into routes that need it
 create_routes.init_app(user_config)
 auth_routes.init_app(user_config)
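(For context: a Flask `@app.context_processor` runs before every template render and merges the returned dict into the template context, which is what made `{{ version }}` available globally. Without it, on the 0.3.0 side, each route passes `version=` to `render_template` explicitly, as the index-route hunk later in this diff shows.)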
@@ -46,16 +39,7 @@ from libs.comfyui import create_image
 def scheduled_task():
     print(f"Executing scheduled task at {time.strftime('%Y-%m-%d %H:%M:%S')}")
-    # Generate a random prompt using either OpenWebUI or OpenRouter
-    from libs.generic import create_prompt_with_random_model
-    prompt = create_prompt_with_random_model("Generate a random detailed prompt for stable diffusion.")
-    if prompt:
-        # Select a random model
-        import random
-        model = "Random Image Model"
-        create_image(prompt, model)
-    else:
-        print("Failed to generate a prompt for the scheduled task.")
+    create_image(None)
 
 
 if user_config["frame"]["auto_regen"] == "True":
     if os.environ.get("WERKZEUG_RUN_MAIN") == "true":

View File

@@ -15,7 +15,7 @@ from tenacity import (
 import nest_asyncio
 from libs.generic import rename_image, load_config, save_prompt
 from libs.create_thumbnail import generate_thumbnail
-from libs.openwebui import create_prompt_on_openwebui
+from libs.ollama import create_prompt_on_openwebui
 
 nest_asyncio.apply()
 logging.basicConfig(level=logging.INFO)
@@ -145,12 +145,8 @@ def create_image(prompt: str | None = None, model: str = "Random Image Model") -
     """Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
     if prompt is None:
-        # Generate a random prompt using either OpenWebUI or OpenRouter
-        from libs.generic import create_prompt_with_random_model
-        prompt = create_prompt_with_random_model("Generate a random detailed prompt for stable diffusion.")
-        if not prompt:
-            logging.error("Failed to generate a prompt.")
-            return
+        logging.error("No prompt provided.")
+        return
 
     if not prompt:
         logging.error("No prompt generated.")

View File

@@ -110,7 +110,7 @@ def get_current_version():
         return version
     except subprocess.CalledProcessError as e:
         print("Error running bump-my-version:", e)
-        return "unknown"
+        return None
 
 def load_models_from_config():
     flux_models = load_config()["comfyui:flux"]["models"].split(",")
@@ -158,10 +158,7 @@ def load_prompt_models_from_config():
 def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
-    """Create a prompt using a randomly selected model from OpenWebUI or OpenRouter.
-
-    If OpenWebUI fails, it will retry once. If it fails again, it will fallback to OpenRouter.
-    """
+    """Create a prompt using a randomly selected model from OpenWebUI or OpenRouter."""
     prompt_models = load_prompt_models_from_config()
     if not prompt_models:
@@ -171,59 +168,16 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
     # Randomly select a model
     service, model = random.choice(prompt_models)
 
-    # Import here to avoid circular imports
-    from libs.openwebui import create_prompt_on_openwebui
-    from libs.openrouter import create_prompt_on_openrouter
-
     if service == "openwebui":
-        try:
-            # First attempt with OpenWebUI
-            logging.info(f"Attempting to generate prompt with OpenWebUI using model: {model}")
-            result = create_prompt_on_openwebui(base_prompt, topic, model)
-            if result:
-                return result
-
-            # If first attempt returns None, try again
-            logging.warning("First OpenWebUI attempt failed. Retrying...")
-            result = create_prompt_on_openwebui(base_prompt, topic, model)
-            if result:
-                return result
-
-            # If second attempt fails, fallback to OpenRouter
-            logging.warning("Second OpenWebUI attempt failed. Falling back to OpenRouter...")
-            openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]
-            if openrouter_models:
-                _, openrouter_model = random.choice(openrouter_models)
-                return create_prompt_on_openrouter(base_prompt, topic, openrouter_model)
-            else:
-                logging.error("No OpenRouter models configured for fallback.")
-                return "A colorful abstract composition"  # Default fallback prompt
-        except Exception as e:
-            logging.error(f"Error with OpenWebUI: {e}")
-            # Fallback to OpenRouter on exception
-            logging.warning("OpenWebUI exception. Falling back to OpenRouter...")
-            openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]
-            if openrouter_models:
-                _, openrouter_model = random.choice(openrouter_models)
-                try:
-                    return create_prompt_on_openrouter(base_prompt, topic, openrouter_model)
-                except Exception as e2:
-                    logging.error(f"Error with OpenRouter fallback: {e2}")
-                    return "A colorful abstract composition"  # Default fallback prompt
-            else:
-                logging.error("No OpenRouter models configured for fallback.")
-                return "A colorful abstract composition"  # Default fallback prompt
+        # Import here to avoid circular imports
+        from libs.ollama import create_prompt_on_openwebui
+        return create_prompt_on_openwebui(base_prompt, topic)
     elif service == "openrouter":
-        try:
-            # Use OpenRouter
-            return create_prompt_on_openrouter(base_prompt, topic, model)
-        except Exception as e:
-            logging.error(f"Error with OpenRouter: {e}")
-            return "A colorful abstract composition"  # Default fallback prompt
+        # Import here to avoid circular imports
+        from libs.openrouter import create_prompt_on_openrouter
+        return create_prompt_on_openrouter(base_prompt, topic)
 
-    return "A colorful abstract composition"  # Default fallback prompt
+    return None
 
 user_config = load_config()
 output_folder = user_config["comfyui"]["output_dir"]
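Condensed, the main-side branch removed above amounts to "try OpenWebUI twice, then fall back to OpenRouter". A sketch of that skeleton — not the repository's code — reusing the two `create_prompt_*` helpers that appear in this diff:

```python
import logging
import random

# create_prompt_on_openwebui / create_prompt_on_openrouter are the helpers
# imported in the hunk above (libs.openwebui / libs.openrouter on the main side).

def prompt_with_fallback(base_prompt, topic, model, prompt_models):
    for attempt in (1, 2):  # first try plus one retry on OpenWebUI
        result = create_prompt_on_openwebui(base_prompt, topic, model)
        if result:
            return result
        logging.warning("OpenWebUI attempt %d failed.", attempt)
    # Fall back to a randomly chosen OpenRouter model, if any is configured.
    openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]
    if openrouter_models:
        _, fallback_model = random.choice(openrouter_models)
        return create_prompt_on_openrouter(base_prompt, topic, fallback_model)
    return "A colorful abstract composition"  # default fallback prompt
```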

View File

@@ -1,11 +1,9 @@
 import random
 import logging
+import litellm
 import nest_asyncio
 from libs.generic import load_recent_prompts, load_config
 import re
-from openwebui_chat_client import OpenWebUIClient
-from datetime import datetime
 
 nest_asyncio.apply()
 logging.basicConfig(level=logging.INFO)
@@ -35,61 +33,53 @@ def create_prompt_on_openwebui(prompt: str, topic: str = "random", model: str =
         topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
 
     user_content = (
-        "Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors."
+        "Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors."
         + topic_instruction
         + "Avoid prompts similar to the following:"
        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
     )
 
     if model:
         # Use the specified model
         model = model
     else:
         # Select a random model
-        model = random.choice(user_config["openwebui"]["models"].split(",")).strip()
-
-    # Create OpenWebUI client
-    client = OpenWebUIClient(
-        base_url=user_config["openwebui"]["base_url"],
-        token=user_config["openwebui"]["api_key"],
-        default_model_id=model
-    )
+        model = random.choice(user_config["openwebui"]["models"].split(","))
+
+    response = litellm.completion(
+        api_base=user_config["openwebui"]["base_url"],
+        model="openai/" + model,
+        messages=[
+            {
+                "role": "system",
+                "content": (
+                    "You are a prompt generator for Stable Diffusion. "
+                    "Generate a detailed and imaginative prompt with a strong visual theme. "
+                    "Focus on lighting, atmosphere, and artistic style. "
+                    "Keep the prompt concise, no extra commentary or formatting."
+                ),
+            },
+            {
+                "role": "user",
+                "content": user_content,
+            },
+        ],
+        api_key=user_config["openwebui"]["api_key"],
+    )
-    # Prepare messages for the chat
-    messages = [
-        {
-            "role": "system",
-            "content": (
-                "You are a prompt generator for Stable Diffusion. "
-                "Generate a detailed and imaginative prompt with a strong visual theme. "
-                "Focus on lighting, atmosphere, and artistic style. "
-                "Keep the prompt concise, no extra commentary or formatting."
-            ),
-        },
-        {
-            "role": "user",
-            "content": user_content,
-        },
-    ]
-
-    # Send the chat request
-    try:
-        result = client.chat(
-            question=user_content,
-            chat_title=datetime.now().strftime("%Y-%m-%d %H:%M"),
-            folder_name="ai-frame-image-server"
-        )
-        if result:
-            prompt = result["response"].strip('"')
-        else:
-            # Return None if the request fails
-            logging.warning(f"OpenWebUI request failed with model: {model}")
-            return None
-    except Exception as e:
-        logging.error(f"Error in OpenWebUI request with model {model}: {e}")
-        return None
+
+    prompt = response["choices"][0]["message"]["content"].strip('"')
+    # response = litellm.completion(
+    #     api_base=user_config["openwebui"]["base_url"],
+    #     model="openai/brxce/stable-diffusion-prompt-generator:latest",
+    #     messages=[
+    #         {
+    #             "role": "user",
+    #             "content": prompt,
+    #         },
+    #     ],
+    #     api_key=user_config["openwebui"]["api_key"],
+    # )
+    # prompt = response["choices"][0]["message"]["content"].strip('"')
 
     match = re.search(r'"([^"]+)"', prompt)
     if not match:
         match = re.search(r":\s*\n*\s*(.+)", prompt)

Binary file not shown.

View File

@@ -1,7 +1,7 @@
 from flask import Blueprint, request, render_template, redirect, url_for, session
 import threading
 from libs.comfyui import create_image, select_model, get_available_models
-from libs.openwebui import create_prompt_on_openwebui
+from libs.ollama import create_prompt_on_openwebui
 from libs.generic import load_models_from_config, load_topics_from_config, load_openrouter_models_from_config, load_openwebui_models_from_config, create_prompt_with_random_model
 import os

@@ -23,7 +23,7 @@ def create():
         # Use the specified prompt model
         service, service_model = prompt_model.split(":", 1) if ":" in prompt_model else (prompt_model, "")
         if service == "openwebui":
-            from libs.openwebui import create_prompt_on_openwebui
+            from libs.ollama import create_prompt_on_openwebui
             prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic, service_model)
         elif service == "openrouter":
             from libs.openrouter import create_prompt_on_openrouter

View File

@@ -11,10 +11,12 @@ def index():
     image_filename = "./image.png"
     image_path = os.path.join(image_folder, image_filename)
     prompt = get_details_from_png(image_path)["p"]
+    version = get_current_version()
 
     return render_template(
         "index.html",
         image=image_filename,
         prompt=prompt,
         reload_interval=user_config["frame"]["reload_interval"],
+        version=version,
     )

View File

@@ -12,7 +12,7 @@
 <!-- Version number at bottom right -->
 <div class="version">
-    <a href="{{ url_for('settings_route.config_editor') }}">{% if version and version != 'unknown' %}v{{ version }}{% else %}v?.?.?{% endif %}</a>
+    <a href="{{ url_for('settings_route.config_editor') }}">v{{ version }}</a>
 </div>
 
 {% block scripts %}{% endblock %}