Mirror of https://github.com/karl0ss/ai_image_frame_server.git (synced 2025-08-10 11:48:28 +01:00)

Compare commits (15 commits)
Commit SHAs: 1468ac4bbe, 2e13ecfa2f, fa59f3cfeb, fdd2893255, d40f6a95b0, f381fbc9c7, 57bb0fed5b, 6e39c34a58, e2acd2dcd6, aa75646d5f, ba2b943c0d, 5c45b8b832, 9462888701, bd1bb98160, 76e33ea523

@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.2.19"
+current_version = "0.3.4"
 parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
 serialize = ["{major}.{minor}.{patch}"]
 replace = "{new_version}"
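
Note: this [tool.bumpversion] block is what get_current_version() (changed below in libs/generic.py) reads back at runtime. A minimal sketch of that round-trip, assuming the repo shells out to the bump-my-version CLI as the error handling in this diff suggests; the exact subcommand is an assumption:

    # Sketch, not the repo's exact code: read the current version back via
    # the bump-my-version CLI, mirroring libs/generic.py's error handling.
    import subprocess

    def get_current_version():
        try:
            out = subprocess.check_output(
                ["bump-my-version", "show", "current_version"], text=True
            )
            return out.strip()  # e.g. "0.3.4"
        except subprocess.CalledProcessError as e:
            print("Error running bump-my-version:", e)
            return "unknown"  # matches the new fallback introduced below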

.gitignore (vendored): 1 change
@@ -11,3 +11,4 @@ publish.sh
 test.py
 .vscode/launch.json
 favourites.json
+.vscode/launch.json

.vscode/launch.json (vendored): 2 changes
@@ -8,7 +8,7 @@
         "name": "Python Debugger: Current File",
         "type": "debugpy",
         "request": "launch",
-        "program": "${file}",
+        "program": "ai_frame_image_server.py",
         "console": "integratedTerminal",
         "justMyCode": false,
         "env": {"SECRET_KEY":"dkdkdk"}

@@ -4,7 +4,7 @@ FROM python:3.11-slim
 # Set the working directory in the container
 WORKDIR /app
 # Set version label
-ARG VERSION="0.2.19"
+ARG VERSION="0.3.4"
 LABEL version=$VERSION

 # Copy project files into the container

@@ -18,6 +18,13 @@ user_config = load_config()
 app = Flask(__name__)
 app.secret_key = os.environ.get("SECRET_KEY")

+# Make version available to all templates
+from libs.generic import get_current_version
+@app.context_processor
+def inject_version():
+    version = get_current_version()
+    return dict(version=version)
+
 # Inject config into routes that need it
 create_routes.init_app(user_config)
 auth_routes.init_app(user_config)
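
Note: a Flask context processor runs before every template render and merges the returned dict into the template context, so {{ version }} becomes available to all templates without each route passing it. A self-contained sketch (the version value is illustrative):

    from flask import Flask, render_template_string

    app = Flask(__name__)

    @app.context_processor
    def inject_version():
        # Every render sees this key, with no per-route plumbing.
        return dict(version="0.3.4")

    @app.route("/")
    def index():
        return render_template_string("v{{ version }}")  # renders "v0.3.4"

This is why the index route further down can drop its explicit version=version argument.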

@@ -39,7 +46,16 @@ from libs.comfyui import create_image

 def scheduled_task():
     print(f"Executing scheduled task at {time.strftime('%Y-%m-%d %H:%M:%S')}")
-    create_image(None)
+    # Generate a random prompt using either OpenWebUI or OpenRouter
+    from libs.generic import create_prompt_with_random_model
+    prompt = create_prompt_with_random_model("Generate a random detailed prompt for stable diffusion.")
+    if prompt:
+        # Select a random model
+        import random
+        model = "Random Image Model"
+        create_image(prompt, model)
+    else:
+        print("Failed to generate a prompt for the scheduled task.")

 if user_config["frame"]["auto_regen"] == "True":
     if os.environ.get("WERKZEUG_RUN_MAIN") == "true":

@@ -15,7 +15,7 @@ from tenacity import (
 import nest_asyncio
 from libs.generic import rename_image, load_config, save_prompt
 from libs.create_thumbnail import generate_thumbnail
-from libs.ollama import create_prompt_on_openwebui
+from libs.openwebui import create_prompt_on_openwebui
 nest_asyncio.apply()

 logging.basicConfig(level=logging.INFO)

@@ -123,14 +123,14 @@ def select_model(model: str) -> tuple[str, str]:
     use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
     only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())

-    if model == "Random":
+    if model == "Random Image Model":
         selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
     elif "flux" in model.lower():
         selected_workflow = "FLUX"
     else:
         selected_workflow = "SDXL"

-    if model == "Random":
+    if model == "Random Image Model":
         if selected_workflow == "FLUX":
             valid_models = user_config["comfyui:flux"]["models"].split(",")
         else:  # SDXL

@@ -141,11 +141,16 @@ def select_model(model: str) -> tuple[str, str]:
     return selected_workflow, model


-def create_image(prompt: str | None = None, model: str = "Random") -> None:
+def create_image(prompt: str | None = None, model: str = "Random Image Model") -> None:
     """Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""

     if prompt is None:
-        prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
+        # Generate a random prompt using either OpenWebUI or OpenRouter
+        from libs.generic import create_prompt_with_random_model
+        prompt = create_prompt_with_random_model("Generate a random detailed prompt for stable diffusion.")
+        if not prompt:
+            logging.error("Failed to generate a prompt.")
+            return

     if not prompt:
         logging.error("No prompt generated.")

@@ -153,7 +158,7 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:

     save_prompt(prompt)
     selected_workflow, model = select_model(model)

     if selected_workflow == "FLUX":
         generate_image(
             file_name="image",

libs/generic.py: 103 changes
@@ -4,6 +4,7 @@ import logging
 import sys
 import time
 import os
+import random
 from PIL import Image
 import nest_asyncio
 import json

@@ -109,7 +110,7 @@ def get_current_version():
         return version
     except subprocess.CalledProcessError as e:
         print("Error running bump-my-version:", e)
-        return None
+        return "unknown"

 def load_models_from_config():
     flux_models = load_config()["comfyui:flux"]["models"].split(",")

@@ -124,5 +125,105 @@ def load_topics_from_config():
     sorted_topics = sorted(topics, key=str.lower)
     return sorted_topics

+def load_openrouter_models_from_config():
+    config = load_config()
+    if config["openrouter"].get("enabled", "False").lower() == "true":
+        models = config["openrouter"]["models"].split(",")
+        return sorted([model.strip() for model in models if model.strip()], key=str.lower)
+    return []
+
+def load_openwebui_models_from_config():
+    config = load_config()
+    if "openwebui" in config and "models" in config["openwebui"]:
+        models = config["openwebui"]["models"].split(",")
+        return sorted([model.strip() for model in models if model.strip()], key=str.lower)
+    return []
+
+def load_prompt_models_from_config():
+    """Load and return a list of available prompt generation models (both OpenWebUI and OpenRouter)."""
+    config = load_config()
+    prompt_models = []
+
+    # Add OpenWebUI models if configured
+    if "openwebui" in config and "models" in config["openwebui"]:
+        openwebui_models = config["openwebui"]["models"].split(",")
+        prompt_models.extend([("openwebui", model.strip()) for model in openwebui_models if model.strip()])
+
+    # Add OpenRouter models if enabled and configured
+    if config["openrouter"].get("enabled", "False").lower() == "true" and "models" in config["openrouter"]:
+        openrouter_models = config["openrouter"]["models"].split(",")
+        prompt_models.extend([("openrouter", model.strip()) for model in openrouter_models if model.strip()])
+
+    return prompt_models
+
+
+def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
+    """Create a prompt using a randomly selected model from OpenWebUI or OpenRouter.
+
+    If OpenWebUI fails, it will retry once. If it fails again, it will fall back to OpenRouter.
+    """
+    prompt_models = load_prompt_models_from_config()
+
+    if not prompt_models:
+        logging.warning("No prompt generation models configured.")
+        return None
+
+    # Randomly select a model
+    service, model = random.choice(prompt_models)
+
+    # Import here to avoid circular imports
+    from libs.openwebui import create_prompt_on_openwebui
+    from libs.openrouter import create_prompt_on_openrouter
+
+    if service == "openwebui":
+        try:
+            # First attempt with OpenWebUI
+            logging.info(f"Attempting to generate prompt with OpenWebUI using model: {model}")
+            result = create_prompt_on_openwebui(base_prompt, topic, model)
+            if result:
+                return result
+
+            # If the first attempt returns None, try again
+            logging.warning("First OpenWebUI attempt failed. Retrying...")
+            result = create_prompt_on_openwebui(base_prompt, topic, model)
+            if result:
+                return result
+
+            # If the second attempt fails, fall back to OpenRouter
+            logging.warning("Second OpenWebUI attempt failed. Falling back to OpenRouter...")
+            openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]
+            if openrouter_models:
+                _, openrouter_model = random.choice(openrouter_models)
+                return create_prompt_on_openrouter(base_prompt, topic, openrouter_model)
+            else:
+                logging.error("No OpenRouter models configured for fallback.")
+                return "A colorful abstract composition"  # Default fallback prompt
+
+        except Exception as e:
+            logging.error(f"Error with OpenWebUI: {e}")
+            # Fall back to OpenRouter on exception
+            logging.warning("OpenWebUI exception. Falling back to OpenRouter...")
+            openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]
+            if openrouter_models:
+                _, openrouter_model = random.choice(openrouter_models)
+                try:
+                    return create_prompt_on_openrouter(base_prompt, topic, openrouter_model)
+                except Exception as e2:
+                    logging.error(f"Error with OpenRouter fallback: {e2}")
+                    return "A colorful abstract composition"  # Default fallback prompt
+            else:
+                logging.error("No OpenRouter models configured for fallback.")
+                return "A colorful abstract composition"  # Default fallback prompt
+
+    elif service == "openrouter":
+        try:
+            # Use OpenRouter
+            return create_prompt_on_openrouter(base_prompt, topic, model)
+        except Exception as e:
+            logging.error(f"Error with OpenRouter: {e}")
+            return "A colorful abstract composition"  # Default fallback prompt
+
+    return "A colorful abstract composition"  # Default fallback prompt
+
 user_config = load_config()
 output_folder = user_config["comfyui"]["output_dir"]
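
Note on the fallback ladder in create_prompt_with_random_model: OpenWebUI gets two tries, then OpenRouter, then a hard-coded default string, so callers such as scheduled_task() only ever receive None when no prompt models are configured at all. A hypothetical call, mirroring what the scheduler and create_image() do:

    from libs.generic import create_prompt_with_random_model

    prompt = create_prompt_with_random_model(
        "Generate a random detailed prompt for stable diffusion."
    )
    if prompt:
        print(prompt)  # model-generated, or "A colorful abstract composition"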

Deleted file, 84 lines (libs/ollama.py, going by the import change in libs/comfyui.py above):

@@ -1,84 +0,0 @@
-import random
-import logging
-import litellm
-import nest_asyncio
-from libs.generic import load_recent_prompts, load_config
-import re
-nest_asyncio.apply()
-
-logging.basicConfig(level=logging.INFO)
-
-LOG_FILE = "./prompts_log.jsonl"
-
-user_config = load_config()
-output_folder = user_config["comfyui"]["output_dir"]
-
-def create_prompt_on_openwebui(prompt: str, topic: str = "random") -> str:
-    """Sends prompt to OpenWebui and returns the generated response."""
-    topic_instruction = ""
-    selected_topic = ""
-    # Unique list of recent prompts
-    recent_prompts = list(set(load_recent_prompts()))
-    if topic == "random":
-        topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
-        selected_topic = random.choice(topics)
-    elif topic != "":
-        selected_topic = topic
-    else:
-        # Decide on whether to include a topic (e.g., 30% chance to include)
-        topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
-        if random.random() < 0.3 and topics:
-            selected_topic = random.choice(topics)
-    if selected_topic != "":
-        topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
-
-    user_content = (
-        "Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors.”"
-        + topic_instruction
-        + "Avoid prompts similar to the following:"
-        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
-    )
-
-    model = random.choice(user_config["openwebui"]["models"].split(","))
-    response = litellm.completion(
-        api_base=user_config["openwebui"]["base_url"],
-        model="openai/" + model,
-        messages=[
-            {
-                "role": "system",
-                "content": (
-                    "You are a prompt generator for Stable Diffusion. "
-                    "Generate a detailed and imaginative prompt with a strong visual theme. "
-                    "Focus on lighting, atmosphere, and artistic style. "
-                    "Keep the prompt concise, no extra commentary or formatting."
-                ),
-            },
-            {
-                "role": "user",
-                "content": user_content,
-            },
-        ],
-        api_key=user_config["openwebui"]["api_key"],
-    )
-
-    prompt = response["choices"][0]["message"]["content"].strip('"')
-    # response = litellm.completion(
-    #     api_base=user_config["openwebui"]["base_url"],
-    #     model="openai/brxce/stable-diffusion-prompt-generator:latest",
-    #     messages=[
-    #         {
-    #             "role": "user",
-    #             "content": prompt,
-    #         },
-    #     ],
-    #     api_key=user_config["openwebui"]["api_key"],
-    # )
-    # prompt = response["choices"][0]["message"]["content"].strip('"')
-    match = re.search(r'"([^"]+)"', prompt)
-    if not match:
-        match = re.search(r":\s*\n*\s*(.+)", prompt)
-    if match:
-        prompt = match.group(1)
-    logging.debug(prompt)
-    return prompt

libs/openrouter.py: new file, 95 lines
@@ -0,0 +1,95 @@
+import random
+import logging
+from openai import OpenAI
+import nest_asyncio
+from libs.generic import load_recent_prompts, load_config
+import re
+nest_asyncio.apply()
+
+logging.basicConfig(level=logging.INFO)
+
+LOG_FILE = "./prompts_log.jsonl"
+
+user_config = load_config()
+output_folder = user_config["comfyui"]["output_dir"]
+
+def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str = None) -> str:
+    """Sends prompt to OpenRouter and returns the generated response."""
+    # Check if OpenRouter is enabled
+    if user_config["openrouter"].get("enabled", "False").lower() != "true":
+        logging.warning("OpenRouter is not enabled in the configuration.")
+        return ""
+
+    topic_instruction = ""
+    selected_topic = ""
+    # Unique list of recent prompts
+    recent_prompts = list(set(load_recent_prompts()))
+    if topic == "random":
+        topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
+        selected_topic = random.choice(topics) if topics else ""
+    elif topic != "":
+        selected_topic = topic
+    else:
+        # Decide on whether to include a topic (e.g., 30% chance to include)
+        topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
+        if random.random() < 0.3 and topics:
+            selected_topic = random.choice(topics)
+    if selected_topic != "":
+        topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
+
+    user_content = (
+        "Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors."
+        + topic_instruction
+        + "Avoid prompts similar to the following:"
+        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+    )
+
+    # Use the specified model or select a random model from the configured OpenRouter models
+    if model:
+        # Use the specified model
+        model = model
+    else:
+        # Select a random model from the configured OpenRouter models
+        models = [m.strip() for m in user_config["openrouter"]["models"].split(",") if m.strip()]
+        if not models:
+            logging.error("No OpenRouter models configured.")
+            return ""
+
+        model = random.choice(models)
+
+    try:
+        client = OpenAI(
+            base_url="https://openrouter.ai/api/v1",
+            api_key=user_config["openrouter"]["api_key"],
+        )
+
+        completion = client.chat.completions.create(
+            model=model,
+            messages=[
+                {
+                    "role": "system",
+                    "content": (
+                        "You are a prompt generator for Stable Diffusion. "
+                        "Generate a detailed and imaginative prompt with a strong visual theme. "
+                        "Focus on lighting, atmosphere, and artistic style. "
+                        "Keep the prompt concise, no extra commentary or formatting."
+                    ),
+                },
+                {
+                    "role": "user",
+                    "content": user_content,
+                },
+            ]
+        )
+
+        prompt = completion.choices[0].message.content.strip('"')
+        match = re.search(r'"([^"]+)"', prompt)
+        if not match:
+            match = re.search(r":\s*\n*\s*(.+)", prompt)
+        if match:
+            prompt = match.group(1)
+        logging.debug(prompt)
+        return prompt
+    except Exception as e:
+        logging.error(f"Error generating prompt with OpenRouter: {e}")
+        return ""
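
Note: OpenRouter exposes an OpenAI-compatible chat-completions API, which is why the stock openai client works here once base_url points at OpenRouter. A minimal sketch (the API key and model name are illustrative, taken from the example config below):

    from openai import OpenAI

    client = OpenAI(base_url="https://openrouter.ai/api/v1", api_key="sk-or-...")
    reply = client.chat.completions.create(
        model="mistralai/mistral-7b-instruct:free",
        messages=[{"role": "user", "content": "One-line image idea, please."}],
    )
    print(reply.choices[0].message.content)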

libs/openwebui.py: new file, 99 lines
@@ -0,0 +1,99 @@
+import random
+import logging
+import nest_asyncio
+from libs.generic import load_recent_prompts, load_config
+import re
+from openwebui_chat_client import OpenWebUIClient
+from datetime import datetime
+
+nest_asyncio.apply()
+
+logging.basicConfig(level=logging.INFO)
+
+LOG_FILE = "./prompts_log.jsonl"
+
+user_config = load_config()
+output_folder = user_config["comfyui"]["output_dir"]
+
+def create_prompt_on_openwebui(prompt: str, topic: str = "random", model: str = None) -> str:
+    """Sends prompt to OpenWebui and returns the generated response."""
+    topic_instruction = ""
+    selected_topic = ""
+    # Unique list of recent prompts
+    recent_prompts = list(set(load_recent_prompts()))
+    if topic == "random":
+        topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
+        selected_topic = random.choice(topics)
+    elif topic != "":
+        selected_topic = topic
+    else:
+        # Decide on whether to include a topic (e.g., 30% chance to include)
+        topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
+        if random.random() < 0.3 and topics:
+            selected_topic = random.choice(topics)
+    if selected_topic != "":
+        topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
+
+    user_content = (
+        "Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors."
+        + topic_instruction
+        + "Avoid prompts similar to the following:"
+        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+    )
+
+    if model:
+        # Use the specified model
+        model = model
+    else:
+        # Select a random model
+        model = random.choice(user_config["openwebui"]["models"].split(",")).strip()
+
+    # Create OpenWebUI client
+    client = OpenWebUIClient(
+        base_url=user_config["openwebui"]["base_url"],
+        token=user_config["openwebui"]["api_key"],
+        default_model_id=model
+    )
+
+    # Prepare messages for the chat
+    messages = [
+        {
+            "role": "system",
+            "content": (
+                "You are a prompt generator for Stable Diffusion. "
+                "Generate a detailed and imaginative prompt with a strong visual theme. "
+                "Focus on lighting, atmosphere, and artistic style. "
+                "Keep the prompt concise, no extra commentary or formatting."
+            ),
+        },
+        {
+            "role": "user",
+            "content": user_content,
+        },
+    ]
+
+    # Send the chat request
+    try:
+        result = client.chat(
+            question=user_content,
+            chat_title=datetime.now().strftime("%Y-%m-%d %H:%M"),
+            folder_name="ai-frame-image-server"
+        )
+
+        if result:
+            prompt = result["response"].strip('"')
+        else:
+            # Return None if the request fails
+            logging.warning(f"OpenWebUI request failed with model: {model}")
+            return None
+    except Exception as e:
+        logging.error(f"Error in OpenWebUI request with model {model}: {e}")
+        return None
+
+    match = re.search(r'"([^"]+)"', prompt)
+    if not match:
+        match = re.search(r":\s*\n*\s*(.+)", prompt)
+    if match:
+        prompt = match.group(1)
+    logging.debug(prompt)
+    return prompt

requirements.txt (BIN): Binary file not shown.

@@ -1,8 +1,8 @@
 from flask import Blueprint, request, render_template, redirect, url_for, session
 import threading
 from libs.comfyui import create_image, select_model, get_available_models
-from libs.ollama import create_prompt_on_openwebui
+from libs.openwebui import create_prompt_on_openwebui
-from libs.generic import load_models_from_config, load_topics_from_config
+from libs.generic import load_models_from_config, load_topics_from_config, load_openrouter_models_from_config, load_openwebui_models_from_config, create_prompt_with_random_model
 import os

 bp = Blueprint("create_routes", __name__)

@@ -12,28 +12,67 @@ user_config = None  # will be set in init_app
 def create():
     if request.method == "POST":
         prompt = request.form.get("prompt")
-        selected_workflow, model = select_model(request.form.get("model") or "Random")
+        image_model = request.form.get("model") or "Random Image Model"
+        selected_workflow, model = select_model(image_model)
         topic = request.form.get("topic")

         if not prompt:
-            prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic)
+            # Get the prompt model from the form data
+            prompt_model = request.form.get("prompt_model") or ""
+            if prompt_model and prompt_model != "Random Prompt Model":
+                # Use the specified prompt model
+                service, service_model = prompt_model.split(":", 1) if ":" in prompt_model else (prompt_model, "")
+                if service == "openwebui":
+                    from libs.openwebui import create_prompt_on_openwebui
+                    prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic, service_model)
+                elif service == "openrouter":
+                    from libs.openrouter import create_prompt_on_openrouter
+                    prompt = create_prompt_on_openrouter(user_config["comfyui"]["prompt"], topic, service_model)
+            else:
+                # Use a random prompt model
+                prompt = create_prompt_with_random_model(user_config["comfyui"]["prompt"], topic)

         threading.Thread(target=lambda: create_image(prompt, model)).start()
         return redirect(url_for("create_routes.image_queued", prompt=prompt, model=model.split(".")[0]))

-    return render_template("create_image.html", models=load_models_from_config()[0]+load_models_from_config()[1], topics=load_topics_from_config())
+    # Load all models (SDXL and FLUX only)
+    sdxl_models, flux_models = load_models_from_config()
+    openwebui_models = load_openwebui_models_from_config()
+    openrouter_models = load_openrouter_models_from_config()
+
+    return render_template("create_image.html",
+                           sdxl_models=sdxl_models,
+                           flux_models=flux_models,
+                           openwebui_models=openwebui_models,
+                           openrouter_models=openrouter_models,
+                           topics=load_topics_from_config())

 @bp.route("/image_queued")
 def image_queued():
     prompt = request.args.get("prompt", "No prompt provided.")
-    model = request.args.get("model", "No model selected.").split(".")[0]
+    model = request.args.get("model", "No model selected.")
+    if model == "Random Image Model":
+        model = "Random"
+    else:
+        model = model.split(".")[0]
     return render_template("image_queued.html", prompt=prompt, model=model)

 @bp.route("/create_image", methods=["GET"])
 def create_image_page():
     if user_config["frame"]["create_requires_auth"] == "True" and not session.get("authenticated"):
         return redirect(url_for("auth_routes.login", next=request.path))
-    return render_template("create_image.html", models=load_models_from_config()[0]+load_models_from_config()[1], topics=load_topics_from_config())
+
+    # Load all models (SDXL and FLUX only)
+    sdxl_models, flux_models = load_models_from_config()
+    openwebui_models = load_openwebui_models_from_config()
+    openrouter_models = load_openrouter_models_from_config()
+
+    return render_template("create_image.html",
+                           sdxl_models=sdxl_models,
+                           flux_models=flux_models,
+                           openwebui_models=openwebui_models,
+                           openrouter_models=openrouter_models,
+                           topics=load_topics_from_config())


 def init_app(config):
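
Note: the prompt_model form value encodes "service:model", and model IDs can themselves contain colons (e.g. llama3:latest), which is why the route splits with a maxsplit of 1 rather than a plain split. Illustration with an assumed form value:

    prompt_model = "openwebui:llama3:latest"  # illustrative form value
    service, service_model = prompt_model.split(":", 1)
    assert (service, service_model) == ("openwebui", "llama3:latest")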

@@ -11,12 +11,10 @@ def index():
     image_filename = "./image.png"
     image_path = os.path.join(image_folder, image_filename)
     prompt = get_details_from_png(image_path)["p"]
-    version = get_current_version()

     return render_template(
         "index.html",
         image=image_filename,
         prompt=prompt,
         reload_interval=user_config["frame"]["reload_interval"],
-        version=version,
     )

@@ -12,7 +12,7 @@

 <!-- Version number at bottom right -->
 <div class="version">
-    <a href="{{ url_for('settings_route.config_editor') }}">v{{ version }}</a>
+    <a href="{{ url_for('settings_route.config_editor') }}">{% if version and version != 'unknown' %}v{{ version }}{% else %}v?.?.?{% endif %}</a>
 </div>

 {% block scripts %}{% endblock %}

@@ -33,6 +33,28 @@
     align-items: center;
 }

+.model-selection {
+    display: flex;
+    flex-wrap: wrap;
+    gap: 20px;
+    justify-content: center;
+    margin: 20px 0;
+    width: 100%;
+    max-width: 800px;
+}
+
+.model-group {
+    display: flex;
+    flex-direction: column;
+    align-items: flex-start;
+    gap: 5px;
+}
+
+.model-group label {
+    font-weight: bold;
+    color: #ddd;
+}
+
 button,
 select {
     background: #333;

@@ -43,6 +65,7 @@
     font-size: 16px;
     cursor: pointer;
     transition: background 0.3s;
+    min-width: 150px;
 }

 button:hover,

@@ -90,6 +113,15 @@
     width: 100%;
 }

+.model-selection {
+    flex-direction: column;
+    align-items: stretch;
+}
+
+.model-group {
+    align-items: stretch;
+}
+
 button,
 select {
     width: 100%;

@@ -109,34 +141,65 @@

 <div class="button-group">
     <button onclick="showSpinner(); location.href='/'">Back</button>

     <button onclick="sendPrompt()">Send Prompt</button>

     <button onclick="randomPrompt()">Random Prompt</button>
+</div>
+
-<select id="model-select">
-    <option value="" selected>Random</option>
-    <optgroup label="FLUX">
-        {% for m in models if 'flux' in m|lower %}
-        <option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
-        {% endfor %}
-    </optgroup>
-    <optgroup label="SDXL">
-        {% for m in models if 'flux' not in m|lower %}
-        <option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
-        {% endfor %}
-    </optgroup>
-</select>
-
-<select id="topic-select">
-    <option value="">No Topic</option>
-    <option value="random">Random</option>
-    <optgroup label="Topics">
-        {% for t in topics %}
-        <option value="{{ t }}">{{ t }}</option>
-        {% endfor %}
-    </optgroup>
-</select>
+<div class="model-selection">
+    <div class="model-group">
+        <label for="model-select">Image Model:</label>
+        <select id="model-select">
+            <option value="" selected>Random Image Model</option>
+            {% if flux_models %}
+            <optgroup label="FLUX">
+                {% for m in flux_models %}
+                <option value="{{ m }}">{{ m.rsplit('.', 1)[0] if '.' in m else m }}</option>
+                {% endfor %}
+            </optgroup>
+            {% endif %}
+            {% if sdxl_models %}
+            <optgroup label="SDXL">
+                {% for m in sdxl_models %}
+                <option value="{{ m }}">{{ m.rsplit('.', 1)[0] if '.' in m else m }}</option>
+                {% endfor %}
+            </optgroup>
+            {% endif %}
+        </select>
+    </div>
+
+    <div class="model-group">
+        <label for="prompt-model-select">Prompt Model:</label>
+        <select id="prompt-model-select">
+            <option value="" selected>Random Prompt Model</option>
+            {% if openwebui_models %}
+            <optgroup label="OpenWebUI">
+                {% for m in openwebui_models %}
+                <option value="openwebui:{{ m }}">{{ m }}</option>
+                {% endfor %}
+            </optgroup>
+            {% endif %}
+            {% if openrouter_models %}
+            <optgroup label="OpenRouter">
+                {% for m in openrouter_models %}
+                <option value="openrouter:{{ m }}">{{ m }}</option>
+                {% endfor %}
+            </optgroup>
+            {% endif %}
+        </select>
+    </div>
+
+    <div class="model-group">
+        <label for="topic-select">Topic:</label>
+        <select id="topic-select">
+            <option value="">No Topic</option>
+            <option value="random">Random</option>
+            <optgroup label="Topics">
+                {% for t in topics %}
+                <option value="{{ t }}">{{ t }}</option>
+                {% endfor %}
+            </optgroup>
+        </select>
+    </div>
 </div>

 <div id="spinner-overlay">

@@ -154,10 +217,12 @@
     showSpinner();
     const prompt = document.getElementById('prompt-box').value;
     const model = document.getElementById('model-select').value;
+    const promptModel = document.getElementById('prompt-model-select').value;

     const formData = new URLSearchParams();
     formData.append('prompt', prompt);
     formData.append('model', model);
+    formData.append('prompt_model', promptModel);

     fetch('/create', {
         method: 'POST',

@@ -176,10 +241,12 @@
 function randomPrompt() {
     showSpinner();
     const model = document.getElementById('model-select').value;
+    const promptModel = document.getElementById('prompt-model-select').value;
     const topic = document.getElementById('topic-select').value;

     const formData = new URLSearchParams();
     formData.append('model', model);
+    formData.append('prompt_model', promptModel);
     formData.append('topic', topic);

     fetch('/create', {

@@ -24,4 +24,9 @@ models = flux1-dev-Q4_0.gguf,flux1-schnell-Q4_0.gguf
 [openwebui]
 base_url = https://openwebui
 api_key = sk-
 models = llama3:latest,cogito:14b,gemma3:12b
+
+[openrouter]
+enabled = False
+api_key =
+models = mistralai/mistral-7b-instruct:free,google/gemma-7b-it:free,meta-llama/llama-3.1-8b-instruct:free
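
Note: with enabled = False the new section is inert, and load_openrouter_models_from_config() returns an empty list. A sketch of how these INI keys are parsed (the config filename is an assumption; the real loader lives in libs/generic.py):

    import configparser

    config = configparser.ConfigParser()
    config.read("user_config.cfg")  # hypothetical filename

    if config["openrouter"].get("enabled", "False").lower() == "true":
        openrouter_models = [m.strip() for m in config["openrouter"]["models"].split(",") if m.strip()]
    else:
        openrouter_models = []  # disabled: OpenRouter is never consulted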