diff --git a/.vscode/launch.json b/.vscode/launch.json index 8605db7..ab19c21 100644 --- a/.vscode/launch.json +++ b/.vscode/launch.json @@ -8,7 +8,7 @@ "name": "Python Debugger: Current File", "type": "debugpy", "request": "launch", - "program": "${file}", + "program": "ai_frame_image_server.py", "console": "integratedTerminal", "justMyCode": false, "env": {"SECRET_KEY":"dkdkdk"} diff --git a/libs/comfyui.py b/libs/comfyui.py index 52aa6c0..4c0e0b6 100644 --- a/libs/comfyui.py +++ b/libs/comfyui.py @@ -123,14 +123,14 @@ def select_model(model: str) -> tuple[str, str]: use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower()) only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower()) - if model == "Random": + if model == "Random Image Model": selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL" elif "flux" in model.lower(): selected_workflow = "FLUX" else: selected_workflow = "SDXL" - if model == "Random": + if model == "Random Image Model": if selected_workflow == "FLUX": valid_models = user_config["comfyui:flux"]["models"].split(",") else: # SDXL @@ -141,7 +141,7 @@ return selected_workflow, model -def create_image(prompt: str | None = None, model: str = "Random") -> None: +def create_image(prompt: str | None = None, model: str = "Random Image Model") -> None: """Generate an image with a chosen workflow (Random, FLUX*, or SDXL*).""" if prompt is None: diff --git a/libs/generic.py b/libs/generic.py index ca7a0e8..ae6af92 100644 --- a/libs/generic.py +++ b/libs/generic.py @@ -4,6 +4,7 @@ import logging import sys import time import os +import random from PIL import Image import nest_asyncio import json @@ -130,6 +131,14 @@ def 
load_openrouter_models_from_config(): models = config["openrouter"]["models"].split(",") return sorted([model.strip() for model in models if model.strip()], key=str.lower) return [] + +def load_openwebui_models_from_config(): + config = load_config() + if "openwebui" in config and "models" in config["openwebui"]: + models = config["openwebui"]["models"].split(",") + return sorted([model.strip() for model in models if model.strip()], key=str.lower) + return [] + def load_prompt_models_from_config(): """Load and return a list of available prompt generation models (both OpenWebUI and OpenRouter).""" config = load_config() diff --git a/libs/ollama.py b/libs/ollama.py index 6ae4161..2aeb967 100644 --- a/libs/ollama.py +++ b/libs/ollama.py @@ -13,7 +13,7 @@ LOG_FILE = "./prompts_log.jsonl" user_config = load_config() output_folder = user_config["comfyui"]["output_dir"] -def create_prompt_on_openwebui(prompt: str, topic: str = "random") -> str: +def create_prompt_on_openwebui(prompt: str, topic: str = "random", model: str | None = None) -> str: """Sends prompt to OpenWebui and returns the generated response.""" topic_instruction = "" selected_topic = "" @@ -40,7 +40,9 @@ ) - model = random.choice(user_config["openwebui"]["models"].split(",")) + if not model: + # No model specified; fall back to a random configured model + model = random.choice(user_config["openwebui"]["models"].split(",")) response = litellm.completion( api_base=user_config["openwebui"]["base_url"], model="openai/" + model, diff --git a/routes/create_routes.py b/routes/create_routes.py index 5bdaa51..0fdd02f 100644 --- a/routes/create_routes.py +++ b/routes/create_routes.py @@ -2,7 +2,7 @@ from flask import Blueprint, request, render_template, redirect, url_for, sessio import threading from libs.comfyui import create_image, select_model, get_available_models from libs.ollama import create_prompt_on_openwebui -from libs.generic 
import load_models_from_config, load_topics_from_config, load_openrouter_models_from_config, create_prompt_with_random_model +from libs.generic import load_models_from_config, load_topics_from_config, load_openrouter_models_from_config, load_openwebui_models_from_config, create_prompt_with_random_model import os bp = Blueprint("create_routes", __name__) @@ -12,27 +12,48 @@ user_config = None # will be set in init_app def create(): if request.method == "POST": prompt = request.form.get("prompt") - selected_workflow, model = select_model(request.form.get("model") or "Random") + image_model = request.form.get("model") or "Random Image Model" + selected_workflow, model = select_model(image_model) topic = request.form.get("topic") if not prompt: - prompt = create_prompt_with_random_model(user_config["comfyui"]["prompt"], topic) + # Get the prompt model from the form data + prompt_model = request.form.get("prompt_model") or "" + if prompt_model and prompt_model != "Random Prompt Model": + # Use the specified prompt model + service, service_model = prompt_model.split(":", 1) if ":" in prompt_model else (prompt_model, "") + if service == "openwebui": + prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic, service_model) + elif service == "openrouter": + from libs.openrouter import create_prompt_on_openrouter + prompt = create_prompt_on_openrouter(user_config["comfyui"]["prompt"], topic, service_model) + else: + # Use a random prompt model + prompt = create_prompt_with_random_model(user_config["comfyui"]["prompt"], topic) threading.Thread(target=lambda: create_image(prompt, model)).start() return redirect(url_for("create_routes.image_queued", prompt=prompt, model=model.split(".")[0])) # Load all models (SDXL and FLUX only) sdxl_models, flux_models = load_models_from_config() + openwebui_models = load_openwebui_models_from_config() + openrouter_models = load_openrouter_models_from_config() return 
render_template("create_image.html", sdxl_models=sdxl_models, flux_models=flux_models, + openwebui_models=openwebui_models, + openrouter_models=openrouter_models, topics=load_topics_from_config()) @bp.route("/image_queued") def image_queued(): prompt = request.args.get("prompt", "No prompt provided.") - model = request.args.get("model", "No model selected.").split(".")[0] + model = request.args.get("model", "No model selected.") + if model == "Random Image Model": + model = "Random" + else: + model = model.split(".")[0] return render_template("image_queued.html", prompt=prompt, model=model) @bp.route("/create_image", methods=["GET"]) @@ -42,10 +64,14 @@ def create_image_page(): # Load all models (SDXL and FLUX only) sdxl_models, flux_models = load_models_from_config() + openwebui_models = load_openwebui_models_from_config() + openrouter_models = load_openrouter_models_from_config() return render_template("create_image.html", sdxl_models=sdxl_models, flux_models=flux_models, + openwebui_models=openwebui_models, + openrouter_models=openrouter_models, topics=load_topics_from_config()) diff --git a/templates/create_image.html b/templates/create_image.html index 6c39375..6188233 100644 --- a/templates/create_image.html +++ b/templates/create_image.html @@ -33,6 +33,28 @@ align-items: center; } + .model-selection { + display: flex; + flex-wrap: wrap; + gap: 20px; + justify-content: center; + margin: 20px 0; + width: 100%; + max-width: 800px; + } + + .model-group { + display: flex; + flex-direction: column; + align-items: flex-start; + gap: 5px; + } + + .model-group label { + font-weight: bold; + color: #ddd; + } + button, select { background: #333; @@ -43,6 +65,7 @@ font-size: 16px; cursor: pointer; transition: background 0.3s; + min-width: 150px; } button:hover, @@ -90,6 +113,15 @@ width: 100%; } + .model-selection { + flex-direction: column; + align-items: stretch; + } + + .model-group { + align-items: stretch; + } + button, select { width: 100%; @@ -109,38 +141,65 @@
- - +
- - - +
+
+ + +
+ +
+ + +
+ +
+ + +
@@ -158,10 +217,12 @@ showSpinner(); const prompt = document.getElementById('prompt-box').value; const model = document.getElementById('model-select').value; + const promptModel = document.getElementById('prompt-model-select').value; const formData = new URLSearchParams(); formData.append('prompt', prompt); formData.append('model', model); + formData.append('prompt_model', promptModel); fetch('/create', { method: 'POST', @@ -180,10 +241,12 @@ function randomPrompt() { showSpinner(); const model = document.getElementById('model-select').value; + const promptModel = document.getElementById('prompt-model-select').value; const topic = document.getElementById('topic-select').value; const formData = new URLSearchParams(); formData.append('model', model); + formData.append('prompt_model', promptModel); formData.append('topic', topic); fetch('/create', {