From f7f049aacbbc64aadaca1bdb1db1d87a7ee3daf7 Mon Sep 17 00:00:00 2001
From: Karl
Date: Fri, 6 Jun 2025 15:35:50 +0100
Subject: [PATCH] split out model selection so it can be returned to the UI

---
 ai_frame_image_server.py    | 10 +++++-----
 libs/comfyui.py             | 44 +++++++++++++++++++++++++------------------
 templates/image_queued.html |  3 ++-
 3 files changed, 32 insertions(+), 25 deletions(-)

diff --git a/ai_frame_image_server.py b/ai_frame_image_server.py
index 48d3dc9..7209681 100644
--- a/ai_frame_image_server.py
+++ b/ai_frame_image_server.py
@@ -12,7 +12,7 @@ import time
 import threading
 from apscheduler.schedulers.background import BackgroundScheduler
 from libs.generic import load_config, load_recent_prompts, get_details_from_png, get_current_version, load_models_from_config
-from libs.comfyui import cancel_current_job, create_image
+from libs.comfyui import cancel_current_job, create_image, select_model
 from libs.ollama import create_prompt_on_openwebui
 
 #workflow test commit
@@ -98,7 +98,7 @@ def cancel_job() -> None:
 def create():
     if request.method == "POST":
         prompt = request.form.get("prompt")
-        model = request.form.get("model") or "Random"
+        selected_workflow, model = select_model(request.form.get("model") or "Random")
 
         if not prompt:
             prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
@@ -106,8 +106,7 @@ def create():
         # Start generation in background
         threading.Thread(target=lambda: create_image(prompt, model)).start()
 
-        # store prompt in query string temporarily
-        return redirect(url_for("image_queued", prompt=prompt))
+        return redirect(url_for("image_queued", prompt=prompt, model=model.split(".")[0]))
 
     # For GET requests, just show the form to enter prompt
     return render_template("create_image.html", models=load_models_from_config())
@@ -116,7 +115,8 @@ def create():
 @app.route("/image_queued")
 def image_queued():
     prompt = request.args.get("prompt", "No prompt provided.")
-    return render_template("image_queued.html", prompt=prompt)
+    model = request.args.get("model", "No model selected.").split(".")[0]
+    return render_template("image_queued.html", prompt=prompt, model=model)
 
 def scheduled_task() -> None:
     """Executes the scheduled image generation task."""
diff --git a/libs/comfyui.py b/libs/comfyui.py
index 781a13e..5bf6731 100644
--- a/libs/comfyui.py
+++ b/libs/comfyui.py
@@ -121,8 +121,29 @@ def generate_image(
     except Exception as e:
         logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
         raise
-
-
+
+def select_model(model: str) -> tuple[str, str]:
+    use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
+    only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
+
+    if model == "Random":
+        selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
+    elif "flux" in model.lower():
+        selected_workflow = "FLUX"
+    else:
+        selected_workflow = "SDXL"
+
+    if model == "Random":
+        if selected_workflow == "FLUX":
+            valid_models = user_config["comfyui:flux"]["models"].split(",")
+        else: # SDXL
+            available_model_list = user_config["comfyui"]["models"].split(",")
+            valid_models = list(set(get_available_models()) & set(available_model_list))
+        model = random.choice(valid_models)
+
+    return selected_workflow, model
+
+
 def create_image(prompt: str | None = None, model: str = "Random") -> None:
     """Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
 
@@ -134,18 +155,9 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
         return
 
     save_prompt(prompt)
-    use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
-    only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
-    if model == "Random":
-        selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
-    elif "flux" in model.lower():
-        selected_workflow = "FLUX"
-    else:
-        selected_workflow = "SDXL"
+    selected_workflow, model = select_model(model)
+
     if selected_workflow == "FLUX":
-        if model == "Random":
-            valid_models = user_config["comfyui:flux"]["models"].split(",")
-            model = random.choice(valid_models)
         generate_image(
             file_name="image",
             comfy_prompt=prompt,
@@ -160,12 +172,6 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
             model=model
         )
     else: # SDXL
-        if model == "Random":
-            available_model_list = user_config["comfyui"]["models"].split(",")
-            valid_models = list(set(get_available_models()) & set(available_model_list))
-            model = random.choice(valid_models)
         generate_image("image", comfy_prompt=prompt, model=model)
 
     logging.info(f"{selected_workflow} generation started with prompt: {prompt}")
-
-
\ No newline at end of file
diff --git a/templates/image_queued.html b/templates/image_queued.html
index 54803d3..9c8eed5 100644
--- a/templates/image_queued.html
+++ b/templates/image_queued.html
@@ -52,10 +52,11 @@
-    Image will be made using prompt:
+    Image will be made with {{ model }} using prompt:
     {{ prompt }}
+
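
Note: the new select_model() can be exercised on its own. Below is a minimal,
runnable sketch that stubs out user_config and get_available_models(). The
section and option names (comfyui, comfyui:flux, FLUX, ONLY_FLUX, models) are
taken from the patch; the concrete model filenames, config values, and the
ConfigParser-based stub are illustrative assumptions, not part of the commit.

import json
import random
from configparser import ConfigParser

# Stub config mirroring the keys select_model() reads; values are made up.
user_config = ConfigParser()
user_config.read_string("""\
[comfyui]
FLUX = true
ONLY_FLUX = false
models = sdxl_base.safetensors,juggernaut.safetensors
[comfyui:flux]
models = flux1-dev.safetensors
""")

def get_available_models() -> list[str]:
    # Stub: the real helper queries ComfyUI for the installed checkpoints.
    return ["sdxl_base.safetensors"]

def select_model(model: str) -> tuple[str, str]:
    # Same logic as the function added to libs/comfyui.py above.
    use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
    only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
    if model == "Random":
        selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
    elif "flux" in model.lower():
        selected_workflow = "FLUX"
    else:
        selected_workflow = "SDXL"
    if model == "Random":
        if selected_workflow == "FLUX":
            valid_models = user_config["comfyui:flux"]["models"].split(",")
        else: # SDXL
            available_model_list = user_config["comfyui"]["models"].split(",")
            valid_models = list(set(get_available_models()) & set(available_model_list))
        model = random.choice(valid_models)
    return selected_workflow, model

print(select_model("Random"))                  # e.g. ('FLUX', 'flux1-dev.safetensors')
print(select_model("juggernaut.safetensors"))  # ('SDXL', 'juggernaut.safetensors')

Because selection now happens in the route before the background thread starts,
the resolved model name (minus its file extension) can be passed to the
image_queued template instead of only being known inside create_image().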