split out model selection so it can be returned to the UI

This commit is contained in:
Karl 2025-06-06 15:35:50 +01:00
parent 669dad4044
commit f7f049aacb
3 changed files with 32 additions and 25 deletions

View File

@ -12,7 +12,7 @@ import time
import threading
from apscheduler.schedulers.background import BackgroundScheduler
from libs.generic import load_config, load_recent_prompts, get_details_from_png, get_current_version, load_models_from_config
from libs.comfyui import cancel_current_job, create_image
from libs.comfyui import cancel_current_job, create_image, select_model
from libs.ollama import create_prompt_on_openwebui
#workflow test commit
@ -98,7 +98,7 @@ def cancel_job() -> None:
def create():
if request.method == "POST":
prompt = request.form.get("prompt")
model = request.form.get("model") or "Random"
selected_workflow, model = select_model(request.form.get("model") or "Random")
if not prompt:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
@ -106,8 +106,7 @@ def create():
# Start generation in background
threading.Thread(target=lambda: create_image(prompt, model)).start()
# store prompt in query string temporarily
return redirect(url_for("image_queued", prompt=prompt))
return redirect(url_for("image_queued", prompt=prompt, model=model.split(".")[0]))
# For GET requests, just show the form to enter prompt
return render_template("create_image.html", models=load_models_from_config())
@ -116,7 +115,8 @@ def create():
@app.route("/image_queued")
def image_queued():
prompt = request.args.get("prompt", "No prompt provided.")
return render_template("image_queued.html", prompt=prompt)
model = request.args.get("model", "No model selected.").split(".")[0]
return render_template("image_queued.html", prompt=prompt, model=model)
def scheduled_task() -> None:
"""Executes the scheduled image generation task."""

View File

@ -121,8 +121,29 @@ def generate_image(
except Exception as e:
logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
raise
def select_model(model: str) -> tuple[str, str]:
    """Resolve a requested model name into a (workflow, model) pair.

    Args:
        model: A concrete model name, or "Random" to pick one at random
            from the configured model lists.

    Returns:
        Tuple of (selected_workflow, model) where selected_workflow is
        "FLUX" or "SDXL" and model is a concrete model name.
    """
    # Config values are stored as strings ("true"/"false"); json.loads
    # converts them to real booleans.
    use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
    only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())

    if model == "Random":
        # Pick the workflow first: FLUX only if enabled, and either forced
        # (ONLY_FLUX) or by coin flip; otherwise fall back to SDXL.
        selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
        if selected_workflow == "FLUX":
            valid_models = user_config["comfyui:flux"]["models"].split(",")
        else:  # SDXL: restrict the configured list to models ComfyUI actually has
            available_model_list = user_config["comfyui"]["models"].split(",")
            valid_models = list(set(get_available_models()) & set(available_model_list))
        model = random.choice(valid_models)
    elif "flux" in model.lower():
        # An explicit model name containing "flux" routes to the FLUX workflow.
        selected_workflow = "FLUX"
    else:
        selected_workflow = "SDXL"

    return selected_workflow, model
def create_image(prompt: str | None = None, model: str = "Random") -> None:
"""Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
@ -134,18 +155,9 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
return
save_prompt(prompt)
use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
if model == "Random":
selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
elif "flux" in model.lower():
selected_workflow = "FLUX"
else:
selected_workflow = "SDXL"
selected_workflow, model = select_model(model)
if selected_workflow == "FLUX":
if model == "Random":
valid_models = user_config["comfyui:flux"]["models"].split(",")
model = random.choice(valid_models)
generate_image(
file_name="image",
comfy_prompt=prompt,
@ -160,12 +172,6 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
model=model
)
else: # SDXL
if model == "Random":
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(set(get_available_models()) & set(available_model_list))
model = random.choice(valid_models)
generate_image("image", comfy_prompt=prompt, model=model)
logging.info(f"{selected_workflow} generation started with prompt: {prompt}")

View File

@ -52,10 +52,11 @@
</style>
</head>
<body>
<div class="message">Image will be made using prompt:</div>
<div class="message">Image will be made with <i>{{ model }}</i> using prompt:</div>
<div class="prompt-text">
{{ prompt }}
</div>
<button onclick="location.href='/'">Home</button>
</body>
</html>