mirror of https://github.com/karl0ss/ai_image_frame_server.git (synced 2025-06-08 03:25:06 +01:00)
split out model selection so it can be returned to the UI
commit f7f049aacb
parent 669dad4044
@@ -12,7 +12,7 @@ import time
 import threading
 from apscheduler.schedulers.background import BackgroundScheduler
 from libs.generic import load_config, load_recent_prompts, get_details_from_png, get_current_version, load_models_from_config
-from libs.comfyui import cancel_current_job, create_image
+from libs.comfyui import cancel_current_job, create_image, select_model
 from libs.ollama import create_prompt_on_openwebui
 
 #workflow test commit
@@ -98,7 +98,7 @@ def cancel_job() -> None:
 def create():
     if request.method == "POST":
         prompt = request.form.get("prompt")
-        model = request.form.get("model") or "Random"
+        selected_workflow, model = select_model(request.form.get("model") or "Random")
 
         if not prompt:
             prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
@@ -106,8 +106,7 @@ def create():
         # Start generation in background
         threading.Thread(target=lambda: create_image(prompt, model)).start()
 
-        # store prompt in query string temporarily
-        return redirect(url_for("image_queued", prompt=prompt))
+        return redirect(url_for("image_queued", prompt=prompt, model=model.split(".")[0]))
 
     # For GET requests, just show the form to enter prompt
     return render_template("create_image.html", models=load_models_from_config())
@@ -116,7 +115,8 @@ def create():
 @app.route("/image_queued")
 def image_queued():
     prompt = request.args.get("prompt", "No prompt provided.")
-    return render_template("image_queued.html", prompt=prompt)
+    model = request.args.get("model", "No model selected.").split(".")[0]
+    return render_template("image_queued.html", prompt=prompt, model=model)
 
 def scheduled_task() -> None:
     """Executes the scheduled image generation task."""
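
Taken together, the changes to create() and image_queued() pass the resolved model through the redirect as a query-string parameter, stripping the file extension with .split(".")[0] on both ends. A minimal sanity check with Flask's test client is sketched below; note the hedges: the server module name and the /create route path are assumptions (only the view function create() is visible in the diff), and because the POST spawns a real background generation thread, this is only meaningful against a working setup.

from ai_frame_image_server import app  # module name is an assumption, not shown in the diff

with app.test_client() as client:
    # POST an explicit prompt and model; the view answers with a 302 to /image_queued
    resp = client.post(
        "/create",  # route path assumed; only the view function create() appears above
        data={"prompt": "a red fox in the snow", "model": "sdxl_base.safetensors"},
    )
    assert resp.status_code == 302
    # the extension is stripped via .split(".")[0] before entering the query string
    assert "model=sdxl_base" in resp.headers["Location"]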
@@ -122,6 +122,27 @@ def generate_image(
         logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
         raise
 
+def select_model(model: str) -> tuple[str, str]:
+    use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
+    only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
+
+    if model == "Random":
+        selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
+    elif "flux" in model.lower():
+        selected_workflow = "FLUX"
+    else:
+        selected_workflow = "SDXL"
+
+    if model == "Random":
+        if selected_workflow == "FLUX":
+            valid_models = user_config["comfyui:flux"]["models"].split(",")
+        else:  # SDXL
+            available_model_list = user_config["comfyui"]["models"].split(",")
+            valid_models = list(set(get_available_models()) & set(available_model_list))
+        model = random.choice(valid_models)
+
+    return selected_workflow, model
+
 
 def create_image(prompt: str | None = None, model: str = "Random") -> None:
     """Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
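
The extracted helper (presumably landing in libs/comfyui.py, given the changed import at the top of the diff) now carries all of the workflow-and-model resolution. A hedged sketch of its behaviour at a REPL, with made-up model filenames:

from libs.comfyui import select_model  # relies on the module's already-loaded user_config

# Explicit names: the workflow comes from a case-insensitive "flux" substring
# match, and the name itself is returned untouched.
select_model("flux1-dev.safetensors")   # -> ("FLUX", "flux1-dev.safetensors")
select_model("sdxl_base.safetensors")   # -> ("SDXL", "sdxl_base.safetensors")

# "Random": the workflow is decided by the FLUX/ONLY_FLUX flags plus a coin flip,
# then a concrete model is drawn with random.choice() from the matching config list.
select_model("Random")                  # e.g. -> ("SDXL", "someModel.safetensors")

Because a concrete name is returned unchanged, the call is idempotent: create() resolves "Random" once in the route, and the second select_model(model) call inside create_image (next hunk) simply re-derives the workflow and leaves the name as-is.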
@@ -134,18 +155,9 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
         return
 
     save_prompt(prompt)
-    use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
-    only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
-    if model == "Random":
-        selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
-    elif "flux" in model.lower():
-        selected_workflow = "FLUX"
-    else:
-        selected_workflow = "SDXL"
+    selected_workflow, model = select_model(model)
+
     if selected_workflow == "FLUX":
-        if model == "Random":
-            valid_models = user_config["comfyui:flux"]["models"].split(",")
-            model = random.choice(valid_models)
         generate_image(
             file_name="image",
             comfy_prompt=prompt,
@@ -160,12 +172,6 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
             model=model
         )
     else:  # SDXL
-        if model == "Random":
-            available_model_list = user_config["comfyui"]["models"].split(",")
-            valid_models = list(set(get_available_models()) & set(available_model_list))
-            model = random.choice(valid_models)
         generate_image("image", comfy_prompt=prompt, model=model)
 
     logging.info(f"{selected_workflow} generation started with prompt: {prompt}")
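
Both the old inline code and the new helper coerce the FLUX and ONLY_FLUX config strings to booleans with json.loads over a lowercased value. A self-contained illustration of why that works:

import json

# json.loads("true") -> True and json.loads("false") -> False, so lowercasing
# first lets INI-style values like "True" or "FALSE" parse as Python booleans.
for raw in ("True", "false", "FALSE"):
    print(json.loads(raw.lower()))   # True, False, False

The trade-off is that only JSON literals parse: a value like "yes" raises a ValueError instead of quietly defaulting to false.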
@@ -52,10 +52,11 @@
     </style>
 </head>
 <body>
-    <div class="message">Image will be made using prompt:</div>
+    <div class="message">Image will be made with <i>{{ model }}</i> using prompt:</div>
     <div class="prompt-text">
         {{ prompt }}
     </div>
     <button onclick="location.href='/'">Home</button>
 </body>
 </html>
+