Mirror of https://github.com/karl0ss/ai_image_frame_server.git, synced 2025-06-08 11:25:07 +01:00
select model on create page
parent 0b74672844
commit b1646a4c6e
@@ -100,14 +100,14 @@ def create() -> str:
         str: Redirect to the main page or a JSON response.
     """
     prompt = request.form.get("prompt") if request.method == "POST" else None
-    model = request.form.get("model") if request.method == "POST" else None
+    model = request.form.get("model") if request.method == "POST" else "Random"


     if prompt is None:
         prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])

     def create_image_in_background():
-        create_image(prompt)
+        create_image(prompt, model)

     threading.Thread(target=create_image_in_background).start()
     return render_template('image_queued.html', prompt=prompt)
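The route above queues the generation and returns immediately instead of blocking the request. A minimal, self-contained sketch of that fire-and-forget pattern follows; every name and timing here is a placeholder, not the project's code.

# Standalone illustration of the pattern used in create(): run the slow job in a
# background thread and respond right away. Everything here is a stand-in.
import threading
import time

def create_image(prompt: str, model: str = "Random") -> None:
    time.sleep(2)  # placeholder for the ComfyUI round trip
    print(f"generated {prompt!r} with model {model!r}")

def handle_create(prompt: str, model: str = "Random") -> str:
    threading.Thread(target=create_image, args=(prompt, model), daemon=True).start()
    return "image_queued"  # stand-in for render_template('image_queued.html', ...)

if __name__ == "__main__":
    print(handle_create("a lighthouse at dusk"))
    time.sleep(3)  # keep the process alive long enough to see the background print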
@@ -71,6 +71,7 @@ def generate_image(
     save_param: str = "filename_prefix",
     model_node: Optional[str] = "Load Checkpoint",
     model_param: Optional[str] = "ckpt_name",
+    model: Optional[str] = "None",
 ) -> None:
     """Generates an image using the Comfy API with configurable workflow settings."""
     try:
@@ -100,20 +101,6 @@ def generate_image(
             user_config["comfyui"]["height"],
         )

-        # Conditionally set model if node and param are provided
-        if model_node and model_param:
-            if "FLUX" in workflow_path:
-                valid_models = user_config["comfyui:flux"]["models"].split(",")
-            else:
-                available_model_list = user_config["comfyui"]["models"].split(",")
-                valid_models = list(
-                    set(get_available_models()) & set(available_model_list)
-                )
-
-            if not valid_models:
-                raise Exception("No valid models available.")
-
-            model = random.choice(valid_models)
         wf.set_node_param(model_node, model_param, model)

         # Generate image
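After this hunk, generate_image no longer picks a model itself; it simply writes whatever model it was handed into the workflow node. As a rough illustration of what a call like wf.set_node_param(model_node, model_param, model) amounts to on a ComfyUI-style workflow dict (this is an assumed shape, not the project's wrapper):

# Rough illustration only: locate a node by its title and overwrite one input.
from typing import Any

def set_node_param(workflow: dict[str, Any], node_title: str, param: str, value: Any) -> None:
    # Walk the API-format workflow: each entry is a node with "inputs" and a "_meta" title.
    for node in workflow.values():
        if node.get("_meta", {}).get("title") == node_title:
            node["inputs"][param] = value
            return
    raise KeyError(f"node {node_title!r} not found")

wf = {
    "4": {"class_type": "CheckpointLoaderSimple",
          "inputs": {"ckpt_name": "old.safetensors"},
          "_meta": {"title": "Load Checkpoint"}},
}
set_node_param(wf, "Load Checkpoint", "ckpt_name", "juggernautXL.safetensors")
print(wf["4"]["inputs"]["ckpt_name"])  # -> juggernautXL.safetensors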
@@ -136,24 +123,29 @@ def generate_image(
         raise


-def create_image(prompt: str | None = None) -> None:
-    """Main function for generating images."""
+def create_image(prompt: str | None = None, model: str = "Random") -> None:
+    """Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""

     if prompt is None:
         prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])

     if not prompt:
         logging.error("No prompt generated.")
         return

     save_prompt(prompt)
-    use_flux = json.loads((user_config["comfyui"].get("FLUX", False)).lower())
-    only_flux = json.loads((user_config["comfyui"].get("ONLY_FLUX", False)).lower())
-    selected_workflow = "SDXL"
-    if use_flux:
-        selected_workflow = "FLUX" if only_flux else random.choice(["FLUX", "SDXL"])
-
+    use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
+    only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
+    if model == "Random":
+        selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
+    elif "flux" in model.lower():
+        selected_workflow = "FLUX"
+    else:
+        selected_workflow = "SDXL"
     if selected_workflow == "FLUX":
+        if model == "Random":
+            valid_models = user_config["comfyui:flux"]["models"].split(",")
+            model = random.choice(valid_models)
         generate_image(
             file_name="image",
             comfy_prompt=prompt,
@@ -165,9 +157,14 @@ def create_image(prompt: str | None = None) -> None:
             save_param="filename",
             model_node="CivitAI Image Saver",
             model_param="modelname",
+            model=model
         )
-    else:
-        generate_image("image", prompt)
+    else: # SDXL
+        if model == "Random":
+            available_model_list = user_config["comfyui"]["models"].split(",")
+            valid_models = list(set(get_available_models()) & set(available_model_list))
+            model = random.choice(valid_models)
+        generate_image("image", comfy_prompt=prompt, model=model)

     logging.info(f"{selected_workflow} generation started with prompt: {prompt}")

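Taken together, the new branching in create_image can be restated as a small standalone helper. The config lookups are inlined here as plain arguments, so this is only a sketch of the selection logic, not the project's function:

# Condensed restatement of the workflow-selection logic introduced above.
# use_flux / only_flux stand in for the FLUX / ONLY_FLUX config flags.
import random

def pick_workflow(model: str, use_flux: bool, only_flux: bool) -> str:
    if model == "Random":
        return "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
    return "FLUX" if "flux" in model.lower() else "SDXL"

print(pick_workflow("Random", use_flux=True, only_flux=False))                   # FLUX or SDXL
print(pick_workflow("flux1-dev.safetensors", use_flux=False, only_flux=False))   # FLUX
print(pick_workflow("juggernautXL.safetensors", use_flux=True, only_flux=True))  # SDXL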