add support for multiple models and posting a prompt to /create

This commit is contained in:
Karl Hudgell 2025-03-29 15:50:38 +00:00
parent 0e471d779a
commit d97ef3f171
3 changed files with 31 additions and 10 deletions

View File

@@ -1,4 +1,4 @@
from flask import Flask, render_template, send_from_directory, redirect, url_for from flask import Flask, render_template, send_from_directory, redirect, url_for, request, jsonify
import os import os
from lib import create_image, load_config from lib import create_image, load_config
@@ -10,19 +10,23 @@ image_folder = "./output"
@app.route('/') @app.route('/')
def index(): def index():
# latest_image = get_latest_image()
return render_template("index.html", image="./image.png", reload_interval=user_config["frame"]["reload_interval"]) return render_template("index.html", image="./image.png", reload_interval=user_config["frame"]["reload_interval"])
@app.route('/images/<filename>') @app.route('/images/<filename>')
def images(filename): def images(filename):
return send_from_directory(image_folder, filename) return send_from_directory(image_folder, filename)
@app.route('/create', methods=["GET", "POST"])
def create():
    """Endpoint to create a new image.

    GET  -> generate using the configured default prompt, then redirect home.
    POST -> accept an optional ``prompt`` (form field or JSON body) and
            return a JSON acknowledgement instead of redirecting.
    """
    prompt = None
    if request.method == "POST":
        # Accept the prompt either as a form field or as a JSON payload;
        # request.form alone silently ignores JSON bodies.
        prompt = request.form.get("prompt")
        if prompt is None:
            payload = request.get_json(silent=True) or {}
            prompt = payload.get("prompt")
    create_image(prompt)  # Pass prompt to create_image()
    if request.method == "POST":
        return jsonify({"message": "Image created", "prompt": prompt}), 200
    return redirect(url_for("index"))
if __name__ == '__main__': if __name__ == '__main__':
os.makedirs(image_folder, exist_ok=True) # Ensure the folder exists os.makedirs(image_folder, exist_ok=True) # Ensure the folder exists
app.run(host="0.0.0.0", port=user_config["frame"]["port"], debug=True) app.run(host="0.0.0.0", port=user_config["frame"]["port"], debug=True)

25
lib.py
View File

@@ -5,9 +5,19 @@ import sys
import litellm import litellm
import time import time
import os import os
import requests
from comfy_api_simplified import ComfyApiWrapper, ComfyWorkflowWrapper from comfy_api_simplified import ComfyApiWrapper, ComfyWorkflowWrapper
def get_available_models():
    """Fetch the checkpoint model names available on the ComfyUI server.

    Queries the ComfyUI ``/object_info`` endpoint and extracts the
    ``ckpt_name`` choices of the ``CheckpointLoaderSimple`` node.

    Returns:
        list: Available checkpoint filenames, or an empty list when the
        request fails or the response lacks the expected structure.
    """
    url = user_config["comfyui"]["comfyui_url"] + "/object_info"
    try:
        # A timeout keeps the frame from hanging forever if ComfyUI is down.
        response = requests.get(url, timeout=10)
    except requests.RequestException as e:
        logging.error(f"Failed to fetch models: {e}")
        return []
    if response.status_code == 200:
        data = response.json()
        # ckpt_name is [[model names...], {options...}] in ComfyUI's schema;
        # index 0 is the list of selectable checkpoint filenames. The [[]]
        # default avoids an IndexError when the key is missing.
        choices = (
            data.get("CheckpointLoaderSimple", {})
            .get("input", {})
            .get("required", {})
            .get("ckpt_name", [[]])
        )
        return choices[0] if choices else []
    logging.error(f"Failed to fetch models: {response.status_code}")
    return []
def load_config(): def load_config():
user_config = configparser.ConfigParser() user_config = configparser.ConfigParser()
try: try:
@@ -18,6 +28,7 @@ def load_config():
logging.error(f"Missing configuration key: {e}") logging.error(f"Missing configuration key: {e}")
sys.exit(1) sys.exit(1)
def rename_image(): def rename_image():
"""Rename 'image.png' to a timestamped filename if it exists in the output folder.""" """Rename 'image.png' to a timestamped filename if it exists in the output folder."""
old_path = os.path.join(user_config["comfyui"]["output_dir"], "image.png") old_path = os.path.join(user_config["comfyui"]["output_dir"], "image.png")
@@ -32,6 +43,7 @@ def rename_image():
print("No image.png found.") print("No image.png found.")
return None return None
def send_prompt_to_openwebui(prompt): def send_prompt_to_openwebui(prompt):
response = litellm.completion( response = litellm.completion(
api_base=user_config["openwebui"]["base_url"], api_base=user_config["openwebui"]["base_url"],
@@ -62,8 +74,12 @@ def generate_image(file_name, comfy_prompt):
wf.set_node_param("Save Image", "filename_prefix", file_name) wf.set_node_param("Save Image", "filename_prefix", file_name)
wf.set_node_param("Empty Latent Image", "width", user_config["comfyui"]["width"]) wf.set_node_param("Empty Latent Image", "width", user_config["comfyui"]["width"])
wf.set_node_param("Empty Latent Image", "height", user_config["comfyui"]["height"]) wf.set_node_param("Empty Latent Image", "height", user_config["comfyui"]["height"])
valid_models = list(set(get_available_models()) & set(user_config["comfyui"]["models"].split(",")))
if not valid_models:
raise Exception("No valid options available.")
model = random.choice(valid_models)
wf.set_node_param( wf.set_node_param(
"Load Checkpoint", "ckpt_name", user_config["comfyui"]["model"] "Load Checkpoint", "ckpt_name", model
) )
# Queue your workflow for completion # Queue your workflow for completion
logging.debug(f"Generating image: {file_name}") logging.debug(f"Generating image: {file_name}")
@@ -79,9 +95,10 @@ def generate_image(file_name, comfy_prompt):
logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}") logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
def create_image(prompt=None):
    """Main function for generating images.

    Args:
        prompt: Optional text prompt for the image. Defaults to ``None`` so
            pre-existing no-argument callers keep working; when ``None``, a
            prompt is generated via OpenWebUI from the configured meta-prompt.
    """
    if prompt is None:
        prompt = send_prompt_to_openwebui(user_config["comfyui"]["prompt"])
    print(f"Generated prompt: {prompt}")
    generate_image("image", prompt)

View File

@@ -4,7 +4,7 @@ port = 5000
[comfyui] [comfyui]
comfyui_url = http://comfyui comfyui_url = http://comfyui
model = zavychromaxl_v100.safetensors models = zavychromaxl_v100.safetensors
output_dir = ./output/ output_dir = ./output/
prompt = "Be explicit, only return the prompt and no other text, Generate a random detailed prompt for stable diffusion." prompt = "Be explicit, only return the prompt and no other text, Generate a random detailed prompt for stable diffusion."
width = 1568 width = 1568