diff --git a/ai_frame_image_server.py b/ai_frame_image_server.py
index b32f627..2033784 100644
--- a/ai_frame_image_server.py
+++ b/ai_frame_image_server.py
@@ -17,53 +17,73 @@ app = Flask(__name__)
 
 image_folder = "./output"
 
-
-@app.route("/")
-def index():
+@app.route("/", methods=["GET"])
+def index() -> str:
+    """
+    Renders the main HTML template.
+    Args:
+        None
+    Returns:
+        str: The rendered HTML template.
+    """
     return render_template(
         "index.html",
         image="./image.png",
         reload_interval=user_config["frame"]["reload_interval"],
     )
 
-
-@app.route("/images")
-def gallery():
+@app.route("/images", methods=["GET"])
+def gallery() -> str:
+    """
+    Renders the gallery HTML template.
+    Args:
+        None
+    Returns:
+        str: The rendered HTML template.
+    """
     images = [f for f in os.listdir(image_folder) if f.lower().endswith(('png', 'jpg', 'jpeg', 'gif'))]
     return render_template("gallery.html", images=images)
 
 
-@app.route("/images/<filename>")
-def images(filename):
+@app.route("/images/<filename>", methods=["GET"])
+def images(filename: str) -> None:
+    """
+    Serves the requested image file.
+    Args:
+        filename (str): The name of the image file.
+    Returns:
+        None: Sends the image file.
+    """
     return send_from_directory(image_folder, filename)
 
 
 @app.route("/create", methods=["GET", "POST"])
-def create():
-    """Endpoint to create a new image. Supports optional prompt via POST."""
+def create() -> str:
+    """Handles image creation requests.
+    Args:
+        None
+    Returns:
+        str: Redirect to the main page or a JSON response.
+    """
     prompt = request.form.get("prompt") if request.method == "POST" else None
-    create_image(prompt)  # Pass prompt to create_image()
-
+    create_image(prompt)
     if request.method == "POST":
         return jsonify({"message": "Image created", "prompt": prompt}), 200
     return redirect(url_for("index"))
 
 
-def scheduled_task():
+def scheduled_task() -> None:
+    """Executes the scheduled image generation task."""
     print(f"Executing scheduled task at {time.strftime('%Y-%m-%d %H:%M:%S')}")
     create_image(None)
 
 
 if user_config["frame"]["auto_regen"] == "True":
     scheduler = BackgroundScheduler()
-
     regen_time = user_config["frame"]["regen_time"].split(":")
     scheduler.add_job(scheduled_task, "cron", hour=regen_time[0], minute=regen_time[1])
     scheduler.start()
 
-if __name__ == "__main__":
-    os.makedirs(image_folder, exist_ok=True)  # Ensure the folder exists
-    try:
+    os.makedirs(image_folder, exist_ok=True)
         app.run(host="0.0.0.0", port=user_config["frame"]["port"], debug=True)
-    except KeyboardInterrupt:
-        scheduler.shutdown()  # Ensure graceful shutdown of scheduler
+
diff --git a/lib.py b/lib.py
index a57e52a..018a299 100644
--- a/lib.py
+++ b/lib.py
@@ -8,17 +8,26 @@ import os
 import requests
 from comfy_api_simplified import ComfyApiWrapper, ComfyWorkflowWrapper
 
-def get_available_models():
+
+def get_available_models() -> list:
+    """Fetches available models from ComfyUI."""
     url = user_config["comfyui"]["comfyui_url"] + "/object_info"
     response = requests.get(url)
     if response.status_code == 200:
         data = response.json()
-        return data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [])[0]
+        return (
+            data.get("CheckpointLoaderSimple", {})
+            .get("input", {})
+            .get("required", {})
+            .get("ckpt_name", [])[0]
+        )
     else:
         print(f"Failed to fetch models: {response.status_code}")
         return []
 
-def load_config():
+
+def load_config() -> configparser.ConfigParser:
+    """Loads user configuration from ./user_config.cfg."""
     user_config = configparser.ConfigParser()
     try:
         user_config.read("./user_config.cfg")
@@ -29,10 +38,10 @@ def load_config():
         sys.exit(1)
 
 
-def rename_image():
-    """Rename 'image.png' to a timestamped filename if it exists in the output folder."""
+def rename_image() -> str | None:
+    """Renames 'image.png' in the output folder to a timestamped filename if it exists."""
     old_path = os.path.join(user_config["comfyui"]["output_dir"], "image.png")
-    
+
     if os.path.exists(old_path):
         new_filename = f"{str(time.time())}.png"
         new_path = os.path.join(user_config["comfyui"]["output_dir"], new_filename)
@@ -44,7 +53,8 @@ def rename_image():
     return None
 
 
-def send_prompt_to_openwebui(prompt):
+def send_prompt_to_openwebui(prompt: str) -> str:
+    """Sends prompt to OpenWebui and returns the generated response."""
     response = litellm.completion(
         api_base=user_config["openwebui"]["base_url"],
         model="openai/" + user_config["openwebui"]["model"],
@@ -60,31 +70,36 @@ def send_prompt_to_openwebui(prompt):
     return response["choices"][0]["message"]["content"].strip('"')
 
 
-def generate_image(file_name, comfy_prompt):
-    """Generate an image using the Comfy API."""
+def generate_image(file_name: str, comfy_prompt: str) -> None:
+    """Generates an image using the Comfy API."""
     try:
-        # Initialize API and workflow
+        # Initialize ComfyUI API and workflow
         api = ComfyApiWrapper(user_config["comfyui"]["comfyui_url"])
         wf = ComfyWorkflowWrapper("./workflow_api.json")
-        # Set workflow parameters
-        wf.set_node_param("KSampler", "seed", random.getrandbits(32))
-        # wf.set_node_param("KSampler", "steps", steps)
-        wf.set_node_param("CLIP Text Encode (Prompt)", "text", comfy_prompt)
-        wf.set_node_param("Save Image", "filename_prefix", file_name)
-        wf.set_node_param("Empty Latent Image", "width", user_config["comfyui"]["width"])
-        wf.set_node_param("Empty Latent Image", "height", user_config["comfyui"]["height"])
-        valid_models = list(set(get_available_models()) & set(user_config["comfyui"]["models"].split(",")))
+        wf.set_node_param("KSampler", "seed", random.getrandbits(32))  # Set a random seed for the sampler
+        wf.set_node_param("CLIP Text Encode (Prompt)", "text", comfy_prompt)  # Set the prompt to be used for image generation
+        wf.set_node_param("Save Image", "filename_prefix", file_name)  # Set the filename prefix for the generated image
+        wf.set_node_param(  # Set image dimensions
+            "Empty Latent Image", "width", user_config["comfyui"]["width"]
+        )
+        wf.set_node_param(
+            "Empty Latent Image", "height", user_config["comfyui"]["height"]
+        )
+        # Validate available models and choose a random one
+        valid_models = list(
+            set(get_available_models())  # Get all available models from ComfyUI
+            & set(user_config["comfyui"]["models"].split(","))
+        )
         if not valid_models:
             raise Exception("No valid options available.")
         model = random.choice(valid_models)
-        wf.set_node_param(
-            "Load Checkpoint", "ckpt_name", model
-        )
-        # Queue your workflow for completion
+        wf.set_node_param("Load Checkpoint", "ckpt_name", model)  # Set the model to be used for image generation
+        # Generate the image using the workflow and wait for completion
         logging.debug(f"Generating image: {file_name}")
-        results = api.queue_and_wait_images(wf, "Save Image")
-        rename_image()
+        results = api.queue_and_wait_images(wf, "Save Image")  # Queue the workflow and wait for image generation to complete
+        rename_image()  # Rename the generated image file if it exists
+        # Save the generated image to disk
         for filename, image_data in results.items():
             with open(
                 user_config["comfyui"]["output_dir"] + file_name + ".png", "wb+"
@@ -95,7 +110,7 @@ def generate_image(file_name, comfy_prompt):
logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}") -def create_image(prompt): +def create_image(prompt: str | None = None) -> None: """Main function for generating images.""" if prompt is None: prompt = send_prompt_to_openwebui(user_config["comfyui"]["prompt"])