From 4b52e5c713c3745cc382b7804821f63044243a65 Mon Sep 17 00:00:00 2001 From: Karl Date: Fri, 6 Jun 2025 11:15:43 +0100 Subject: [PATCH] code updates for date, fix update flux guff, new prompt logic --- ai_frame_image_server.py | 3 ++- libs/comfyui.py | 4 ++-- libs/generic.py | 3 ++- libs/ollama.py | 8 ++++---- templates/gallery.html | 2 +- 5 files changed, 11 insertions(+), 9 deletions(-) diff --git a/ai_frame_image_server.py b/ai_frame_image_server.py index c83b17f..79bea8e 100644 --- a/ai_frame_image_server.py +++ b/ai_frame_image_server.py @@ -60,7 +60,8 @@ def image_details(filename): details = get_details_from_png(path) return { "prompt": details["p"], - "model": details["m"] + "model": details["m"], + "date": details["d"] } diff --git a/libs/comfyui.py b/libs/comfyui.py index c686f61..781a13e 100644 --- a/libs/comfyui.py +++ b/libs/comfyui.py @@ -155,8 +155,8 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None: seed_param="seed", save_node="CivitAI Image Saver", save_param="filename", - model_node="CivitAI Image Saver", - model_param="modelname", + model_node="Unet Loader (GGUF)", + model_param="unet_name", model=model ) else: # SDXL diff --git a/libs/generic.py b/libs/generic.py index 1c6cd10..e80504d 100644 --- a/libs/generic.py +++ b/libs/generic.py @@ -66,6 +66,7 @@ def rename_image() -> str | None: def get_details_from_png(path): try: + date = datetime.fromtimestamp(os.path.getctime(path)).strftime("%d-%m-%Y") with Image.open(path) as img: try: # Flux workflow @@ -77,7 +78,7 @@ def get_details_from_png(path): data = json.loads(img.info["prompt"]) prompt = data['6']['inputs']['text'] model = data['4']['inputs']['ckpt_name'] - return {"p":prompt,"m":model} or {"p":"","m":""} + return {"p":prompt,"m":model,"d":date} or {"p":"","m":"","d":""} except Exception as e: print(f"Error reading metadata from {path}: {e}") return "" diff --git a/libs/ollama.py b/libs/ollama.py index ab55969..1494c7b 100644 --- a/libs/ollama.py +++ 
b/libs/ollama.py @@ -24,13 +24,13 @@ def create_prompt_on_openwebui(prompt: str) -> str: topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt." user_content = ( - "Here are the prompts from the last 7 days:\n\n" - + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts)) - + "\n\nDo not repeat ideas, themes, or settings from the above. " "Now generate a new, completely original Stable Diffusion prompt that hasn't been done yet." + "Can you generate me a really random image idea. Do not exceed 10 words. Use clear language, not poetic metaphors." + topic_instruction + + " Avoid prompts similar to the following:\n" + + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts)) ) + model = random.choice(user_config["openwebui"]["models"].split(",")) response = litellm.completion( api_base=user_config["openwebui"]["base_url"], diff --git a/templates/gallery.html b/templates/gallery.html index 895b3a7..e3f7abf 100644 --- a/templates/gallery.html +++ b/templates/gallery.html @@ -263,7 +263,7 @@ if (detailsCache[filename]) { document.getElementById("lightbox-prompt").textContent = - `Model: ${detailsCache[filename].model}\n\n${detailsCache[filename].prompt}`; + `Created On: ${detailsCache[filename].date}\n\nModel: ${detailsCache[filename].model}\n\n${detailsCache[filename].prompt}`; } else { document.getElementById("lightbox-prompt").textContent = "Loading…";