Rework lib.py into a libs package, and use thumbnails in the gallery

This commit is contained in:
Karl Hudgell 2025-04-25 10:13:03 +01:00
parent 020c2c2f1b
commit 41fd1444eb
9 changed files with 394 additions and 5 deletions

2
.gitignore vendored
View File

@ -5,6 +5,6 @@ script.log
build/ build/
dist/ dist/
user_config.cfg user_config.cfg
output/**.* output/
prompts_log.jsonl prompts_log.jsonl
publish.sh publish.sh

View File

@ -9,7 +9,10 @@ import os
import time import time
import threading import threading
from apscheduler.schedulers.background import BackgroundScheduler from apscheduler.schedulers.background import BackgroundScheduler
from lib import create_image, load_config, create_prompt_on_openwebui, cancel_current_job, get_prompt_from_png # from lib import create_image, load_config, create_prompt_on_openwebui, cancel_current_job, get_prompt_from_png
from libs.generic import load_config, load_recent_prompts, get_prompt_from_png
from libs.comfyui import cancel_current_job, create_image
from libs.ollama import create_prompt_on_openwebui
user_config = load_config() user_config = load_config()
app = Flask(__name__) app = Flask(__name__)
@ -52,6 +55,11 @@ def gallery() -> str:
return render_template("gallery.html", images=images) return render_template("gallery.html", images=images)
@app.route('/images/thumbnails/<path:filename>')
def serve_thumbnail(filename):
    # Serve pre-generated thumbnails from output/thumbnails so the gallery
    # can load small images instead of full renders.
    # send_from_directory guards against path traversal in `filename`.
    return send_from_directory('output/thumbnails', filename)
@app.route("/images/<filename>", methods=["GET"]) @app.route("/images/<filename>", methods=["GET"])
def images(filename: str) -> None: def images(filename: str) -> None:
""" """

28
create_thumbs_from_old.py Normal file
View File

@ -0,0 +1,28 @@
"""One-off migration script: create thumbnails for existing images.

Walks every image already in ``output/`` and writes a width-limited
thumbnail with the same filename into ``output/thumbnails/`` — the same
layout the gallery's thumbnail route expects.
"""
import os

from PIL import Image

# Define paths
input_folder = "output"
thumbs_folder = "output/thumbnails"
thumb_width = 500

# Create the thumbs folder if it doesn't exist
os.makedirs(thumbs_folder, exist_ok=True)

# Supported image extensions
image_extensions = (".png", ".jpg", ".jpeg", ".webp")

# Loop through files
for filename in os.listdir(input_folder):
    if filename.lower().endswith(image_extensions):
        input_path = os.path.join(input_folder, filename)
        output_path = os.path.join(thumbs_folder, filename)

        try:
            with Image.open(input_path) as img:
                # thumbnail() preserves aspect ratio; passing the image's own
                # height means only the 500px width actually constrains it.
                img.thumbnail((thumb_width, img.height), Image.LANCZOS)
                img.save(output_path)
                print(f"✅ Thumbnail saved: {output_path}")
        except Exception as e:
            # BUG FIX: the error message previously printed a literal
            # placeholder instead of naming the failing file.
            print(f"❌ Error processing {filename}: {e}")

3
lib.py
View File

@ -19,7 +19,7 @@ from tenacity import (
import nest_asyncio import nest_asyncio
import json import json
from datetime import datetime from datetime import datetime
from libs.create_thumbnail import generate_thumbnail
nest_asyncio.apply() nest_asyncio.apply()
logging.basicConfig(level=logging.INFO) logging.basicConfig(level=logging.INFO)
@ -96,6 +96,7 @@ def rename_image() -> str | None:
new_filename = f"{str(time.time())}.png" new_filename = f"{str(time.time())}.png"
new_path = os.path.join(user_config["comfyui"]["output_dir"], new_filename) new_path = os.path.join(user_config["comfyui"]["output_dir"], new_filename)
os.rename(old_path, new_path) os.rename(old_path, new_path)
generate_thumbnail(new_path)
print(f"Renamed 'image.png' to '{new_filename}'") print(f"Renamed 'image.png' to '{new_filename}'")
return new_filename return new_filename
else: else:

163
libs/comfyui.py Normal file
View File

@ -0,0 +1,163 @@
import random
import logging
import os
import requests
from typing import Optional
from comfy_api_simplified import ComfyApiWrapper, ComfyWorkflowWrapper
from tenacity import (
retry,
stop_after_attempt,
wait_fixed,
before_log,
retry_if_exception_type,
)
import nest_asyncio
from libs.generic import rename_image, load_config, save_prompt
from libs.create_thumbnail import generate_thumbnail
from libs.ollama import create_prompt_on_openwebui
nest_asyncio.apply()
logging.basicConfig(level=logging.INFO)
LOG_FILE = "./prompts_log.jsonl"
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]
def get_available_models() -> list:
    """Return the checkpoint names ComfyUI reports via /object_info.

    On any non-200 response the status code is printed and an empty
    list is returned.
    """
    endpoint = user_config["comfyui"]["comfyui_url"] + "/object_info"
    resp = requests.get(endpoint)
    if resp.status_code != 200:
        print(f"Failed to fetch models: {resp.status_code}")
        return []
    node_info = resp.json()
    required_inputs = (
        node_info.get("CheckpointLoaderSimple", {})
        .get("input", {})
        .get("required", {})
    )
    # Element 0 of ckpt_name holds the list of model names
    # (presumably followed by option metadata — structure per ComfyUI API).
    return required_inputs.get("ckpt_name", [])[0]
def cancel_current_job() -> str:
    """Interrupt the job ComfyUI is currently executing.

    POSTs to ComfyUI's /interrupt endpoint.

    Returns:
        str: "Cancelled" on HTTP 200, otherwise "Failed to cancel".
    """
    # BUG FIX: docstring previously said "Fetches available models"
    # (copy-paste from get_available_models) and the return annotation
    # said `list` although strings are returned.
    url = user_config["comfyui"]["comfyui_url"] + "/interrupt"
    response = requests.post(url)
    if response.status_code == 200:
        return "Cancelled"
    else:
        return "Failed to cancel"
# Define the retry logic using Tenacity
@retry(
    stop=stop_after_attempt(3),
    wait=wait_fixed(5),
    before=before_log(logging.getLogger(), logging.DEBUG),
    retry=retry_if_exception_type(Exception),
)
def generate_image(
    file_name: str,
    comfy_prompt: str,
    workflow_path: str = "./workflow_api.json",
    prompt_node: str = "CLIP Text Encode (Prompt)",
    seed_node: str = "KSampler",
    seed_param: str = "seed",
    save_node: str = "Save Image",
    save_param: str = "filename_prefix",
    model_node: Optional[str] = "Load Checkpoint",
    model_param: Optional[str] = "ckpt_name",
) -> None:
    """Generates an image using the Comfy API with configurable workflow settings.

    Retries up to 3 times, 5 seconds apart, on any exception (Tenacity).

    Args:
        file_name: Base name (without extension) for the saved image.
        comfy_prompt: Prompt text injected into the workflow's prompt node.
        workflow_path: Workflow JSON file to load.
        prompt_node / seed_node / save_node: Node titles inside the workflow.
        seed_param / save_param: Parameter names on those nodes.
        model_node / model_param: Checkpoint node/parameter; model selection
            is skipped entirely if either is falsy.

    Raises:
        Exception: re-raised after logging when generation ultimately fails.
    """
    try:
        api = ComfyApiWrapper(user_config["comfyui"]["comfyui_url"])
        wf = ComfyWorkflowWrapper(workflow_path)

        # Set workflow parameters
        # Fresh random 32-bit seed per call so repeated prompts still vary.
        wf.set_node_param(seed_node, seed_param, random.getrandbits(32))
        wf.set_node_param(prompt_node, "text", comfy_prompt)
        wf.set_node_param(save_node, save_param, file_name)
        # The stock workflow_api.json sizes via "Empty Latent Image"; any
        # other workflow (e.g. FLUX.json) is assumed to use "CR Aspect Ratio".
        wf.set_node_param(
            (
                "Empty Latent Image"
                if workflow_path.endswith("workflow_api.json")
                else "CR Aspect Ratio"
            ),
            "width",
            user_config["comfyui"]["width"],
        )
        wf.set_node_param(
            (
                "Empty Latent Image"
                if workflow_path.endswith("workflow_api.json")
                else "CR Aspect Ratio"
            ),
            "height",
            user_config["comfyui"]["height"],
        )

        # Conditionally set model if node and param are provided
        if model_node and model_param:
            if user_config["comfyui"].get("FLUX"):
                # FLUX mode: models come straight from the [comfyui:flux] section.
                valid_models = user_config["comfyui:flux"]["models"].split(",")
            else:
                # Otherwise keep only configured models that ComfyUI reports.
                available_model_list = user_config["comfyui"]["models"].split(",")
                valid_models = list(
                    set(get_available_models()) & set(available_model_list)
                )

            if not valid_models:
                raise Exception("No valid models available.")

            model = random.choice(valid_models)
            wf.set_node_param(model_node, model_param, model)

        # Generate image
        logging.debug(f"Generating image: {file_name}")
        results = api.queue_and_wait_images(wf, save_node)
        # Archive any pre-existing image.png before writing the new output.
        rename_image()
        for _, image_data in results.items():
            output_path = os.path.join(
                user_config["comfyui"]["output_dir"], f"{file_name}.png"
            )
            with open(output_path, "wb+") as f:
                f.write(image_data)
            # Keep the gallery's thumbnail cache in sync with the new image.
            generate_thumbnail(output_path)
        logging.debug(f"Image generated successfully for UID: {file_name}")
    except Exception as e:
        logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
        raise
def create_image(prompt: str | None = None) -> None:
    """Main function for generating images.

    Args:
        prompt: Prompt text to render. When None, a fresh prompt is
            requested from OpenWebUI using the configured base prompt.
    """
    if prompt is None:
        prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
    if prompt:
        logging.info(f"Generated prompt: {prompt}")  # Log generated prompt
        save_prompt(prompt)
        # BUG FIX: use .get() so a missing FLUX key selects the SDXL path
        # instead of raising KeyError (generate_image already does this).
        # NOTE(review): configparser values are strings, so FLUX = "False"
        # would still be truthy — confirm the key is only set when wanted.
        if user_config["comfyui"].get("FLUX"):
            generate_image(
                file_name="image",
                comfy_prompt=prompt,
                workflow_path="./FLUX.json",
                prompt_node="Positive Prompt T5",
                seed_node="Seed",
                seed_param="seed",
                save_node="CivitAI Image Saver",
                save_param="filename",
                model_node="CivitAI Image Saver",
                model_param="modelname",
            )
        else:
            generate_image("image", prompt)
        print(f"Image generation started with prompt: {prompt}")
    else:
        logging.error("No prompt generated.")

34
libs/create_thumbnail.py Normal file
View File

@ -0,0 +1,34 @@
from PIL import Image
import os
def generate_thumbnail(image_path: str, size=(500, 500)) -> str:
    """
    Generates a thumbnail for a given image with a max size of 500x500,
    and saves it in a 'thumbnails' subdirectory alongside the original.

    Existing thumbnails are left untouched; failures are printed, not raised.

    Args:
        image_path (str): Path to the original image.
        size (tuple): Maximum width and height of the thumbnail.

    Returns:
        str: Path to the thumbnail image (whether or not it was created).
    """
    image_dir = os.path.dirname(image_path)
    thumbnail_dir = os.path.join(image_dir, "thumbnails")
    os.makedirs(thumbnail_dir, exist_ok=True)

    filename = os.path.basename(image_path)
    thumbnail_path = os.path.join(thumbnail_dir, filename)

    if not os.path.exists(thumbnail_path):
        try:
            # BUG FIX: open via context manager so the source file handle
            # is closed even when thumbnailing or saving fails.
            with Image.open(image_path) as img:
                img.thumbnail(size, Image.Resampling.LANCZOS)
                img.save(thumbnail_path, optimize=True)
            print(f"Created thumbnail: {thumbnail_path}")
        except Exception as e:
            print(f"Error creating thumbnail for {image_path}: {e}")
    else:
        print(f"Thumbnail already exists: {thumbnail_path}")

    return thumbnail_path

82
libs/generic.py Normal file
View File

@ -0,0 +1,82 @@
import configparser
import logging
import sys
import time
import os
from PIL import Image
import nest_asyncio
import json
from datetime import datetime
from libs.create_thumbnail import generate_thumbnail
nest_asyncio.apply()
logging.basicConfig(level=logging.INFO)
LOG_FILE = "./prompts_log.jsonl"
def load_recent_prompts(count=7):
    """Return up to the last `count` prompts recorded in the JSONL log.

    A missing log file simply yields an empty list.
    """
    try:
        with open(LOG_FILE, "r") as log:
            tail = log.readlines()[-count:]
    except FileNotFoundError:
        return []  # No prompts yet
    return [json.loads(entry.strip())["prompt"] for entry in tail]
def save_prompt(prompt):
    """Append a prompt, stamped with today's date, to the JSONL log."""
    record = {"date": datetime.now().strftime("%Y-%m-%d"), "prompt": prompt}
    with open(LOG_FILE, "a") as log:
        log.write(json.dumps(record) + "\n")
def load_config() -> configparser.ConfigParser:
    """Loads user configuration from ./user_config.cfg.

    Returns:
        configparser.ConfigParser: the parsed configuration. If the file
        is absent the parser is empty (``read()`` does not raise).

    Exits:
        Via sys.exit(1) on a missing configuration key.
    """
    user_config = configparser.ConfigParser()
    try:
        # BUG FIX: configparser.read() silently skips a missing file;
        # warn here instead of failing later with an opaque KeyError.
        if not user_config.read("./user_config.cfg"):
            logging.warning("./user_config.cfg not found; configuration is empty.")
        logging.debug("Configuration loaded successfully.")
        return user_config
    except KeyError as e:
        logging.error(f"Missing configuration key: {e}")
        sys.exit(1)
def rename_image() -> str | None:
    """Renames 'image.png' in the output folder to a timestamped filename if it exists.

    A thumbnail is generated for the renamed file. Returns the new
    filename, or None when no image.png is present.
    """
    output_dir = user_config["comfyui"]["output_dir"]
    old_path = os.path.join(output_dir, "image.png")
    if not os.path.exists(old_path):
        print("No image.png found.")
        return None
    new_filename = f"{str(time.time())}.png"
    new_path = os.path.join(output_dir, new_filename)
    os.rename(old_path, new_path)
    generate_thumbnail(new_path)
    print(f"Renamed 'image.png' to '{new_filename}'")
    return new_filename
def get_prompt_from_png(path):
    """Extract the generation prompt embedded in a PNG's metadata.

    Reads the workflow JSON stored under the image's "prompt" info key,
    checking the Flux node id ('44') first and falling back to the SDXL
    node id ('6'). Returns "" when the metadata is absent or unreadable.
    """
    try:
        with Image.open(path) as img:
            workflow_json = img.info["prompt"]
            try:
                # Flux workflow stores the prompt text on node 44
                prompt_text = json.loads(workflow_json)['44']['inputs']['text']
            except KeyError:
                # SDXL workflow stores it on node 6
                prompt_text = json.loads(workflow_json)['6']['inputs']['text']
            return prompt_text or ""
    except Exception as e:
        print(f"Error reading metadata from {path}: {e}")
        return ""
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]

70
libs/ollama.py Normal file
View File

@ -0,0 +1,70 @@
import random
import logging
import litellm
import nest_asyncio
from libs.generic import load_recent_prompts, load_config
nest_asyncio.apply()
logging.basicConfig(level=logging.INFO)
LOG_FILE = "./prompts_log.jsonl"
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]
def create_prompt_on_openwebui(prompt: str) -> str:
    """Ask an OpenWebUI-hosted model for a fresh Stable Diffusion prompt.

    Builds a user message from the recent-prompt history (to avoid
    repeats), optionally steering toward a random configured topic, and
    sends it via litellm to a randomly chosen configured model.

    Args:
        prompt: Base prompt text from the config. NOTE(review): this
            argument is currently unused — the request is built solely
            from the recent-prompt history; confirm whether it should be
            incorporated into the user message.

    Returns:
        str: The generated prompt with surrounding double quotes stripped.
    """
    # Unique list of recent prompts
    recent_prompts = list(set(load_recent_prompts()))

    # Decide on whether to include a topic (e.g., 30% chance to include)
    topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
    topic_instruction = ""
    if random.random() < 0.3 and topics:
        selected_topic = random.choice(topics)
        topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."

    user_content = (
        "Here are the prompts from the last 7 days:\n\n"
        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
        + "\n\nDo not repeat ideas, themes, or settings from the above. "
        "Now generate a new, completely original Stable Diffusion prompt that hasn't been done yet."
        + topic_instruction
    )

    # Pick one model at random from the comma-separated config list.
    model = random.choice(user_config["openwebui"]["models"].split(","))
    response = litellm.completion(
        api_base=user_config["openwebui"]["base_url"],
        model="openai/" + model,
        messages=[
            {
                "role": "system",
                "content": (
                    "You are a prompt generator for Stable Diffusion. "
                    "Generate a detailed and imaginative prompt with a strong visual theme. "
                    "Focus on lighting, atmosphere, and artistic style. "
                    "Keep the prompt concise, no extra commentary or formatting."
                ),
            },
            {
                "role": "user",
                "content": user_content,
            },
        ],
        api_key=user_config["openwebui"]["api_key"],
    )

    # Models often wrap their answer in quotes; strip them off.
    prompt = response["choices"][0]["message"]["content"].strip('"')
    logging.debug(prompt)
    return prompt

View File

@ -96,7 +96,9 @@
<h1>Image Archive</h1> <h1>Image Archive</h1>
<div class="gallery"> <div class="gallery">
{% for image in images %} {% for image in images %}
<img src="{{ url_for('images', filename=image.filename) }}" alt="Image" loading="lazy" onclick="openLightbox({{ loop.index0 }})"> <!-- <img src="{{ url_for('images', filename=image.thumbnail_filename) }}" alt="Image" loading="lazy" onclick="openLightbox({{ loop.index0 }})"> -->
<img src="{{ url_for('images', filename='thumbnails/' + image.filename) }}" data-fullsrc="{{ url_for('images', filename=image.filename) }}" onclick="openLightbox({{ loop.index0 }})">
{% endfor %} {% endfor %}
</div> </div>
@ -122,7 +124,8 @@
function openLightbox(index) { function openLightbox(index) {
currentIndex = index; currentIndex = index;
document.getElementById("lightbox-img").src = images[currentIndex].src; <!-- document.getElementById("lightbox-img").src = images[currentIndex].src; -->
document.getElementById("lightbox-img").src = document.querySelectorAll('.gallery img')[currentIndex].dataset.fullsrc;
document.getElementById("lightbox-prompt").textContent = images[currentIndex].prompt; document.getElementById("lightbox-prompt").textContent = images[currentIndex].prompt;
document.getElementById("lightbox").style.display = "flex"; document.getElementById("lightbox").style.display = "flex";
} }