Working Flux and SDXL image generation

This commit is contained in:
Karl Hudgell 2025-04-19 17:33:20 +01:00
parent 2bbb2fe15b
commit a180a7bd4b
3 changed files with 69 additions and 24 deletions

1
.gitignore vendored
View File

@ -6,3 +6,4 @@ build/
dist/ dist/
user_config.cfg user_config.cfg
output/**.* output/**.*
prompts_log.jsonl

View File

@ -105,7 +105,7 @@
}, },
"class_type": "CR Aspect Ratio", "class_type": "CR Aspect Ratio",
"_meta": { "_meta": {
"title": "🔳 CR Aspect Ratio" "title": "CR Aspect Ratio"
} }
}, },
"42": { "42": {
@ -183,7 +183,7 @@
}, },
"44": { "44": {
"inputs": { "inputs": {
"text": "A council of wise owls wearing tiny glasses and wizard hats, gathered around an ancient floating book in the middle of an enchanted forest at twilight, glowing mushrooms providing light, whimsical and magical, highly detailed, children's book illustration style, soft colors, hand-drawn look", "text": "",
"speak_and_recognation": { "speak_and_recognation": {
"__value__": [ "__value__": [
false, false,

62
lib.py
View File

@ -7,12 +7,46 @@ import time
import os import os
import requests import requests
from comfy_api_simplified import ComfyApiWrapper, ComfyWorkflowWrapper from comfy_api_simplified import ComfyApiWrapper, ComfyWorkflowWrapper
from tenacity import retry, stop_after_attempt, wait_fixed, before_log, retry_if_exception_type from tenacity import (
retry,
stop_after_attempt,
wait_fixed,
before_log,
retry_if_exception_type,
)
import nest_asyncio import nest_asyncio
import json
from datetime import datetime, timedelta
nest_asyncio.apply() nest_asyncio.apply()
logging.basicConfig(level=logging.INFO) logging.basicConfig(level=logging.INFO)
# Path of the JSONL log: one {"date": "YYYY-MM-DD", "prompt": ...} object per line.
LOG_FILE = "./prompts_log.jsonl"


def load_recent_prompts(days=7):
    """Return the prompts logged within the last *days* days.

    Reads LOG_FILE line by line. Lines that are malformed JSON, lack the
    expected keys, or carry an unparsable date are skipped, so a single
    corrupt entry cannot abort prompt loading (the original raised on the
    first bad line).

    Args:
        days: Look-back window in days (default 7).

    Returns:
        list[str]: prompts whose logged date falls within the window.
    """
    recent_prompts = []
    cutoff_date = datetime.now().date() - timedelta(days=days)
    try:
        with open(LOG_FILE, "r", encoding="utf-8") as f:
            for line in f:
                try:
                    data = json.loads(line)
                    prompt_date = datetime.strptime(data["date"], "%Y-%m-%d").date()
                except (json.JSONDecodeError, KeyError, ValueError):
                    continue  # skip corrupt/partial lines instead of crashing
                if prompt_date >= cutoff_date:
                    recent_prompts.append(data["prompt"])
    except FileNotFoundError:
        pass  # no prompts logged yet
    return recent_prompts
def save_prompt(prompt):
    """Append *prompt* to LOG_FILE as one JSON line stamped with today's date.

    Args:
        prompt: The generated prompt text to record.

    The file is opened with an explicit UTF-8 encoding so non-ASCII prompt
    text is written consistently regardless of the platform default.
    """
    entry = {"date": datetime.now().strftime("%Y-%m-%d"), "prompt": prompt}
    with open(LOG_FILE, "a", encoding="utf-8") as f:
        f.write(json.dumps(entry) + "\n")
def get_available_models() -> list: def get_available_models() -> list:
"""Fetches available models from ComfyUI.""" """Fetches available models from ComfyUI."""
@ -70,6 +104,13 @@ def rename_image() -> str | None:
def create_prompt_on_openwebui(prompt: str) -> str: def create_prompt_on_openwebui(prompt: str) -> str:
"""Sends prompt to OpenWebui and returns the generated response.""" """Sends prompt to OpenWebui and returns the generated response."""
recent_prompts = load_recent_prompts()
user_content = (
"Here are the prompts from the last 7 days:\n\n"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+ "\n\nDo not repeat ideas, themes, or settings from the above. Now generate a new, completely original Stable Diffusion prompt that hasn't been done yet."
)
model = random.choice(user_config["openwebui"]["models"].split(",")) model = random.choice(user_config["openwebui"]["models"].split(","))
response = litellm.completion( response = litellm.completion(
api_base=user_config["openwebui"]["base_url"], api_base=user_config["openwebui"]["base_url"],
@ -86,7 +127,7 @@ def create_prompt_on_openwebui(prompt: str) -> str:
}, },
{ {
"role": "user", "role": "user",
"content": prompt, "content": user_content,
}, },
], ],
api_key=user_config["openwebui"]["api_key"], api_key=user_config["openwebui"]["api_key"],
@ -171,6 +212,7 @@ def generate_image(file_name: str, comfy_prompt: str) -> None:
logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}") logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
raise # Re-raise the exception for Tenacity to handle retries raise # Re-raise the exception for Tenacity to handle retries
def generate_image_flux(file_name: str, comfy_prompt: str) -> None: def generate_image_flux(file_name: str, comfy_prompt: str) -> None:
"""Generates an image using the Comfy API with retry logic.""" """Generates an image using the Comfy API with retry logic."""
try: try:
@ -188,12 +230,12 @@ def generate_image_flux(file_name: str, comfy_prompt: str) -> None:
wf.set_node_param( wf.set_node_param(
"CivitAI Image Saver", "filename", file_name "CivitAI Image Saver", "filename", file_name
) # Set the filename prefix for the generated image ) # Set the filename prefix for the generated image
# wf.set_node_param( # Set image dimensions wf.set_node_param( # Set image dimensions
# "Empty Latent Image", "width", user_config["comfyui"]["width"] "CR Aspect Ratio", "width", user_config["comfyui"]["width"]
# ) )
# wf.set_node_param( wf.set_node_param(
# "Empty Latent Image", "height", user_config["comfyui"]["height"] "CR Aspect Ratio", "height", user_config["comfyui"]["height"]
# ) )
# # Validate available models and choose a random one # # Validate available models and choose a random one
# valid_models = list( # valid_models = list(
@ -211,7 +253,8 @@ def generate_image_flux(file_name: str, comfy_prompt: str) -> None:
logging.debug(f"Generating image: {file_name}") logging.debug(f"Generating image: {file_name}")
results = api.queue_and_wait_images( results = api.queue_and_wait_images(
# wf, "Save Image" # wf, "Save Image"
wf, "CivitAI Image Saver" wf,
"CivitAI Image Saver",
) # Queue the workflow and wait for image generation to complete ) # Queue the workflow and wait for image generation to complete
rename_image() # Rename the generated image file if it exists rename_image() # Rename the generated image file if it exists
@ -234,6 +277,7 @@ def create_image(prompt: str | None = None) -> None:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"]) prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
if prompt: if prompt:
logging.info(f"Generated prompt: {prompt}") # Log generated prompt logging.info(f"Generated prompt: {prompt}") # Log generated prompt
save_prompt(prompt)
if user_config["comfyui"]["FLUX"]: if user_config["comfyui"]["FLUX"]:
generate_image_flux("image", prompt) generate_image_flux("image", prompt)
else: else: