mirror of https://github.com/karl0ss/ai-frame-image-server.git (synced 2025-04-26 01:50:11 +01:00)

commit a180a7bd4b (parent 2bbb2fe15b)

    working flux and sdxl
.gitignore (vendored)  |  3

@@ -5,4 +5,5 @@ script.log
 build/
 dist/
 user_config.cfg
-output/**.*
+output/**.*
+prompts_log.jsonl

@@ -105,7 +105,7 @@
     },
     "class_type": "CR Aspect Ratio",
     "_meta": {
-      "title": "🔳 CR Aspect Ratio"
+      "title": "CR Aspect Ratio"
     }
   },
   "42": {
@@ -183,7 +183,7 @@
   },
   "44": {
     "inputs": {
-      "text": "A council of wise owls wearing tiny glasses and wizard hats, gathered around an ancient floating book in the middle of an enchanted forest at twilight, glowing mushrooms providing light, whimsical and magical, highly detailed, children's book illustration style, soft colors, hand-drawn look",
+      "text": "",
       "speak_and_recognation": {
         "__value__": [
           false,
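
The emoji was likely dropped from the node title because comfy_api_simplified's ComfyWorkflowWrapper appears to look nodes up by their "_meta" title string, so the plain title "CR Aspect Ratio" is what lib.py (below) passes to set_node_param. A minimal sketch of that lookup, under that assumption; the workflow file name here is illustrative, since this view does not show which JSON file was edited:

from comfy_api_simplified import ComfyWorkflowWrapper

# "workflow.json" is a placeholder name; the commit does not show which
# workflow JSON file this hunk belongs to.
wf = ComfyWorkflowWrapper("./workflow.json")

# The node is addressed by the plain "_meta" title set in this commit.
wf.set_node_param("CR Aspect Ratio", "width", 1024)
wf.set_node_param("CR Aspect Ratio", "height", 1024)
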
lib.py  |  86
@@ -7,12 +7,46 @@ import time
 import os
 import requests
 from comfy_api_simplified import ComfyApiWrapper, ComfyWorkflowWrapper
-from tenacity import retry, stop_after_attempt, wait_fixed, before_log, retry_if_exception_type
+from tenacity import (
+    retry,
+    stop_after_attempt,
+    wait_fixed,
+    before_log,
+    retry_if_exception_type,
+)
 import nest_asyncio
 import json
+from datetime import datetime, timedelta
 
 nest_asyncio.apply()
 
 logging.basicConfig(level=logging.INFO)
 
+LOG_FILE = "./prompts_log.jsonl"
+
+
+def load_recent_prompts(days=7):
+    recent_prompts = []
+    cutoff_date = datetime.now().date() - timedelta(days=days)
+
+    try:
+        with open(LOG_FILE, "r") as f:
+            for line in f:
+                data = json.loads(line.strip())
+                prompt_date = datetime.strptime(data["date"], "%Y-%m-%d").date()
+                if prompt_date >= cutoff_date:
+                    recent_prompts.append(data["prompt"])
+    except FileNotFoundError:
+        pass  # No prompts yet
+
+    return recent_prompts
+
+
+def save_prompt(prompt):
+    entry = {"date": datetime.now().strftime("%Y-%m-%d"), "prompt": prompt}
+    with open(LOG_FILE, "a") as f:
+        f.write(json.dumps(entry) + "\n")
+
+
 def get_available_models() -> list:
     """Fetches available models from ComfyUI."""
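
Each line save_prompt() appends to prompts_log.jsonl is a self-contained JSON object, and load_recent_prompts() reads them back and keeps only entries from the last seven days by default. A small round-trip sketch (the prompt text is made up, and importing lib assumes its module-level setup succeeds):

from lib import save_prompt, load_recent_prompts

# Appends one JSON line, e.g.:
# {"date": "2025-04-25", "prompt": "a lighthouse at dawn, oil painting"}
save_prompt("a lighthouse at dawn, oil painting")

# Returns the prompt strings logged within the last 7 days.
print(load_recent_prompts(days=7))
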
@@ -70,6 +104,13 @@ def rename_image() -> str | None:
 
 def create_prompt_on_openwebui(prompt: str) -> str:
     """Sends prompt to OpenWebui and returns the generated response."""
+    recent_prompts = load_recent_prompts()
+    user_content = (
+        "Here are the prompts from the last 7 days:\n\n"
+        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+        + "\n\nDo not repeat ideas, themes, or settings from the above. Now generate a new, completely original Stable Diffusion prompt that hasn't been done yet."
+    )
+
     model = random.choice(user_config["openwebui"]["models"].split(","))
     response = litellm.completion(
         api_base=user_config["openwebui"]["base_url"],
@@ -86,7 +127,7 @@ def create_prompt_on_openwebui(prompt: str) -> str:
             },
             {
                 "role": "user",
-                "content": prompt,
+                "content": user_content,
             },
         ],
         api_key=user_config["openwebui"]["api_key"],
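
For a concrete picture of the new de-duplication context, this is roughly what user_content contains when two prompts are already logged (both example prompts are made up); note that the function's prompt argument no longer feeds the user message in this hunk:

recent_prompts = [
    "a lighthouse at dawn, oil painting",  # hypothetical logged prompt
    "a retro robot reading a newspaper",  # hypothetical logged prompt
]
user_content = (
    "Here are the prompts from the last 7 days:\n\n"
    + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
    + "\n\nDo not repeat ideas, themes, or settings from the above. Now generate a new, completely original Stable Diffusion prompt that hasn't been done yet."
)
# user_content now reads:
# Here are the prompts from the last 7 days:
#
# 1. a lighthouse at dawn, oil painting
# 2. a retro robot reading a newspaper
#
# Do not repeat ideas, themes, or settings from the above. Now generate a new, ...
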
@@ -111,10 +152,10 @@ def create_prompt_on_openwebui(prompt: str) -> str:
 
 # Define the retry logic using Tenacity
 # @retry(
-#     stop=stop_after_attempt(3),
-#     wait=wait_fixed(5),
+#     stop=stop_after_attempt(3),
+#     wait=wait_fixed(5),
 #     before=before_log(logging.getLogger(), logging.DEBUG),
-#     retry=retry_if_exception_type(Exception)
+#     retry=retry_if_exception_type(Exception)
 # )
 def generate_image(file_name: str, comfy_prompt: str) -> None:
     """Generates an image using the Comfy API with retry logic."""
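
The tenacity imports are kept (and reformatted) even though the @retry decorator stays commented out, and the function still re-raises exceptions "for Tenacity to handle retries". If the decorator were re-enabled it would look roughly like this sketch, mirroring the commented-out arguments (not part of this commit):

import logging

from tenacity import (
    retry,
    stop_after_attempt,
    wait_fixed,
    before_log,
    retry_if_exception_type,
)


@retry(
    stop=stop_after_attempt(3),  # give up after three attempts
    wait=wait_fixed(5),  # wait 5 seconds between attempts
    before=before_log(logging.getLogger(), logging.DEBUG),
    retry=retry_if_exception_type(Exception),
)
def generate_image(file_name: str, comfy_prompt: str) -> None:
    """Generates an image using the Comfy API with retry logic."""
    ...
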
@@ -122,7 +163,7 @@ def generate_image(file_name: str, comfy_prompt: str) -> None:
         # Initialize ComfyUI API and workflow
         api = ComfyApiWrapper(user_config["comfyui"]["comfyui_url"])
         wf = ComfyWorkflowWrapper("./workflow_api.json")
-
+
         # Set workflow parameters
         wf.set_node_param(
             "KSampler", "seed", random.getrandbits(32)
@@ -139,7 +180,7 @@ def generate_image(file_name: str, comfy_prompt: str) -> None:
         wf.set_node_param(
             "Empty Latent Image", "height", user_config["comfyui"]["height"]
         )
-
+
         # Validate available models and choose a random one
         valid_models = list(
             set(get_available_models())  # Get all available models from ComfyUI
@@ -151,14 +192,14 @@ def generate_image(file_name: str, comfy_prompt: str) -> None:
         wf.set_node_param(
             "Load Checkpoint", "ckpt_name", model
         )  # Set the model to be used for image generation
-
+
         # Generate the image using the workflow and wait for completion
         logging.debug(f"Generating image: {file_name}")
         results = api.queue_and_wait_images(
             wf, "Save Image"
         )  # Queue the workflow and wait for image generation to complete
         rename_image()  # Rename the generated image file if it exists
-
+
         # Save the generated image to disk
         for filename, image_data in results.items():
             with open(
@@ -166,11 +207,12 @@ def generate_image(file_name: str, comfy_prompt: str) -> None:
         ) as f:
             f.write(image_data)
         logging.debug(f"Image generated successfully for UID: {file_name}")
-
+
     except Exception as e:
         logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
         raise  # Re-raise the exception for Tenacity to handle retries
 
+
 def generate_image_flux(file_name: str, comfy_prompt: str) -> None:
     """Generates an image using the Comfy API with retry logic."""
     try:
@@ -188,13 +230,13 @@ def generate_image_flux(file_name: str, comfy_prompt: str) -> None:
         wf.set_node_param(
             "CivitAI Image Saver", "filename", file_name
         )  # Set the filename prefix for the generated image
-        # wf.set_node_param( # Set image dimensions
-        #     "Empty Latent Image", "width", user_config["comfyui"]["width"]
-        # )
-        # wf.set_node_param(
-        #     "Empty Latent Image", "height", user_config["comfyui"]["height"]
-        # )
+        wf.set_node_param(  # Set image dimensions
+            "CR Aspect Ratio", "width", user_config["comfyui"]["width"]
+        )
+        wf.set_node_param(
+            "CR Aspect Ratio", "height", user_config["comfyui"]["height"]
+        )
 
         # # Validate available models and choose a random one
         # valid_models = list(
         #     set(get_available_models())  # Get all available models from ComfyUI
@@ -206,15 +248,16 @@ def generate_image_flux(file_name: str, comfy_prompt: str) -> None:
         # wf.set_node_param(
         #     "Load Checkpoint", "ckpt_name", model
         # )  # Set the model to be used for image generation
-
+
         # Generate the image using the workflow and wait for completion
         logging.debug(f"Generating image: {file_name}")
         results = api.queue_and_wait_images(
             # wf, "Save Image"
-            wf, "CivitAI Image Saver"
+            wf,
+            "CivitAI Image Saver",
         )  # Queue the workflow and wait for image generation to complete
         rename_image()  # Rename the generated image file if it exists
-
+
         # Save the generated image to disk
         for filename, image_data in results.items():
             with open(
@@ -222,7 +265,7 @@ def generate_image_flux(file_name: str, comfy_prompt: str) -> None:
         ) as f:
             f.write(image_data)
         logging.debug(f"Image generated successfully for UID: {file_name}")
-
+
     except Exception as e:
         logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
         raise  # Re-raise the exception for Tenacity to handle retries
@@ -234,6 +277,7 @@ def create_image(prompt: str | None = None) -> None:
         prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
     if prompt:
         logging.info(f"Generated prompt: {prompt}")  # Log generated prompt
+        save_prompt(prompt)
         if user_config["comfyui"]["FLUX"]:
             generate_image_flux("image", prompt)
         else:
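
Taken together, create_image() now asks OpenWebUI for a prompt that avoids the last week's logged prompts, records the result with save_prompt(), and routes to the FLUX or SDXL workflow based on user_config["comfyui"]["FLUX"]. A minimal calling sketch, assuming user_config.cfg is already populated (how the FLUX value is parsed to a boolean is not visible in this diff):

from lib import create_image

# No prompt supplied: a prompt is generated via OpenWebUI, logged to
# prompts_log.jsonl, and dispatched to generate_image_flux() or
# generate_image() depending on the FLUX flag.
create_image()

# Passing a prompt presumably skips the OpenWebUI call (that guard sits
# outside the hunk shown above); the prompt text here is made up.
create_image("a hand-drawn map of an imaginary coastal town")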