Mirror of https://github.com/karl0ss/ai_image_frame_server.git (synced 2025-04-29 03:33:39 +01:00)

Commit a180a7bd4b: working flux and sdxl
Parent: 2bbb2fe15b
.gitignore (vendored), changed lines: 3
@@ -5,4 +5,5 @@ script.log
 build/
 dist/
 user_config.cfg
 output/**.*
+prompts_log.jsonl
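
For reference, prompts_log.jsonl is the append-only history written by the new save_prompt helper in lib.py below: one JSON object per line. A logged entry would look roughly like this (illustrative prompt text, not taken from the repo):

    {"date": "2025-04-29", "prompt": "a lighthouse on a sea cliff at dawn, painterly style"}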
ComfyUI workflow JSON (filename not shown in this view):

@@ -105,7 +105,7 @@
     },
     "class_type": "CR Aspect Ratio",
     "_meta": {
-      "title": "🔳 CR Aspect Ratio"
+      "title": "CR Aspect Ratio"
     }
   },
   "42": {
@@ -183,7 +183,7 @@
   },
   "44": {
     "inputs": {
-      "text": "A council of wise owls wearing tiny glasses and wizard hats, gathered around an ancient floating book in the middle of an enchanted forest at twilight, glowing mushrooms providing light, whimsical and magical, highly detailed, children's book illustration style, soft colors, hand-drawn look",
+      "text": "",
       "speak_and_recognation": {
         "__value__": [
           false,
lib.py, changed lines: 86
@@ -7,12 +7,46 @@ import time
 import os
 import requests
 from comfy_api_simplified import ComfyApiWrapper, ComfyWorkflowWrapper
-from tenacity import retry, stop_after_attempt, wait_fixed, before_log, retry_if_exception_type
+from tenacity import (
+    retry,
+    stop_after_attempt,
+    wait_fixed,
+    before_log,
+    retry_if_exception_type,
+)
 import nest_asyncio
+import json
+from datetime import datetime, timedelta
 
 nest_asyncio.apply()
 
 logging.basicConfig(level=logging.INFO)
 
+LOG_FILE = "./prompts_log.jsonl"
+
+
+def load_recent_prompts(days=7):
+    recent_prompts = []
+    cutoff_date = datetime.now().date() - timedelta(days=days)
+
+    try:
+        with open(LOG_FILE, "r") as f:
+            for line in f:
+                data = json.loads(line.strip())
+                prompt_date = datetime.strptime(data["date"], "%Y-%m-%d").date()
+                if prompt_date >= cutoff_date:
+                    recent_prompts.append(data["prompt"])
+    except FileNotFoundError:
+        pass  # No prompts yet
+
+    return recent_prompts
+
+
+def save_prompt(prompt):
+    entry = {"date": datetime.now().strftime("%Y-%m-%d"), "prompt": prompt}
+    with open(LOG_FILE, "a") as f:
+        f.write(json.dumps(entry) + "\n")
+
+
 def get_available_models() -> list:
     """Fetches available models from ComfyUI."""
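
As a quick sanity check of the two new helpers, here is a minimal standalone sketch of the same JSONL round-trip. It re-implements the logic against a temporary file instead of importing lib.py, so it runs on its own and never touches the real prompts_log.jsonl; the sample prompts are illustrative:

    import json
    import tempfile
    from datetime import datetime, timedelta

    log_file = tempfile.NamedTemporaryFile(mode="w", suffix=".jsonl", delete=False).name

    def save_prompt(prompt):
        # Append one JSON object per line, same shape as lib.save_prompt
        entry = {"date": datetime.now().strftime("%Y-%m-%d"), "prompt": prompt}
        with open(log_file, "a") as f:
            f.write(json.dumps(entry) + "\n")

    def load_recent_prompts(days=7):
        # Keep only prompts whose date falls within the last `days` days
        cutoff = datetime.now().date() - timedelta(days=days)
        recent = []
        try:
            with open(log_file) as f:
                for line in f:
                    data = json.loads(line)
                    if datetime.strptime(data["date"], "%Y-%m-%d").date() >= cutoff:
                        recent.append(data["prompt"])
        except FileNotFoundError:
            pass
        return recent

    save_prompt("a red fox in the snow")    # illustrative prompts
    save_prompt("a city skyline at night")
    print(load_recent_prompts())            # -> both prompts, oldest first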
@@ -70,6 +104,13 @@ def rename_image() -> str | None:
 
 def create_prompt_on_openwebui(prompt: str) -> str:
     """Sends prompt to OpenWebui and returns the generated response."""
+    recent_prompts = load_recent_prompts()
+    user_content = (
+        "Here are the prompts from the last 7 days:\n\n"
+        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+        + "\n\nDo not repeat ideas, themes, or settings from the above. Now generate a new, completely original Stable Diffusion prompt that hasn't been done yet."
+    )
+
     model = random.choice(user_config["openwebui"]["models"].split(","))
     response = litellm.completion(
         api_base=user_config["openwebui"]["base_url"],
@@ -86,7 +127,7 @@ def create_prompt_on_openwebui(prompt: str) -> str:
             },
             {
                 "role": "user",
-                "content": prompt,
+                "content": user_content,
             },
         ],
         api_key=user_config["openwebui"]["api_key"],
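
With this change the raw config prompt is no longer sent as the user message; the user content is the assembled history-plus-instruction string instead. With two hypothetical logged prompts it would render as:

    Here are the prompts from the last 7 days:

    1. a red fox in the snow
    2. a city skyline at night

    Do not repeat ideas, themes, or settings from the above. Now generate a new, completely original Stable Diffusion prompt that hasn't been done yet.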
@@ -111,10 +152,10 @@ def create_prompt_on_openwebui(prompt: str) -> str:
 
 # Define the retry logic using Tenacity
 # @retry(
 #     stop=stop_after_attempt(3),
 #     wait=wait_fixed(5),
 #     before=before_log(logging.getLogger(), logging.DEBUG),
 #     retry=retry_if_exception_type(Exception)
 # )
 def generate_image(file_name: str, comfy_prompt: str) -> None:
     """Generates an image using the Comfy API with retry logic."""
@@ -122,7 +163,7 @@ def generate_image(file_name: str, comfy_prompt: str) -> None:
         # Initialize ComfyUI API and workflow
         api = ComfyApiWrapper(user_config["comfyui"]["comfyui_url"])
         wf = ComfyWorkflowWrapper("./workflow_api.json")
 
         # Set workflow parameters
         wf.set_node_param(
             "KSampler", "seed", random.getrandbits(32)
@@ -139,7 +180,7 @@ def generate_image(file_name: str, comfy_prompt: str) -> None:
         wf.set_node_param(
             "Empty Latent Image", "height", user_config["comfyui"]["height"]
         )
 
         # Validate available models and choose a random one
         valid_models = list(
             set(get_available_models())  # Get all available models from ComfyUI
@@ -151,14 +192,14 @@ def generate_image(file_name: str, comfy_prompt: str) -> None:
         wf.set_node_param(
             "Load Checkpoint", "ckpt_name", model
         )  # Set the model to be used for image generation
 
         # Generate the image using the workflow and wait for completion
         logging.debug(f"Generating image: {file_name}")
         results = api.queue_and_wait_images(
             wf, "Save Image"
         )  # Queue the workflow and wait for image generation to complete
         rename_image()  # Rename the generated image file if it exists
 
         # Save the generated image to disk
         for filename, image_data in results.items():
             with open(
@@ -166,11 +207,12 @@ def generate_image(file_name: str, comfy_prompt: str) -> None:
             ) as f:
                 f.write(image_data)
         logging.debug(f"Image generated successfully for UID: {file_name}")
 
     except Exception as e:
         logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
         raise  # Re-raise the exception for Tenacity to handle retries
 
+
 def generate_image_flux(file_name: str, comfy_prompt: str) -> None:
     """Generates an image using the Comfy API with retry logic."""
     try:
@@ -188,13 +230,13 @@ def generate_image_flux(file_name: str, comfy_prompt: str) -> None:
         wf.set_node_param(
             "CivitAI Image Saver", "filename", file_name
         )  # Set the filename prefix for the generated image
-        # wf.set_node_param(  # Set image dimensions
-        #     "Empty Latent Image", "width", user_config["comfyui"]["width"]
-        # )
-        # wf.set_node_param(
-        #     "Empty Latent Image", "height", user_config["comfyui"]["height"]
-        # )
+        wf.set_node_param(  # Set image dimensions
+            "CR Aspect Ratio", "width", user_config["comfyui"]["width"]
+        )
+        wf.set_node_param(
+            "CR Aspect Ratio", "height", user_config["comfyui"]["height"]
+        )
 
         # # Validate available models and choose a random one
         # valid_models = list(
         #     set(get_available_models())  # Get all available models from ComfyUI
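
These two calls pair with the workflow hunk near the top of the commit: ComfyWorkflowWrapper appears to resolve nodes by their _meta title, which would explain why the title drops the 🔳 emoji so that set_node_param("CR Aspect Ratio", ...) can match it by plain name. A minimal sketch of that title-based lookup (an assumption about the wrapper's behaviour, not the library's actual code; the node id and sizes are illustrative):

    import json

    workflow = json.loads("""
    {
      "41": {
        "inputs": {"width": 512, "height": 512},
        "class_type": "CR Aspect Ratio",
        "_meta": {"title": "CR Aspect Ratio"}
      }
    }
    """)

    def set_node_param(wf, title, param, value):
        # Find the node whose _meta title matches and overwrite one of its inputs
        for node in wf.values():
            if node.get("_meta", {}).get("title") == title:
                node["inputs"][param] = value

    set_node_param(workflow, "CR Aspect Ratio", "width", 1024)
    set_node_param(workflow, "CR Aspect Ratio", "height", 1024)
    print(workflow["41"]["inputs"])  # {'width': 1024, 'height': 1024}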
@@ -206,15 +248,16 @@ def generate_image_flux(file_name: str, comfy_prompt: str) -> None:
         # wf.set_node_param(
         #     "Load Checkpoint", "ckpt_name", model
         # )  # Set the model to be used for image generation
 
         # Generate the image using the workflow and wait for completion
         logging.debug(f"Generating image: {file_name}")
         results = api.queue_and_wait_images(
             # wf, "Save Image"
-            wf, "CivitAI Image Saver"
+            wf,
+            "CivitAI Image Saver",
         )  # Queue the workflow and wait for image generation to complete
         rename_image()  # Rename the generated image file if it exists
 
         # Save the generated image to disk
         for filename, image_data in results.items():
             with open(
@@ -222,7 +265,7 @@ def generate_image_flux(file_name: str, comfy_prompt: str) -> None:
             ) as f:
                 f.write(image_data)
         logging.debug(f"Image generated successfully for UID: {file_name}")
 
     except Exception as e:
         logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
         raise  # Re-raise the exception for Tenacity to handle retries
@@ -234,6 +277,7 @@ def create_image(prompt: str | None = None) -> None:
         prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
     if prompt:
         logging.info(f"Generated prompt: {prompt}")  # Log generated prompt
+        save_prompt(prompt)
         if user_config["comfyui"]["FLUX"]:
             generate_image_flux("image", prompt)
         else:
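
For orientation, every config key this file touches is visible in the diff above: comfyui.comfyui_url, comfyui.width, comfyui.height, comfyui.prompt, comfyui.FLUX, plus openwebui.base_url, openwebui.api_key and openwebui.models. They live in the git-ignored user_config.cfg; a minimal illustrative layout, assuming the usual configparser-style INI format (section names taken from the lookup keys, all values are placeholders, not from the repo):

    [comfyui]
    comfyui_url = http://localhost:8188
    width = 1024
    height = 1024
    prompt = Generate a detailed Stable Diffusion prompt
    FLUX = True

    [openwebui]
    base_url = http://localhost:3000
    api_key = replace-with-your-key
    models = model-a,model-b  ; comma-separated, one is picked at random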