Compare commits

52 Commits
0.1.5 ... main

Author SHA1 Message Date
4ea81daa47 Bump version: 0.2.3 → 0.2.4 2025-06-25 08:35:52 +01:00
070279cd78 new flux and working settings 2025-06-25 08:35:41 +01:00
6390199cb8 new flux wf 2025-06-24 19:57:57 +01:00
078c8aec34 Bump version: 0.2.2 → 0.2.3 2025-06-24 16:44:10 +01:00
d7883af040 working settings page logic for all cfg items 2025-06-24 16:44:02 +01:00
e68a9ac75d Bump version: 0.2.1 → 0.2.2 2025-06-24 15:04:34 +01:00
1da2288104 include negative prompt in flux wf 2025-06-24 15:04:25 +01:00
7a5b41b5b5 Bump version: 0.2.0 → 0.2.1 2025-06-24 14:54:23 +01:00
3e86eb1880 fix workflow for flux json 2025-06-24 14:54:11 +01:00
bec1ae2893 Bump version: 0.1.21 → 0.2.0 2025-06-24 13:02:11 +01:00
7946e5dce5 split out routes and add settings page 2025-06-24 13:01:39 +01:00
c4e86a5433 Bump version: 0.1.20 → 0.1.21 2025-06-20 10:22:07 +01:00
d791c8b4ed live reload of topics and models 2025-06-20 10:21:58 +01:00
9d80741da1 Bump version: 0.1.19 → 0.1.20 2025-06-18 16:58:36 +01:00
08f3a80169 add selecting the topic when doing random prompt 2025-06-18 16:58:20 +01:00
c7d71bfd03 Bump version: 0.1.18 → 0.1.19 2025-06-06 17:27:09 +01:00
1a0542861c basic working auth for create_image page 2025-06-06 17:26:37 +01:00
0fc549c199 Bump version: 0.1.17 → 0.1.18 2025-06-06 15:36:00 +01:00
f7f049aacb split out selecting model so can be returned to the ui 2025-06-06 15:35:50 +01:00
669dad4044 Bump version: 0.1.16 → 0.1.17 2025-06-06 15:13:20 +01:00
c726d23707 remove any preceeding text on the ollama prompt 2025-06-06 15:13:03 +01:00
2c9429d640 Bump version: 0.1.15 → 0.1.16 2025-06-06 13:10:43 +01:00
ac388b0f4e Add grouping to model list 2025-06-06 13:10:32 +01:00
4272e1d40e Bump version: 0.1.14 → 0.1.15 2025-06-06 12:55:22 +01:00
4e1e240e30 Bump version: 0.1.13 → 0.1.14 2025-06-06 12:52:15 +01:00
3d2524c2ba Bump version: 0.1.12 → 0.1.13 2025-06-06 12:52:03 +01:00
52574de881 hook doesn't work 2025-06-06 12:51:41 +01:00
ef9bf72b84 Bump version: 0.1.12 → 0.1.13 2025-06-06 12:49:35 +01:00
0cd6c6c5c8 updated bump config 2025-06-06 12:49:10 +01:00
cd6b8a8d25 Bump version: 0.1.11 → 0.1.12 2025-06-06 12:47:51 +01:00
1aa540fa03 update bump-my-version config 2025-06-06 12:47:40 +01:00
33bfee0220 remove extension from displayed models 2025-06-06 12:39:11 +01:00
f041a6afea add padding to the lightbox 2025-06-06 12:32:49 +01:00
336c03a888 allow navigation in lightbox over all images 2025-06-06 12:31:30 +01:00
9ce6ff25ea add padding to the index page 2025-06-06 12:28:39 +01:00
5be690f6c1 reformat the text on the gallery 2025-06-06 12:28:32 +01:00
4da908d0da Update docker-publish.yml (only build on tag) 2025-06-06 11:23:21 +01:00
7adaaf4491 bumped version 2025-06-06 11:21:42 +01:00
a75c7c554a better view on mobiles 2025-06-06 11:21:32 +01:00
2ff03fe101 upversion 2025-06-06 11:16:10 +01:00
4b52e5c713 code updates for date, fix update flux guff, new prompt logic 2025-06-06 11:15:43 +01:00
2a9a226dd1 bump version 2025-06-04 10:03:59 +01:00
3f2c59c5bb add mobile better mobile support 2025-06-04 10:03:44 +01:00
55ccd71383 fix the double generation 2025-06-04 10:03:30 +01:00
4b62b5cd07 was sending prompt to positive and negative 2025-06-04 10:03:17 +01:00
b8322e1fd8 updated view on gallery page 2025-05-27 18:43:58 +01:00
d749da148e scrollable text on index 2025-05-27 18:41:46 +01:00
d344674e02 bump tag 0.1.7 2025-05-19 11:15:04 +01:00
b1646a4c6e select model on create page 2025-05-19 11:14:03 +01:00
0b74672844 select model on create page 2025-05-18 21:16:11 +01:00
aabd19dd5f rework workflow to pickup the latest tag 2025-05-18 15:02:54 +01:00
423ca357f6 Bump version to 0.1.6 2025-05-18 14:59:21 +01:00
27 changed files with 1328 additions and 383 deletions

View File

@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.1.5"
current_version = "0.2.4"
parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
serialize = ["{major}.{minor}.{patch}"]
search = "{current_version}"
@ -7,12 +7,12 @@ replace = "{new_version}"
regex = false
ignore_missing_version = false
ignore_missing_files = false
tag = false
tag = true
sign_tags = false
tag_name = "v{new_version}"
tag_name = "{new_version}"
tag_message = "Bump version: {current_version} → {new_version}"
allow_dirty = false
commit = false
commit = true
message = "Bump version: {current_version} → {new_version}"
moveable_tags = []
commit_args = ""

View File

@ -2,8 +2,7 @@ name: Build and Publish Docker Image
on:
push:
branches: [main]
tags: ['*'] # triggers on any tag push
tags: ["*"] # Only triggers on tag pushes
workflow_dispatch:
jobs:
@ -13,6 +12,8 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0 # ensures tags are fetched
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@ -25,23 +26,15 @@ jobs:
IMAGE_NAME="ai-frame-image-server"
REGISTRY="${{ secrets.REGISTRY }}"
USERNAME="${{ secrets.USERNAME }}"
GIT_TAG="${GITHUB_REF#refs/tags/}"
IMAGE_TAGGED="$REGISTRY/$USERNAME/$IMAGE_NAME:$GIT_TAG"
IMAGE_LATEST="$REGISTRY/$USERNAME/$IMAGE_NAME:latest"
# Always build and tag as latest
echo "🔧 Building $IMAGE_LATEST"
docker build -t $IMAGE_LATEST .
echo "🔧 Building $IMAGE_TAGGED and $IMAGE_LATEST"
docker build -t $IMAGE_LATEST -t $IMAGE_TAGGED .
echo "📤 Pushing $IMAGE_TAGGED"
docker push $IMAGE_TAGGED
echo "📤 Pushing $IMAGE_LATEST"
docker push $IMAGE_LATEST
# If this is a tag push, tag the image accordingly
if [[ "${GITHUB_REF}" == refs/tags/* ]]; then
GIT_TAG="${GITHUB_REF#refs/tags/}"
IMAGE_TAGGED="$REGISTRY/$USERNAME/$IMAGE_NAME:$GIT_TAG"
echo "🏷️ Also tagging as $IMAGE_TAGGED"
docker tag $IMAGE_LATEST $IMAGE_TAGGED
echo "📤 Pushing $IMAGE_TAGGED"
docker push $IMAGE_TAGGED
fi
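
For orientation, a minimal Python sketch (separate from the diff) of how the rewritten build step names the images once the workflow runs on a tag push; the registry, username, and helper name are placeholders, and it mirrors GIT_TAG="${GITHUB_REF#refs/tags/}".

def image_names(github_ref: str, registry: str, username: str) -> tuple[str, str]:
    # Strip the refs/tags/ prefix the same way the shell step does,
    # then build the per-tag and :latest image references.
    git_tag = github_ref.removeprefix("refs/tags/")
    base = f"{registry}/{username}/ai-frame-image-server"
    return f"{base}:{git_tag}", f"{base}:latest"

tagged, latest = image_names("refs/tags/0.2.4", "registry.example.com", "someuser")
print(tagged)   # registry.example.com/someuser/ai-frame-image-server:0.2.4
print(latest)   # registry.example.com/someuser/ai-frame-image-server:latest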

1
.gitignore vendored
View File

@ -9,3 +9,4 @@ output/
prompts_log.jsonl
publish.sh
test.py
.vscode/launch.json

1
.vscode/launch.json vendored
View File

@ -11,6 +11,7 @@
"program": "${file}",
"console": "integratedTerminal",
"justMyCode": false,
"env": {"SECRET_KEY":"dkdkdk"}
// "args": [
// "--num_inference_steps",
// "6",

View File

@ -1,148 +1,50 @@
from flask import (
Flask,
render_template,
send_from_directory,
request,
jsonify,
)
from flask import Flask
from libs.generic import load_config
import os
import time
import threading
from apscheduler.schedulers.background import BackgroundScheduler
from libs.generic import load_config, load_recent_prompts, get_details_from_png, get_current_version
from libs.comfyui import cancel_current_job, create_image
from libs.ollama import create_prompt_on_openwebui
#workflow test commit
from routes import (
auth_routes,
gallery_routes,
image_routes,
index_routes,
job_routes,
create_routes,
settings_routes
)
user_config = load_config()
app = Flask(__name__)
app.secret_key = os.environ.get("SECRET_KEY")
image_folder = "./output"
# Inject config into routes that need it
create_routes.init_app(user_config)
auth_routes.init_app(user_config)
@app.route("/", methods=["GET"])
def index() -> str:
"""
Renders the main HTML template with image and prompt.
"""
image_filename = "./image.png"
image_path = os.path.join(image_folder, image_filename)
# Register blueprints
app.register_blueprint(index_routes.bp)
app.register_blueprint(auth_routes.bp)
app.register_blueprint(gallery_routes.bp)
app.register_blueprint(image_routes.bp)
app.register_blueprint(job_routes.bp)
app.register_blueprint(create_routes.bp)
app.register_blueprint(settings_routes.bp)
prompt = get_details_from_png(image_path)["p"]
# Optional: scheduler setup
from apscheduler.schedulers.background import BackgroundScheduler
import time
from libs.comfyui import create_image
version = get_current_version()
return render_template(
"index.html",
image=image_filename,
prompt=prompt,
reload_interval=user_config["frame"]["reload_interval"],
version=version
)
@app.route("/images", methods=["GET"])
def gallery() -> str:
images = []
for f in os.listdir(image_folder):
if f.lower().endswith(('png', 'jpg', 'jpeg', 'gif')):
images.append({'filename': f})
images = sorted(images, key=lambda x: os.path.getmtime(os.path.join(image_folder, x['filename'])), reverse=True)
return render_template("gallery.html", images=images)
@app.route("/image-details/<filename>", methods=["GET"])
def image_details(filename):
path = os.path.join(image_folder, filename)
if not os.path.exists(path):
return {"error": "File not found"}, 404
details = get_details_from_png(path)
return {
"prompt": details["p"],
"model": details["m"]
}
@app.route('/images/thumbnails/<path:filename>')
def serve_thumbnail(filename):
return send_from_directory('output/thumbnails', filename)
@app.route("/images/<filename>", methods=["GET"])
def images(filename: str) -> None:
"""
Serves the requested image file.
Args:
filename (str): The name of the image file.
Returns:
None: Sends the image file.
"""
return send_from_directory(image_folder, filename)
@app.route("/cancel", methods=["GET"])
def cancel_job() -> None:
"""
Serves the requested image file.
Args:
filename (str): The name of the image file.
Returns:
None: Sends the image file.
"""
return cancel_current_job()
@app.route("/create", methods=["GET", "POST"])
def create() -> str:
"""Handles image creation requests.
Args:
None
Returns:
str: Redirect to the main page or a JSON response.
"""
prompt = request.form.get("prompt") if request.method == "POST" else None
if prompt is None:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
def create_image_in_background():
create_image(prompt)
threading.Thread(target=create_image_in_background).start()
return render_template('image_queued.html', prompt=prompt)
def scheduled_task() -> None:
"""Executes the scheduled image generation task."""
def scheduled_task():
print(f"Executing scheduled task at {time.strftime('%Y-%m-%d %H:%M:%S')}")
create_image(None)
@app.route("/create_image", methods=["GET"])
def create_image_endpoint() -> str:
"""
Renders the create image template with image and prompt.
"""
return render_template(
"create_image.html"
)
if user_config["frame"]["auto_regen"] == "True":
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
scheduler = BackgroundScheduler()
regen_time = user_config["frame"]["regen_time"].split(":")
scheduler.add_job(
scheduled_task,
"cron",
hour=regen_time[0],
minute=regen_time[1],
id="scheduled_task",
max_instances=1, # prevent overlapping
replace_existing=True # don't double-schedule
)
h, m = user_config["frame"]["regen_time"].split(":")
scheduler.add_job(scheduled_task, "cron", hour=h, minute=m, id="scheduled_task", max_instances=1, replace_existing=True)
scheduler.start()
os.makedirs(image_folder, exist_ok=True)
app.run(host="0.0.0.0", port=user_config["frame"]["port"], debug=True)
os.makedirs("./output", exist_ok=True)
app.run(host="0.0.0.0", port=user_config["frame"]["port"], debug=True)

6
config.cfg Normal file
View File

@ -0,0 +1,6 @@
[settings]
my_items = chelsea fc, stamford bridge, charpi dog, soccer, IT, computing, cybernetic insects, alien, predator, the simpsons, south park, nintendo, sega, xbox, playstation, pixelart, bmw e90, test
[comfyui]
topics = chelsea fc, stamford bridge, charpi dog, soccer, IT, computing, cybernetic insects, alien, predator, the simpsons, south park, nintendo, sega, xbox, playstation, pixelart, bmw e90, gg

View File

@ -32,12 +32,9 @@ def get_available_models() -> list:
response = requests.get(url)
if response.status_code == 200:
data = response.json()
return (
data.get("CheckpointLoaderSimple", {})
.get("input", {})
.get("required", {})
.get("ckpt_name", [])[0]
)
general = data.get("CheckpointLoaderSimple", {}).get("input", {}).get("required", {}).get("ckpt_name", [])[0]
flux = data.get("UnetLoaderGGUF", {}).get("input", {}).get("required", {}).get("unet_name", [])[0]
return general + flux
else:
print(f"Failed to fetch models: {response.status_code}")
return []
@ -64,13 +61,14 @@ def generate_image(
file_name: str,
comfy_prompt: str,
workflow_path: str = "./workflow_sdxl.json",
prompt_node: str = "CLIP Text Encode (Prompt)",
prompt_node: str = "Positive",
seed_node: str = "KSampler",
seed_param: str = "seed",
save_node: str = "Save Image",
save_param: str = "filename_prefix",
model_node: Optional[str] = "Load Checkpoint",
model_param: Optional[str] = "ckpt_name",
model: Optional[str] = "None",
) -> None:
"""Generates an image using the Comfy API with configurable workflow settings."""
try:
@ -100,21 +98,7 @@ def generate_image(
user_config["comfyui"]["height"],
)
# Conditionally set model if node and param are provided
if model_node and model_param:
if "FLUX" in workflow_path:
valid_models = user_config["comfyui:flux"]["models"].split(",")
else:
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(
set(get_available_models()) & set(available_model_list)
)
if not valid_models:
raise Exception("No valid models available.")
model = random.choice(valid_models)
wf.set_node_param(model_node, model_param, model)
wf.set_node_param(model_node, model_param, model)
# Generate image
logging.debug(f"Generating image: {file_name}")
@ -134,24 +118,41 @@ def generate_image(
except Exception as e:
logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
raise
def create_image(prompt: str | None = None) -> None:
"""Main function for generating images."""
def select_model(model: str) -> tuple[str, str]:
use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
if model == "Random":
selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
elif "flux" in model.lower():
selected_workflow = "FLUX"
else:
selected_workflow = "SDXL"
if model == "Random":
if selected_workflow == "FLUX":
valid_models = user_config["comfyui:flux"]["models"].split(",")
else: # SDXL
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(set(get_available_models()) & set(available_model_list))
model = random.choice(valid_models)
return selected_workflow, model
def create_image(prompt: str | None = None, model: str = "Random") -> None:
"""Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
if prompt is None:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
if not prompt:
logging.error("No prompt generated.")
return
save_prompt(prompt)
use_flux = json.loads((user_config["comfyui"].get("FLUX", False)).lower())
only_flux = json.loads((user_config["comfyui"].get("ONLY_FLUX", False)).lower())
selected_workflow = "SDXL"
if use_flux:
selected_workflow = "FLUX" if only_flux else random.choice(["FLUX", "SDXL"])
selected_workflow, model = select_model(model)
if selected_workflow == "FLUX":
generate_image(
@ -163,12 +164,11 @@ def create_image(prompt: str | None = None) -> None:
seed_param="seed",
save_node="CivitAI Image Saver",
save_param="filename",
model_node="CivitAI Image Saver",
model_param="modelname",
model_node="UnetLoaderGGUFAdvancedDisTorchMultiGPU",
model_param="unet_name",
model=model
)
else:
generate_image("image", prompt)
else: # SDXL
generate_image("image", comfy_prompt=prompt, model=model)
logging.info(f"{selected_workflow} generation started with prompt: {prompt}")
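
A standalone sketch (not part of the diff) of the workflow-selection rules the new select_model() applies; the flags correspond to the FLUX/ONLY_FLUX config options, and the helper name and the SDXL checkpoint name are placeholders.

import random

def pick_workflow(model: str, use_flux: bool, only_flux: bool) -> str:
    # "Random" consults the FLUX flags (with a coin flip when both workflows are
    # allowed); an explicit model name is routed by whether it contains "flux".
    if model == "Random":
        return "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
    return "FLUX" if "flux" in model.lower() else "SDXL"

print(pick_workflow("Random", use_flux=True, only_flux=False))                          # FLUX or SDXL
print(pick_workflow("flux1-dev-Q4_0.gguf", use_flux=False, only_flux=False))            # FLUX
print(pick_workflow("someSDXLCheckpoint.safetensors", use_flux=True, only_flux=True))   # SDXL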

View File

@ -66,6 +66,7 @@ def rename_image() -> str | None:
def get_details_from_png(path):
try:
date = datetime.fromtimestamp(os.path.getctime(path)).strftime("%d-%m-%Y")
with Image.open(path) as img:
try:
# Flux workflow
@ -77,7 +78,7 @@ def get_details_from_png(path):
data = json.loads(img.info["prompt"])
prompt = data['6']['inputs']['text']
model = data['4']['inputs']['ckpt_name']
return {"p":prompt,"m":model} or {"p":"","m":""}
return {"p":prompt,"m":model,"d":date} or {"p":"","m":"","c":""}
except Exception as e:
print(f"Error reading metadata from {path}: {e}")
return ""
@ -98,6 +99,18 @@ def get_current_version():
print("Error running bump-my-version:", e)
return None
def load_models_from_config():
flux_models = load_config()["comfyui:flux"]["models"].split(",")
sdxl_models = load_config()["comfyui"]["models"].split(",")
sorted_flux_models = sorted(flux_models, key=str.lower)
sorted_sdxl_models = sorted(sdxl_models, key=str.lower)
return sorted_sdxl_models, sorted_flux_models
def load_topics_from_config():
topics = load_config()["comfyui"]["topics"].split(",")
sorted_topics = sorted(topics, key=str.lower)
return sorted_topics
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]

View File

@ -12,25 +12,33 @@ LOG_FILE = "./prompts_log.jsonl"
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]
def create_prompt_on_openwebui(prompt: str) -> str:
def create_prompt_on_openwebui(prompt: str, topic: str = "random") -> str:
"""Sends prompt to OpenWebui and returns the generated response."""
topic_instruction = ""
selected_topic = ""
# Unique list of recent prompts
recent_prompts = list(set(load_recent_prompts()))
# Decide on whether to include a topic (e.g., 30% chance to include)
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
topic_instruction = ""
if random.random() < 0.5 and topics:
if topic == "random":
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
selected_topic = random.choice(topics)
elif topic != "":
selected_topic = topic
else:
# Decide on whether to include a topic (e.g., 30% chance to include)
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
if random.random() < 0.3 and topics:
selected_topic = random.choice(topics)
if selected_topic != "":
topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
user_content = (
"Here are the prompts from the last 7 days:\n\n"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+ "\n\nDo not repeat ideas, themes, or settings from the above. "
"Now generate a new, completely original Stable Diffusion prompt that hasn't been done yet."
"Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors.”"
+ topic_instruction
+ "Avoid prompts similar to the following:"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
)
model = random.choice(user_config["openwebui"]["models"].split(","))
response = litellm.completion(
api_base=user_config["openwebui"]["base_url"],
@ -67,4 +75,4 @@ def create_prompt_on_openwebui(prompt: str) -> str:
# )
# prompt = response["choices"][0]["message"]["content"].strip('"')
logging.debug(prompt)
return prompt
return prompt.split(": ")[-1]
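
A standalone sketch (not part of the diff) of how the new topic argument is handled by create_prompt_on_openwebui(); the helper name is illustrative and the topic strings are taken from the sample config above.

import random

def choose_topic(topic: str, configured_topics: list[str]) -> str:
    # "random" picks from the configured list, an explicit topic is used as-is,
    # and an empty topic keeps the old behaviour of a ~30% chance to add one.
    if topic == "random":
        return random.choice(configured_topics) if configured_topics else ""
    if topic != "":
        return topic
    if configured_topics and random.random() < 0.3:
        return random.choice(configured_topics)
    return ""

print(choose_topic("random", ["alien", "pixelart"]))     # one of the configured topics
print(choose_topic("nintendo", ["alien", "pixelart"]))   # nintendo
print(choose_topic("", ["alien", "pixelart"]))           # usually ""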

11
routes/__init__.py Normal file
View File

@ -0,0 +1,11 @@
from . import auth_routes, create_routes, gallery_routes, image_routes, index_routes, job_routes, settings_routes
__all__ = [
"auth_routes",
"create_routes",
"gallery_routes",
"image_routes",
"index_routes",
"job_routes",
"settings_routes"
]

29
routes/auth_routes.py Normal file
View File

@ -0,0 +1,29 @@
from flask import Blueprint, render_template, request, redirect, url_for, session
from libs.generic import load_models_from_config, load_topics_from_config
from urllib.parse import urlparse, urljoin
bp = Blueprint("auth_routes", __name__)
user_config = None
def init_app(config):
global user_config
user_config = config
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return test_url.scheme in ("http", "https") and ref_url.netloc == test_url.netloc
@bp.route("/login", methods=["GET", "POST"])
def login():
next_url = request.args.get("next") if request.method == "GET" else request.form.get("next")
if request.method == "POST":
if request.form["password"] == user_config["frame"]["password_for_auth"]:
session["authenticated"] = True
if next_url and is_safe_url(next_url):
return redirect(next_url)
return redirect(url_for("create_image")) # fallback
return redirect(url_for("auth_routes.login", next=next_url)) # retry with `next`
return render_template("login.html", next=next_url)

41
routes/create_routes.py Normal file
View File

@ -0,0 +1,41 @@
from flask import Blueprint, request, render_template, redirect, url_for, session
import threading
from libs.comfyui import create_image, select_model, get_available_models
from libs.ollama import create_prompt_on_openwebui
from libs.generic import load_models_from_config, load_topics_from_config
import os
bp = Blueprint("create_routes", __name__)
user_config = None # will be set in init_app
@bp.route("/create", methods=["GET", "POST"])
def create():
if request.method == "POST":
prompt = request.form.get("prompt")
selected_workflow, model = select_model(request.form.get("model") or "Random")
topic = request.form.get("topic")
if not prompt:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic)
threading.Thread(target=lambda: create_image(prompt, model)).start()
return redirect(url_for("create_routes.image_queued", prompt=prompt, model=model.split(".")[0]))
return render_template("create_image.html", models=load_models_from_config()[0]+load_models_from_config()[1], topics=load_topics_from_config())
@bp.route("/image_queued")
def image_queued():
prompt = request.args.get("prompt", "No prompt provided.")
model = request.args.get("model", "No model selected.").split(".")[0]
return render_template("image_queued.html", prompt=prompt, model=model)
@bp.route("/create_image", methods=["GET"])
def create_image_page():
if user_config["frame"]["create_requires_auth"] == "True" and not session.get("authenticated"):
return redirect(url_for("auth_routes.login", next=request.path))
return render_template("create_image.html", models=load_models_from_config()[0]+load_models_from_config()[1], topics=load_topics_from_config())
def init_app(config):
global user_config
user_config = config
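
A standalone sketch (not part of the diff) of the POST contract the buttons on create_image.html use against /create; the host and port are placeholders and the requests package is assumed.

import requests

resp = requests.post(
    "http://frame.local:5000/create",
    data={"prompt": "", "model": "Random", "topic": "random"},
    allow_redirects=False,
)
# An empty prompt makes the route ask OpenWebUI for one; the response then
# redirects to /image_queued with the prompt and model in the query string.
print(resp.status_code)              # expected 302
print(resp.headers.get("Location"))  # /image_queued?prompt=...&model=...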

15
routes/gallery_routes.py Normal file
View File

@ -0,0 +1,15 @@
from flask import Blueprint, render_template
import os
bp = Blueprint("gallery_routes", __name__)
image_folder = "./output"
@bp.route("/images", methods=["GET"])
def gallery():
images = [
{"filename": f}
for f in os.listdir(image_folder)
if f.lower().endswith(("png", "jpg", "jpeg", "gif"))
]
images = sorted(images, key=lambda x: os.path.getmtime(os.path.join(image_folder, x["filename"])), reverse=True)
return render_template("gallery.html", images=images)

22
routes/image_routes.py Normal file
View File

@ -0,0 +1,22 @@
from flask import Blueprint, send_from_directory, jsonify
import os
from libs.generic import get_details_from_png
bp = Blueprint("image_routes", __name__)
image_folder = "./output"
@bp.route("/images/<filename>", methods=["GET"])
def serve_image(filename):
return send_from_directory(image_folder, filename)
@bp.route("/images/thumbnails/<path:filename>")
def serve_thumbnail(filename):
return send_from_directory("output/thumbnails", filename)
@bp.route("/image-details/<filename>", methods=["GET"])
def image_details(filename):
path = os.path.join(image_folder, filename)
if not os.path.exists(path):
return jsonify({"error": "File not found"}), 404
details = get_details_from_png(path)
return jsonify({"prompt": details["p"], "model": details["m"], "date": details["d"]})

22
routes/index_routes.py Normal file
View File

@ -0,0 +1,22 @@
from flask import Blueprint, render_template
import os
from libs.generic import get_details_from_png, get_current_version, load_config
bp = Blueprint("index_routes", __name__)
image_folder = "./output"
user_config = load_config()
@bp.route("/", methods=["GET"])
def index():
image_filename = "./image.png"
image_path = os.path.join(image_folder, image_filename)
prompt = get_details_from_png(image_path)["p"]
version = get_current_version()
return render_template(
"index.html",
image=image_filename,
prompt=prompt,
reload_interval=user_config["frame"]["reload_interval"],
version=version,
)

8
routes/job_routes.py Normal file
View File

@ -0,0 +1,8 @@
from flask import Blueprint
from libs.comfyui import cancel_current_job
bp = Blueprint("job_routes", __name__)
@bp.route("/cancel", methods=["GET"])
def cancel_job():
return cancel_current_job()

98
routes/settings_routes.py Normal file
View File

@ -0,0 +1,98 @@
from flask import Blueprint, render_template, request, redirect, url_for, session
import configparser
from libs.generic import load_topics_from_config, load_models_from_config
bp = Blueprint('settings_route', __name__)
CONFIG_PATH = "./user_config.cfg"
@bp.route('/settings', methods=['GET', 'POST'])
def config_editor():
if not session.get("authenticated"):
return redirect(url_for("auth_routes.login", next=request.path))
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
topics = config.get('comfyui', 'topics', fallback='').split(',')
general_models = config.get('comfyui', 'models', fallback='').split(',')
flux_models = config.get('comfyui:flux', 'models', fallback='').split(',')
topics = [t.strip() for t in topics if t.strip()]
general_models = [m.strip() for m in general_models if m.strip()]
flux_models = [m.strip() for m in flux_models if m.strip()]
if request.method == 'POST':
if 'new_topic' in request.form:
new_topic = request.form.get('new_topic', '').strip()
if new_topic and new_topic not in topics:
topics.append(new_topic)
if 'delete_topic' in request.form:
to_delete = request.form.getlist('delete_topic')
topics = [topic for topic in topics if topic not in to_delete]
if 'new_model' in request.form:
new_model = request.form.get('new_model', '').strip()
if new_model:
if 'flux' in new_model and new_model not in flux_models:
flux_models.append(new_model)
elif 'flux' not in new_model and new_model not in general_models:
general_models.append(new_model)
if 'delete_model' in request.form:
to_delete = request.form.getlist('delete_model')
general_models = [m for m in general_models if m not in to_delete]
flux_models = [m for m in flux_models if m not in to_delete]
# Save models/topics into the shared config object
if not config.has_section('comfyui'):
config.add_section('comfyui')
if not config.has_section('comfyui:flux'):
config.add_section('comfyui:flux')
config.set('comfyui', 'models', ','.join(general_models))
config.set('comfyui:flux', 'models', ','.join(flux_models))
config.set('comfyui', 'topics', ','.join(topics))
# Handle dynamic CFG field updates (excluding DEFAULT and protected keys)
for section in config.sections():
for key in config[section]:
if key == 'models' and section in ('comfyui', 'comfyui:flux'):
continue
if key == 'topics' and section == 'comfyui':
continue
form_key = f"{section}:{key}"
if form_key in request.form:
new_value = request.form[form_key]
# Prevent overwriting masked secrets unless actually changed
if key in ('password_for_auth', 'api_key') and new_value == "********":
continue # Skip overwriting
config[section][key] = new_value
# Save everything at once
with open(CONFIG_PATH, 'w') as configfile:
config.write(configfile)
return redirect(url_for('settings_route.config_editor'))
# Prepare filtered config for display
filtered_config = {}
for section in config.sections():
items = {
k: v for k, v in config[section].items()
if not (
(k == 'models' and section in ('comfyui', 'comfyui:flux')) or
(k == 'topics' and section == 'comfyui')
)
}
if items: # only include non-empty sections
filtered_config[section] = items
return render_template(
'settings.html',
topics=sorted(topics,key=str.lower),
models=sorted(general_models + flux_models,key=str.lower),
config_sections=filtered_config.keys(),
config_values=filtered_config
)
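
A standalone sketch (not part of the diff) of the masked-secret rule config_editor() applies when saving: a submitted value of "********" for a protected key leaves the stored value untouched. The helper name and example values are placeholders.

def resolve_config_value(key: str, submitted: str, stored: str) -> str:
    # Mirrors the check in config_editor(): masked placeholders are ignored.
    if key in ("password_for_auth", "api_key") and submitted == "********":
        return stored
    return submitted

print(resolve_config_value("api_key", "********", "keep-me"))  # keep-me
print(resolve_config_value("api_key", "new-key", "keep-me"))   # new-key
print(resolve_config_value("port", "5001", "5000"))            # 5001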

View File

@ -1,15 +1,19 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Create An Image</title>
<style>
/* ---------- reset ---------- */
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
/* ---------- layout ---------- */
body {
display: flex;
flex-direction: column;
@ -21,6 +25,7 @@
font-family: Arial, sans-serif;
padding: 20px;
}
textarea {
width: 80vw;
height: 200px;
@ -34,11 +39,15 @@
color: #eee;
border: 1px solid #333;
}
.button-group {
display: flex;
gap: 20px;
align-items: center;
}
button {
button,
select {
background: #333;
color: white;
border: none;
@ -48,7 +57,9 @@
cursor: pointer;
transition: background 0.3s;
}
button:hover {
button:hover,
select:hover {
background: #555;
}
@ -60,9 +71,10 @@
align-items: center;
justify-content: center;
background: rgba(0, 0, 0, 0.6);
visibility: hidden; /* toggled in JS */
visibility: hidden;
z-index: 1000;
}
.spinner {
width: 50px;
height: 50px;
@ -71,16 +83,78 @@
border-radius: 50%;
animation: spin 0.8s linear infinite;
}
@keyframes spin { to { transform: rotate(360deg); } }
@keyframes spin {
to {
transform: rotate(360deg);
}
}
@media (max-width: 600px) {
body {
min-height: 100dvh;
height: auto;
justify-content: flex-start;
padding-top: 40px;
}
.button-group {
flex-direction: column;
align-items: stretch;
width: 100%;
}
button,
select {
width: 100%;
}
textarea {
height: 150px;
}
}
</style>
</head>
<body>
<h1 style="margin-bottom: 20px;">Create An Image</h1>
<textarea id="prompt-box" placeholder="Enter your custom prompt here..."></textarea>
<div class="button-group">
<button onclick="showSpinner(); location.href='/'">Back</button>
<button onclick="sendPrompt()">Send Prompt</button>
<button onclick="showSpinner(); location.href='/create'">Random Prompt</button>
<button onclick="randomPrompt()">Random Prompt</button>
<select id="model-select">
<option value="" selected>Random</option>
<!-- Group: FLUX -->
<optgroup label="FLUX">
{% for m in models if 'flux' in m|lower %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
{% endfor %}
</optgroup>
<!-- Group: SDXL -->
<optgroup label="SDXL">
{% for m in models if 'flux' not in m|lower %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
{% endfor %}
</optgroup>
</select>
<select id="topic-select">
<option value="">No Topic</option>
<option value="random">Random</option>
<optgroup label="Topics">
{% for t in topics %}
<option value="{{ t }}">{{ t }}</option>
{% endfor %}
</optgroup>
</select>
</div>
<!-- waiting overlay -->
@ -90,30 +164,56 @@
<script>
const overlay = document.getElementById('spinner-overlay');
function showSpinner() {
overlay.style.visibility = 'visible';
}
function showSpinner() { overlay.style.visibility = 'visible'; }
function sendPrompt() {
showSpinner();
const prompt = document.getElementById('prompt-box').value;
const model = document.getElementById('model-select').value;
const formData = new URLSearchParams();
formData.append('prompt', prompt);
formData.append('model', model);
fetch('/create', {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: formData.toString()
})
.then(response => {
// If server redirects, follow it; otherwise go to /create
window.location.href = response.redirected ? response.url : '/create';
.then(response => {
window.location.href = response.redirected ? response.url : '/create';
})
.catch(error => {
overlay.style.visibility = 'hidden';
alert("Error sending prompt: " + error);
});
}
// wrapper for Random Prompt button so it also sends the model
function randomPrompt() {
showSpinner();
const model = document.getElementById('model-select').value;
const topic = document.getElementById('topic-select').value; // this line was missing
const formData = new URLSearchParams();
formData.append('model', model);
formData.append('topic', topic); // include topic in request
fetch('/create', {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: formData.toString()
})
.catch(error => {
overlay.style.visibility = 'hidden'; // hide spinner on failure
alert("Error sending prompt: " + error);
});
.then(response => {
window.location.href = response.redirected ? response.url : '/create';
})
.catch(error => {
overlay.style.visibility = 'hidden';
alert("Error requesting random prompt: " + error);
});
}
</script>
</body>
</html>
</html>

View File

@ -55,6 +55,7 @@
align-items: center;
flex-direction: column;
z-index: 999;
padding: 20px 0;
}
.lightbox img {
@ -100,6 +101,10 @@
max-width: 80%;
text-align: left;
margin-top: 20px;
max-height: 25vh;
/* NEW: restrict height */
overflow-y: auto;
/* NEW: allow vertical scroll */
}
/* Back button fixed top right */
@ -158,6 +163,10 @@
font-size: 14px;
max-width: 90%;
padding: 8px 16px;
max-height: 20vh;
/* smaller height for mobile */
overflow-y: auto;
/* keep scroll on mobile too */
}
.button-link {
@ -255,7 +264,8 @@
if (detailsCache[filename]) {
document.getElementById("lightbox-prompt").textContent =
`Model: ${detailsCache[filename].model}\n\n${detailsCache[filename].prompt}`;
`Model:${detailsCache[filename].model} - Created:${detailsCache[filename].date}\n\n${detailsCache[filename].prompt}`;
} else {
document.getElementById("lightbox-prompt").textContent = "Loading…";
@ -267,7 +277,7 @@
.then(data => {
detailsCache[filename] = data; // Cache the data
document.getElementById("lightbox-prompt").textContent =
`Model: ${data.model}\n\n${data.prompt}`;
`Model:${data.model} - Created:${data.date}\n\n${data.prompt}`;
})
.catch(() => {
document.getElementById("lightbox-prompt").textContent = "Couldnt load details.";
@ -277,8 +287,17 @@
function nextImage() {
const images = getGalleryImages();
currentIndex = (currentIndex + 1) % images.length;
showImageAndLoadDetails(currentIndex);
if (currentIndex + 1 >= images.length && loadedCount < allImages.length) {
loadNextBatch();
// Wait briefly to ensure DOM updates
setTimeout(() => {
currentIndex++;
showImageAndLoadDetails(currentIndex);
}, 100);
} else {
currentIndex = (currentIndex + 1) % images.length;
showImageAndLoadDetails(currentIndex);
}
}
function prevImage() {
@ -287,6 +306,7 @@
showImageAndLoadDetails(currentIndex);
}
function closeLightbox() {
document.getElementById("lightbox").style.display = "none";
}

View File

@ -52,10 +52,11 @@
</style>
</head>
<body>
<div class="message">Image will be made using prompt:</div>
<div class="message">Image will be made with <i>{{ model }}</i> using prompt:</div>
<div class="prompt-text">
{{ prompt }}
</div>
<button onclick="location.href='/'">Home</button>
</body>
</html>

View File

@ -21,9 +21,12 @@
background: black;
color: white;
font-family: Arial, sans-serif;
position: relative; /* So fixed elements inside work well */
position: relative;
padding-top: 20px;
padding-bottom: 20px;
}
.image-container {
max-width: 90vw;
max-height: 80vh;
@ -50,6 +53,10 @@
border-radius: 10px;
max-width: 80vw;
text-align: left;
max-height: 30vh;
/* NEW: limit height */
overflow-y: auto;
/* NEW: allow scrolling */
}
.button-group {
@ -85,9 +92,47 @@
font-size: 12px;
font-family: monospace;
user-select: none;
pointer-events: none;
opacity: 0.6;
}
.version a {
color: inherit;
text-decoration: none;
cursor: pointer;
}
.version a:hover {
text-decoration: underline;
}
@media (max-width: 768px) {
.image-container {
max-width: 100vw;
max-height: 50vh;
}
img {
max-width: 100%;
max-height: 100%;
}
.prompt {
max-height: 20vh;
font-size: 14px;
padding: 10px 15px;
}
.button-group {
flex-direction: column;
gap: 10px;
}
.button-link {
font-size: 14px;
padding: 8px 16px;
}
}
</style>
<script>
setInterval(() => {
@ -99,7 +144,7 @@
<body>
{% if image %}
<div class="image-container">
<img src="{{ url_for('images', filename=image) }}" alt="Latest Image" />
<img src="{{ url_for('image_routes.serve_image', filename=image) }}" alt="Latest Image" />
</div>
{% if prompt %}
<div class="prompt">{{ prompt }}</div>
@ -113,7 +158,9 @@
{% endif %}
<!-- Version number at bottom right -->
<div class="version">v{{ version }}</div>
<div class="version">
<a href="{{ url_for('settings_route.config_editor') }}">v{{ version }}</a>
</div>
</body>
</html>
</html>

73
templates/login.html Normal file
View File

@ -0,0 +1,73 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Login</title>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 100vh;
background: black;
color: white;
font-family: Arial, sans-serif;
padding: 20px;
text-align: center;
}
.message {
font-size: 22px;
margin-bottom: 20px;
}
.prompt-text {
font-size: 20px;
background: #111;
padding: 20px;
border-radius: 10px;
border: 1px solid #333;
max-width: 80vw;
margin-bottom: 30px;
}
input[type="password"] {
padding: 10px;
border-radius: 8px;
border: 1px solid #555;
background: #222;
color: white;
font-size: 16px;
margin-bottom: 20px;
width: 250px;
}
button {
background: #333;
color: white;
border: none;
padding: 10px 20px;
border-radius: 8px;
font-size: 16px;
cursor: pointer;
transition: background 0.3s;
}
button:hover {
background: #555;
}
</style>
</head>
<body>
<div class="message">Please enter the password to continue:</div>
<form method="post">
<div class="prompt-text">
<input type="password" name="password" placeholder="Password" required>
<input type="hidden" name="next" value="{{ next }}">
</div>
<button type="submit">Login</button>
</form>
</body>
</html>

192
templates/settings.html Normal file
View File

@ -0,0 +1,192 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Config Editor</title>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
background: black;
color: white;
font-family: Arial, sans-serif;
min-height: 100vh;
padding: 40px 20px;
display: flex;
justify-content: space-between;
flex-wrap: wrap;
}
.box {
background: #1a1a1a;
padding: 20px;
border-radius: 12px;
width: 45%;
min-width: 300px;
margin-bottom: 20px;
}
h2 {
margin-bottom: 20px;
font-size: 24px;
}
form {
margin-bottom: 20px;
}
select,
input[type="text"] {
width: 100%;
padding: 10px;
margin-bottom: 10px;
font-size: 16px;
border-radius: 6px;
border: none;
outline: none;
}
button {
width: 100%;
background: #333;
color: white;
text-decoration: none;
padding: 10px;
border-radius: 8px;
font-size: 16px;
cursor: pointer;
transition: background 0.3s;
border: none;
}
button:hover {
background: #555;
}
.button-group {
text-align: center;
margin-top: 20px;
}
.button-link {
background: #333;
height: 40px;
color: white;
text-decoration: none;
padding: 10px 20px;
border-radius: 8px;
font-size: 16px;
transition: background 0.3s;
display: inline-block;
text-align: center;
}
.button-link:hover {
background: #555;
}
@media (max-width: 768px) {
body {
flex-direction: column;
align-items: center;
}
.box {
width: 90%;
margin-bottom: 30px;
}
}
.back-button-wrapper {
width: 100%;
display: flex;
justify-content: center;
margin-top: 20px;
}
</style>
</head>
<body>
<div class="box">
<h2>Topics</h2>
<form method="post">
<select name="delete_topic">
{% for item in topics %}
<option value="{{ item }}">{{ item }}</option>
{% endfor %}
</select>
<button name="delete_topic" type="submit">Delete Selected Topic</button>
</form>
<form method="post">
<input type="text" name="new_topic" placeholder="New topic">
<button name="new_topic" type="submit">Add Topic</button>
</form>
</div>
<div class="box">
<h2>Models</h2>
<form method="post">
<select name="delete_model">
{% for item in models %}
<option value="{{ item }}">{{ item }}</option>
{% endfor %}
</select>
<button name="delete_model" type="submit">Delete Selected Model</button>
</form>
<form method="post">
<input type="text" name="new_model" placeholder="New model">
<button name="new_model" type="submit">Add Model</button>
</form>
</div>
<div class="box" style="width: 100%;">
<h2>Config Values</h2>
<form method="post" style="display:flex; flex-wrap:wrap; justify-content:space-between;">
{% for section in config_sections %}
<div class="box">
<h2>[{{ section }}]</h2>
{% for key, value in config_values[section].items() %}
<label>{{ key }}</label>
{% if value.lower() in ['true', 'false'] %}
<select name="{{ section }}:{{ key }}">
<option value="True" {% if value.lower()=='true' %}selected{% endif %}>True</option>
<option value="False" {% if value.lower()=='false' %}selected{% endif %}>False</option>
</select>
{% else %}
{% if key in ['password_for_auth', 'api_key'] %}
<input type="text" name="{{ section }}:{{ key }}" value="********" placeholder="********">
{% elif value.lower() in ['true', 'false'] %}
<select name="{{ section }}:{{ key }}">
<option value="True" {% if value.lower()=='true' %}selected{% endif %}>True</option>
<option value="False" {% if value.lower()=='false' %}selected{% endif %}>False</option>
</select>
{% else %}
<input type="text" name="{{ section }}:{{ key }}" value="{{ value }}">
{% endif %}
{% endif %}
{% endfor %}
</div>
{% endfor %}
<div class="box" style="width: 100%;">
<button type="submit">Save Config</button>
</div>
</form>
</div>
<div class="back-button-wrapper">
<a href="/" class="button-link">Back to Home</a>
</div>
</body>
</html>

View File

@ -3,6 +3,8 @@ reload_interval = 30000
auto_regen = True
regen_time = 03:00
port = 5000
create_requires_auth = False
password_for_auth = create
[comfyui]
comfyui_url = http://comfyui

View File

@ -6,7 +6,7 @@
1
],
"vae": [
"27",
"73",
0
]
},
@ -15,75 +15,6 @@
"title": "VAE Decode"
}
},
"22": {
"inputs": {
"clip_name1": "t5/t5xxl_fp8_e4m3fn.safetensors",
"clip_name2": "clip_l.safetensors",
"type": "flux",
"device": "default"
},
"class_type": "DualCLIPLoader",
"_meta": {
"title": "DualCLIPLoader"
}
},
"27": {
"inputs": {
"vae_name": "FLUX1/ae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"32": {
"inputs": {
"upscale_model": [
"33",
0
],
"image": [
"8",
0
]
},
"class_type": "ImageUpscaleWithModel",
"_meta": {
"title": "Upscale Image (using Model)"
}
},
"33": {
"inputs": {
"model_name": "4x-UltraSharp.pth"
},
"class_type": "UpscaleModelLoader",
"_meta": {
"title": "Load Upscale Model"
}
},
"34": {
"inputs": {
"upscale_method": "lanczos",
"scale_by": 0.5,
"image": [
"32",
0
]
},
"class_type": "ImageScaleBy",
"_meta": {
"title": "Half size"
}
},
"35": {
"inputs": {
"unet_name": "flux1-dev-Q4_0.gguf"
},
"class_type": "UnetLoaderGGUF",
"_meta": {
"title": "Unet Loader (GGUF)"
}
},
"40": {
"inputs": {
"int": 20
@ -126,10 +57,7 @@
"50",
1
],
"scheduler": [
"49",
1
],
"scheduler_name": "normal",
"positive": [
"44",
0
@ -172,7 +100,7 @@
]
},
"images": [
"34",
"8",
0
]
},
@ -183,7 +111,7 @@
},
"44": {
"inputs": {
"text": "",
"text": "Yautja Predator wielding flamethrower in smoky, cyberpunk alleyway darkness",
"speak_and_recognation": {
"__value__": [
false,
@ -198,7 +126,7 @@
},
"45": {
"inputs": {
"text": "",
"text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
"speak_and_recognation": {
"__value__": [
false,
@ -224,18 +152,19 @@
]
},
"clip": [
"68",
1
"72",
0
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
"title": "Prompt Encoder"
}
},
"48": {
"inputs": {
"seed": 903006749445372
"seed": 47371998700984,
"increment": 1
},
"class_type": "Seed Generator (Image Saver)",
"_meta": {
@ -248,7 +177,7 @@
},
"class_type": "Scheduler Selector (Comfy) (Image Saver)",
"_meta": {
"title": "Scheduler Selector"
"title": "Scheduler"
}
},
"50": {
@ -257,66 +186,27 @@
},
"class_type": "Sampler Selector (Image Saver)",
"_meta": {
"title": "Sampler Selector (Image Saver)"
}
},
"51": {
"inputs": {
"images": [
"8",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
"title": "Sampler"
}
},
"52": {
"inputs": {
"float": 3.5
"float": 3.500000000000001
},
"class_type": "Float Literal (Image Saver)",
"_meta": {
"title": "CFG"
"title": "CFG Scale"
}
},
"53": {
"inputs": {
"float": 1
"float": 1.0000000000000002
},
"class_type": "Float Literal (Image Saver)",
"_meta": {
"title": "Denoise"
}
},
"60": {
"inputs": {
"clip_l": "",
"t5xxl": [
"44",
0
],
"guidance": [
"52",
0
],
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"68",
1
]
},
"class_type": "CLIPTextEncodeFlux",
"_meta": {
"title": "CLIPTextEncodeFlux"
}
},
"62": {
"inputs": {
"noise": [
@ -342,7 +232,7 @@
},
"class_type": "SamplerCustomAdvanced",
"_meta": {
"title": "SamplerCustomAdvanced"
"title": "Custom Sampler"
}
},
"63": {
@ -354,7 +244,7 @@
},
"class_type": "KSamplerSelect",
"_meta": {
"title": "KSamplerSelect"
"title": "KSampler Select"
}
},
"64": {
@ -372,13 +262,13 @@
0
],
"model": [
"68",
"35",
0
]
},
"class_type": "BasicScheduler",
"_meta": {
"title": "BasicScheduler"
"title": "Sigma Generator"
}
},
"65": {
@ -390,13 +280,13 @@
},
"class_type": "RandomNoise",
"_meta": {
"title": "RandomNoise"
"title": "Noise Generator"
}
},
"67": {
"inputs": {
"model": [
"68",
"35",
0
],
"conditioning": [
@ -406,31 +296,48 @@
},
"class_type": "BasicGuider",
"_meta": {
"title": "BasicGuider"
"title": "Prompt Guider"
}
},
"68": {
"72": {
"inputs": {
"lora_01": "None",
"strength_01": 1,
"lora_02": "None",
"strength_02": 1,
"lora_03": "None",
"strength_03": 1,
"lora_04": "None",
"strength_04": 1,
"model": [
"35",
0
],
"clip": [
"22",
0
]
"clip_name1": "t5-v1_1-xxl-encoder-Q4_K_M.gguf",
"clip_name2": "clip_l.safetensors",
"type": "flux",
"device": "cuda:0",
"virtual_vram_gb": 0,
"use_other_vram": false,
"expert_mode_allocations": ""
},
"class_type": "Lora Loader Stack (rgthree)",
"class_type": "DualCLIPLoaderGGUFDisTorchMultiGPU",
"_meta": {
"title": "Lora Loader Stack (rgthree)"
"title": "DualCLIPLoaderGGUFDisTorchMultiGPU"
}
},
"73": {
"inputs": {
"vae_name": "FLUX1/ae.safetensors",
"device": "cuda:0"
},
"class_type": "VAELoaderMultiGPU",
"_meta": {
"title": "VAELoaderMultiGPU"
}
},
"35": {
"inputs": {
"unet_name": "flux1-dev-Q4_0.gguf",
"dequant_dtype": "default",
"patch_dtype": "default",
"patch_on_device": false,
"device": "cuda:1",
"virtual_vram_gb": 0,
"use_other_vram": false,
"expert_mode_allocations": ""
},
"class_type": "UnetLoaderGGUFAdvancedDisTorchMultiGPU",
"_meta": {
"title": "UnetLoaderGGUFAdvancedDisTorchMultiGPU"
}
}
}

433
workflow_flux_original.json Normal file
View File

@ -0,0 +1,433 @@
{
"8": {
"inputs": {
"samples": [
"62",
1
],
"vae": [
"27",
0
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"22": {
"inputs": {
"clip_name1": "t5/t5xxl_fp8_e4m3fn.safetensors",
"clip_name2": "clip_l.safetensors",
"type": "flux",
"device": "default"
},
"class_type": "DualCLIPLoader",
"_meta": {
"title": "DualCLIPLoader"
}
},
"27": {
"inputs": {
"vae_name": "FLUX1/ae.safetensors"
},
"class_type": "VAELoader",
"_meta": {
"title": "Load VAE"
}
},
"32": {
"inputs": {
"upscale_model": [
"33",
0
],
"image": [
"8",
0
]
},
"class_type": "ImageUpscaleWithModel",
"_meta": {
"title": "Upscale Image (using Model)"
}
},
"33": {
"inputs": {
"model_name": "4x-UltraSharp.pth"
},
"class_type": "UpscaleModelLoader",
"_meta": {
"title": "Load Upscale Model"
}
},
"34": {
"inputs": {
"upscale_method": "lanczos",
"scale_by": 0.5,
"image": [
"32",
0
]
},
"class_type": "ImageScaleBy",
"_meta": {
"title": "Half size"
}
},
"35": {
"inputs": {
"unet_name": "flux1-dev-Q4_0.gguf"
},
"class_type": "UnetLoaderGGUF",
"_meta": {
"title": "Unet Loader (GGUF)"
}
},
"40": {
"inputs": {
"int": 20
},
"class_type": "Int Literal (Image Saver)",
"_meta": {
"title": "Generation Steps"
}
},
"41": {
"inputs": {
"width": 720,
"height": 1080,
"aspect_ratio": "custom",
"swap_dimensions": "Off",
"upscale_factor": 2,
"prescale_factor": 1,
"batch_size": 1
},
"class_type": "CR Aspect Ratio",
"_meta": {
"title": "CR Aspect Ratio"
}
},
"42": {
"inputs": {
"filename": "THISFILE",
"path": "",
"extension": "png",
"steps": [
"40",
0
],
"cfg": [
"52",
0
],
"modelname": "flux1-dev-Q4_0.gguf",
"sampler_name": [
"50",
1
],
"positive": [
"44",
0
],
"negative": [
"45",
0
],
"seed_value": [
"48",
0
],
"width": [
"41",
0
],
"height": [
"41",
1
],
"lossless_webp": true,
"quality_jpeg_or_webp": 100,
"optimize_png": false,
"counter": 0,
"denoise": [
"53",
0
],
"clip_skip": 0,
"time_format": "%Y-%m-%d-%H%M%S",
"save_workflow_as_json": true,
"embed_workflow": true,
"additional_hashes": "",
"download_civitai_data": true,
"easy_remix": true,
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"images": [
"34",
0
]
},
"class_type": "Image Saver",
"_meta": {
"title": "CivitAI Image Saver"
}
},
"44": {
"inputs": {
"text": "",
"speak_and_recognation": {
"__value__": [
false,
true
]
}
},
"class_type": "ttN text",
"_meta": {
"title": "Positive Prompt T5"
}
},
"45": {
"inputs": {
"text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles, blurry, low-quality, distorted faces, overexposed lighting, extra limbs, bad anatomy, low contrast",
"speak_and_recognation": {
"__value__": [
false,
true
]
}
},
"class_type": "ttN text",
"_meta": {
"title": "Negative Prompt"
}
},
"47": {
"inputs": {
"text": [
"44",
0
],
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"68",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"48": {
"inputs": {
"seed": 903006749445372,
"increment": 1
},
"class_type": "Seed Generator (Image Saver)",
"_meta": {
"title": "Seed"
}
},
"49": {
"inputs": {
"scheduler": "beta"
},
"class_type": "Scheduler Selector (Comfy) (Image Saver)",
"_meta": {
"title": "Scheduler Selector"
}
},
"50": {
"inputs": {
"sampler_name": "euler"
},
"class_type": "Sampler Selector (Image Saver)",
"_meta": {
"title": "Sampler Selector (Image Saver)"
}
},
"51": {
"inputs": {
"images": [
"8",
0
]
},
"class_type": "PreviewImage",
"_meta": {
"title": "Preview Image"
}
},
"52": {
"inputs": {
"float": 3.5
},
"class_type": "Float Literal (Image Saver)",
"_meta": {
"title": "CFG"
}
},
"53": {
"inputs": {
"float": 1
},
"class_type": "Float Literal (Image Saver)",
"_meta": {
"title": "Denoise"
}
},
"60": {
"inputs": {
"clip_l": "",
"t5xxl": [
"44",
0
],
"guidance": [
"52",
0
],
"speak_and_recognation": {
"__value__": [
false,
true
]
},
"clip": [
"68",
1
]
},
"class_type": "CLIPTextEncodeFlux",
"_meta": {
"title": "CLIPTextEncodeFlux"
}
},
"62": {
"inputs": {
"noise": [
"65",
0
],
"guider": [
"67",
0
],
"sampler": [
"63",
0
],
"sigmas": [
"64",
0
],
"latent_image": [
"41",
5
]
},
"class_type": "SamplerCustomAdvanced",
"_meta": {
"title": "SamplerCustomAdvanced"
}
},
"63": {
"inputs": {
"sampler_name": [
"50",
0
]
},
"class_type": "KSamplerSelect",
"_meta": {
"title": "KSamplerSelect"
}
},
"64": {
"inputs": {
"scheduler": [
"49",
0
],
"steps": [
"40",
0
],
"denoise": [
"53",
0
],
"model": [
"68",
0
]
},
"class_type": "BasicScheduler",
"_meta": {
"title": "BasicScheduler"
}
},
"65": {
"inputs": {
"noise_seed": [
"48",
0
]
},
"class_type": "RandomNoise",
"_meta": {
"title": "RandomNoise"
}
},
"67": {
"inputs": {
"model": [
"68",
0
],
"conditioning": [
"47",
0
]
},
"class_type": "BasicGuider",
"_meta": {
"title": "BasicGuider"
}
},
"68": {
"inputs": {
"lora_01": "None",
"strength_01": 1,
"lora_02": "None",
"strength_02": 1,
"lora_03": "None",
"strength_03": 1,
"lora_04": "None",
"strength_04": 1,
"model": [
"35",
0
],
"clip": [
"22",
0
]
},
"class_type": "Lora Loader Stack (rgthree)",
"_meta": {
"title": "Lora Loader Stack (rgthree)"
}
}
}

View File

@ -59,7 +59,7 @@
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
"title": "Positive"
}
},
"7": {
@ -72,7 +72,7 @@
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
"title": "Negative"
}
},
"8": {