Compare commits

...

44 Commits
0.1.2 ... main

SHA1 Message Date
c7d71bfd03 Bump version: 0.1.18 → 0.1.19 2025-06-06 17:27:09 +01:00
1a0542861c basic working auth for create_image page 2025-06-06 17:26:37 +01:00
0fc549c199 Bump version: 0.1.17 → 0.1.18 2025-06-06 15:36:00 +01:00
f7f049aacb split out selecting model so can be returned to the ui 2025-06-06 15:35:50 +01:00
669dad4044 Bump version: 0.1.16 → 0.1.17 2025-06-06 15:13:20 +01:00
c726d23707 remove any preceeding text on the ollama prompt 2025-06-06 15:13:03 +01:00
2c9429d640 Bump version: 0.1.15 → 0.1.16 2025-06-06 13:10:43 +01:00
ac388b0f4e Add grouping to model list 2025-06-06 13:10:32 +01:00
4272e1d40e Bump version: 0.1.14 → 0.1.15 2025-06-06 12:55:22 +01:00
4e1e240e30 Bump version: 0.1.13 → 0.1.14 2025-06-06 12:52:15 +01:00
3d2524c2ba Bump version: 0.1.12 → 0.1.13 2025-06-06 12:52:03 +01:00
52574de881 hook doesn't work 2025-06-06 12:51:41 +01:00
ef9bf72b84 Bump version: 0.1.12 → 0.1.13 2025-06-06 12:49:35 +01:00
0cd6c6c5c8 updated bump config 2025-06-06 12:49:10 +01:00
cd6b8a8d25 Bump version: 0.1.11 → 0.1.12 2025-06-06 12:47:51 +01:00
1aa540fa03 update bump-my-version config 2025-06-06 12:47:40 +01:00
33bfee0220 remove extension from displayed models 2025-06-06 12:39:11 +01:00
f041a6afea add padding to the lightbox 2025-06-06 12:32:49 +01:00
336c03a888 allow navigation in lightbox over all images 2025-06-06 12:31:30 +01:00
9ce6ff25ea add padding to the index page 2025-06-06 12:28:39 +01:00
5be690f6c1 reformat the text on the gallery 2025-06-06 12:28:32 +01:00
4da908d0da Update docker-publish.yml (only build on tag) 2025-06-06 11:23:21 +01:00
7adaaf4491 bumped version 2025-06-06 11:21:42 +01:00
a75c7c554a better view on mobiles 2025-06-06 11:21:32 +01:00
2ff03fe101 upversion 2025-06-06 11:16:10 +01:00
4b52e5c713 code updates for date, fix update flux guff, new prompt logic 2025-06-06 11:15:43 +01:00
2a9a226dd1 bump version 2025-06-04 10:03:59 +01:00
3f2c59c5bb add mobile better mobile support 2025-06-04 10:03:44 +01:00
55ccd71383 fix the double generation 2025-06-04 10:03:30 +01:00
4b62b5cd07 was sending prompt to positive and negative 2025-06-04 10:03:17 +01:00
b8322e1fd8 updated view on gallery page 2025-05-27 18:43:58 +01:00
d749da148e scrollable text on index 2025-05-27 18:41:46 +01:00
d344674e02 bump tag 0.1.7 2025-05-19 11:15:04 +01:00
b1646a4c6e select model on create page 2025-05-19 11:14:03 +01:00
0b74672844 select model on create page 2025-05-18 21:16:11 +01:00
aabd19dd5f rework workflow to pickup the latest tag 2025-05-18 15:02:54 +01:00
423ca357f6 Bump version to 0.1.6 2025-05-18 14:59:21 +01:00
a7a27696d2 Bump version to 0.1.5 2025-05-18 14:58:24 +01:00
790c149c61 Bump version to 0.1.4 2025-05-18 14:53:20 +01:00
205481c4d4 increase batch so gallery works full size on my monitor 2025-05-18 14:51:11 +01:00
80f535be45 create_image spinner 2025-05-18 14:46:48 +01:00
1abba32f18 floating home button on gallery page 2025-05-17 11:38:23 +01:00
9d60135ab5 show version on the index page 2025-05-17 11:34:13 +01:00
ec6ab8123d Bump version to 0.1.3 2025-05-17 11:18:21 +01:00
14 changed files with 461 additions and 136 deletions

View File

@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.1.2"
current_version = "0.1.19"
parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
serialize = ["{major}.{minor}.{patch}"]
search = "{current_version}"
@ -7,12 +7,12 @@ replace = "{new_version}"
regex = false
ignore_missing_version = false
ignore_missing_files = false
tag = false
tag = true
sign_tags = false
tag_name = "v{new_version}"
tag_name = "{new_version}"
tag_message = "Bump version: {current_version} → {new_version}"
allow_dirty = false
commit = false
commit = true
message = "Bump version: {current_version} → {new_version}"
moveable_tags = []
commit_args = ""
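
With tag = true, commit = true and a tag_name of "{new_version}", each bump now commits and tags the bare version number (for example "0.1.20" rather than "v0.1.20"), which is what the Docker workflow below keys on. As a small illustration of the parse/serialize pair above, using only the standard library:

    import re

    # parse pattern copied from the [tool.bumpversion] section above
    PARSE = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"

    match = re.fullmatch(PARSE, "0.1.19")
    parts = {k: int(v) for k, v in match.groupdict().items()}
    print(parts)  # {'major': 0, 'minor': 1, 'patch': 19}

    # serialize = ["{major}.{minor}.{patch}"] rebuilds the same string,
    # and tag_name = "{new_version}" now yields a bare tag such as "0.1.19".
    print("{major}.{minor}.{patch}".format(**parts))  # 0.1.19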

View File

@ -2,8 +2,7 @@ name: Build and Publish Docker Image
on:
push:
branches: [main]
tags: ['*'] # triggers on any tag push
tags: ["*"] # Only triggers on tag pushes
workflow_dispatch:
jobs:
@ -13,6 +12,8 @@ jobs:
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0 # ensures tags are fetched
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@ -25,23 +26,15 @@ jobs:
IMAGE_NAME="ai-frame-image-server"
REGISTRY="${{ secrets.REGISTRY }}"
USERNAME="${{ secrets.USERNAME }}"
GIT_TAG="${GITHUB_REF#refs/tags/}"
IMAGE_TAGGED="$REGISTRY/$USERNAME/$IMAGE_NAME:$GIT_TAG"
IMAGE_LATEST="$REGISTRY/$USERNAME/$IMAGE_NAME:latest"
# Always build and tag as latest
echo "🔧 Building $IMAGE_LATEST"
docker build -t $IMAGE_LATEST .
echo "🔧 Building $IMAGE_TAGGED and $IMAGE_LATEST"
docker build -t $IMAGE_LATEST -t $IMAGE_TAGGED .
echo "📤 Pushing $IMAGE_TAGGED"
docker push $IMAGE_TAGGED
echo "📤 Pushing $IMAGE_LATEST"
docker push $IMAGE_LATEST
# If this is a tag push, tag the image accordingly
if [[ "${GITHUB_REF}" == refs/tags/* ]]; then
GIT_TAG="${GITHUB_REF#refs/tags/}"
IMAGE_TAGGED="$REGISTRY/$USERNAME/$IMAGE_NAME:$GIT_TAG"
echo "🏷️ Also tagging as $IMAGE_TAGGED"
docker tag $IMAGE_LATEST $IMAGE_TAGGED
echo "📤 Pushing $IMAGE_TAGGED"
docker push $IMAGE_TAGGED
fi
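
Since the workflow now runs only on tag pushes, the tag-specific image name can be derived unconditionally from GITHUB_REF instead of being guarded by the old refs/tags/* check. A minimal sketch of the same derivation in Python (registry, username and tag values are placeholders):

    def image_names(github_ref: str, registry: str, username: str,
                    image: str = "ai-frame-image-server") -> tuple[str, str]:
        # Mirror the shell step above: strip refs/tags/ and build both image names.
        git_tag = github_ref.removeprefix("refs/tags/")
        tagged = f"{registry}/{username}/{image}:{git_tag}"
        latest = f"{registry}/{username}/{image}:latest"
        return tagged, latest

    # e.g. pushing tag 0.1.19
    print(image_names("refs/tags/0.1.19", "registry.example.com", "alice"))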

.gitignore (4 changed lines)
View File

@ -7,4 +7,6 @@ dist/
user_config.cfg
output/
prompts_log.jsonl
publish.sh
publish.sh
test.py
.vscode/launch.json

View File

@ -4,22 +4,35 @@ from flask import (
send_from_directory,
request,
jsonify,
redirect,
url_for,
session,
render_template_string,
)
import os
import time
import threading
from apscheduler.schedulers.background import BackgroundScheduler
from libs.generic import load_config, load_recent_prompts, get_details_from_png
from libs.comfyui import cancel_current_job, create_image
from libs.generic import (
load_config,
load_recent_prompts,
get_details_from_png,
get_current_version,
load_models_from_config,
)
from libs.comfyui import cancel_current_job, create_image, select_model
from libs.ollama import create_prompt_on_openwebui
#workflow test commit
# workflow test commit
user_config = load_config()
app = Flask(__name__)
app.secret_key = os.environ.get('SECRET_KEY')
image_folder = "./output"
@app.route("/", methods=["GET"])
def index() -> str:
"""
@ -30,22 +43,41 @@ def index() -> str:
prompt = get_details_from_png(image_path)["p"]
version = get_current_version()
return render_template(
"index.html",
image=image_filename,
prompt=prompt,
reload_interval=user_config["frame"]["reload_interval"],
version=version,
)
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
if request.form['password'] == user_config["frame"]["password_for_auth"]:
session['authenticated'] = True
return render_template("create_image.html", models=load_models_from_config())
else:
return redirect(url_for('login'))
return render_template('login.html')
@app.route("/images", methods=["GET"])
def gallery() -> str:
images = []
for f in os.listdir(image_folder):
if f.lower().endswith(('png', 'jpg', 'jpeg', 'gif')):
images.append({'filename': f})
images = sorted(images, key=lambda x: os.path.getmtime(os.path.join(image_folder, x['filename'])), reverse=True)
if f.lower().endswith(("png", "jpg", "jpeg", "gif")):
images.append({"filename": f})
images = sorted(
images,
key=lambda x: os.path.getmtime(os.path.join(image_folder, x["filename"])),
reverse=True,
)
return render_template("gallery.html", images=images)
@app.route("/image-details/<filename>", methods=["GET"])
def image_details(filename):
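
The new /login route stores an authenticated flag in the Flask session, and /create_image redirects to it when create_requires_auth is enabled in user_config (see the [frame] additions further down). A hedged sketch of exercising that flow with requests, assuming create_requires_auth = True, the sample password "create" and the default port 5000:

    import requests

    BASE = "http://localhost:5000"
    session = requests.Session()

    # Unauthenticated access to the create page should bounce to /login
    resp = session.get(f"{BASE}/create_image", allow_redirects=False)
    print(resp.status_code, resp.headers.get("Location"))  # expect 302 and /login

    # Posting the configured password sets session['authenticated'] = True
    session.post(f"{BASE}/login", data={"password": "create"})
    print(session.get(f"{BASE}/create_image").status_code)  # expect 200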
@ -53,15 +85,12 @@ def image_details(filename):
if not os.path.exists(path):
return {"error": "File not found"}, 404
details = get_details_from_png(path)
return {
"prompt": details["p"],
"model": details["m"]
}
return {"prompt": details["p"], "model": details["m"], "date": details["d"]}
@app.route('/images/thumbnails/<path:filename>')
@app.route("/images/thumbnails/<path:filename>")
def serve_thumbnail(filename):
return send_from_directory('output/thumbnails', filename)
return send_from_directory("output/thumbnails", filename)
@app.route("/images/<filename>", methods=["GET"])
@ -89,23 +118,30 @@ def cancel_job() -> None:
@app.route("/create", methods=["GET", "POST"])
def create() -> str:
"""Handles image creation requests.
Args:
None
Returns:
str: Redirect to the main page or a JSON response.
"""
prompt = request.form.get("prompt") if request.method == "POST" else None
def create():
if request.method == "POST":
prompt = request.form.get("prompt")
selected_workflow, model = select_model(request.form.get("model") or "Random")
if prompt is None:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
def create_image_in_background():
create_image(prompt)
if not prompt:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
threading.Thread(target=create_image_in_background).start()
return render_template('image_queued.html', prompt=prompt)
# Start generation in background
threading.Thread(target=lambda: create_image(prompt, model)).start()
return redirect(
url_for("image_queued", prompt=prompt, model=model.split(".")[0])
)
# For GET requests, just show the form to enter prompt
return render_template("create_image.html", models=load_models_from_config())
@app.route("/image_queued")
def image_queued():
prompt = request.args.get("prompt", "No prompt provided.")
model = request.args.get("model", "No model selected.").split(".")[0]
return render_template("image_queued.html", prompt=prompt, model=model)
def scheduled_task() -> None:
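
POST /create now accepts an optional model field, resolves it through select_model, starts generation in a background thread and redirects to /image_queued with the prompt and the model name stripped of its file extension. A sketch of submitting the same form-encoded request the create page's JavaScript sends (URL and values are placeholders):

    import requests

    resp = requests.post(
        "http://localhost:5000/create",
        data={"prompt": "a lighthouse at dusk", "model": "Random"},
        allow_redirects=False,
    )
    # Expect a redirect to /image_queued?prompt=...&model=...
    print(resp.status_code, resp.headers.get("Location"))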
@ -113,16 +149,17 @@ def scheduled_task() -> None:
print(f"Executing scheduled task at {time.strftime('%Y-%m-%d %H:%M:%S')}")
create_image(None)
@app.route("/create_image", methods=["GET"])
def create_image_endpoint() -> str:
"""
Renders the create image template with image and prompt.
"""
if user_config["frame"]["create_requires_auth"] == "True" and not session.get('authenticated'):
return redirect(url_for("login"))
models = load_models_from_config()
return render_template(
"create_image.html"
)
return render_template("create_image.html", models=models)
if user_config["frame"]["auto_regen"] == "True":
@ -136,10 +173,9 @@ if user_config["frame"]["auto_regen"] == "True":
minute=regen_time[1],
id="scheduled_task",
max_instances=1, # prevent overlapping
replace_existing=True # don't double-schedule
replace_existing=True, # don't double-schedule
)
scheduler.start()
os.makedirs(image_folder, exist_ok=True)
app.run(host="0.0.0.0", port=user_config["frame"]["port"], debug=True)

View File

@ -64,13 +64,14 @@ def generate_image(
file_name: str,
comfy_prompt: str,
workflow_path: str = "./workflow_sdxl.json",
prompt_node: str = "CLIP Text Encode (Prompt)",
prompt_node: str = "Positive",
seed_node: str = "KSampler",
seed_param: str = "seed",
save_node: str = "Save Image",
save_param: str = "filename_prefix",
model_node: Optional[str] = "Load Checkpoint",
model_param: Optional[str] = "ckpt_name",
model: Optional[str] = "None",
) -> None:
"""Generates an image using the Comfy API with configurable workflow settings."""
try:
@ -100,21 +101,7 @@ def generate_image(
user_config["comfyui"]["height"],
)
# Conditionally set model if node and param are provided
if model_node and model_param:
if "FLUX" in workflow_path:
valid_models = user_config["comfyui:flux"]["models"].split(",")
else:
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(
set(get_available_models()) & set(available_model_list)
)
if not valid_models:
raise Exception("No valid models available.")
model = random.choice(valid_models)
wf.set_node_param(model_node, model_param, model)
wf.set_node_param(model_node, model_param, model)
# Generate image
logging.debug(f"Generating image: {file_name}")
@ -134,24 +121,41 @@ def generate_image(
except Exception as e:
logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
raise
def create_image(prompt: str | None = None) -> None:
"""Main function for generating images."""
def select_model(model: str) -> tuple[str, str]:
use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
if model == "Random":
selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
elif "flux" in model.lower():
selected_workflow = "FLUX"
else:
selected_workflow = "SDXL"
if model == "Random":
if selected_workflow == "FLUX":
valid_models = user_config["comfyui:flux"]["models"].split(",")
else: # SDXL
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(set(get_available_models()) & set(available_model_list))
model = random.choice(valid_models)
return selected_workflow, model
def create_image(prompt: str | None = None, model: str = "Random") -> None:
"""Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
if prompt is None:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
if not prompt:
logging.error("No prompt generated.")
return
save_prompt(prompt)
use_flux = json.loads((user_config["comfyui"].get("FLUX", False)).lower())
only_flux = json.loads((user_config["comfyui"].get("ONLY_FLUX", False)).lower())
selected_workflow = "SDXL"
if use_flux:
selected_workflow = "FLUX" if only_flux else random.choice(["FLUX", "SDXL"])
selected_workflow, model = select_model(model)
if selected_workflow == "FLUX":
generate_image(
@ -163,12 +167,11 @@ def create_image(prompt: str | None = None) -> None:
seed_param="seed",
save_node="CivitAI Image Saver",
save_param="filename",
model_node="CivitAI Image Saver",
model_param="modelname",
model_node="Unet Loader (GGUF)",
model_param="unet_name",
model=model
)
else:
generate_image("image", prompt)
else: # SDXL
generate_image("image", comfy_prompt=prompt, model=model)
logging.info(f"{selected_workflow} generation started with prompt: {prompt}")

View File

@ -1,3 +1,4 @@
import subprocess
import configparser
import logging
import sys
@ -65,6 +66,7 @@ def rename_image() -> str | None:
def get_details_from_png(path):
try:
date = datetime.fromtimestamp(os.path.getctime(path)).strftime("%d-%m-%Y")
with Image.open(path) as img:
try:
# Flux workflow
@ -76,11 +78,33 @@ def get_details_from_png(path):
data = json.loads(img.info["prompt"])
prompt = data['6']['inputs']['text']
model = data['4']['inputs']['ckpt_name']
return {"p":prompt,"m":model} or {"p":"","m":""}
return {"p":prompt,"m":model,"d":date} or {"p":"","m":"","c":""}
except Exception as e:
print(f"Error reading metadata from {path}: {e}")
return ""
def get_current_version():
try:
# Run the command and capture the output
result = subprocess.run(
['bump-my-version', 'show', 'current_version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True, # to get string output instead of bytes
check=True # raises exception if command fails
)
version = result.stdout.strip()
return version
except subprocess.CalledProcessError as e:
print("Error running bump-my-version:", e)
return None
def load_models_from_config():
flux_models = user_config["comfyui:flux"]["models"].split(",")
sdxl_models = user_config["comfyui"]["models"].split(",")
all_models = flux_models + sdxl_models
return all_models
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]
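
get_current_version() shells out to bump-my-version so the index page can show the running version, and load_models_from_config() simply concatenates the comma-separated FLUX and SDXL model lists. A small sketch of what the create page receives, using hypothetical config values:

    # Hypothetical model lists, standing in for user_config["comfyui:flux"]["models"]
    # and user_config["comfyui"]["models"].
    flux_models = "flux1-dev-Q4_0.gguf,flux1-schnell-Q4_0.gguf".split(",")
    sdxl_models = "sdxl_base_1.0.safetensors,juggernautXL.safetensors".split(",")

    all_models = flux_models + sdxl_models
    # create_image.html groups the options by checking for "flux" in the lowercased name
    flux_group = [m for m in all_models if "flux" in m.lower()]
    sdxl_group = [m for m in all_models if "flux" not in m.lower()]
    print(flux_group)
    print(sdxl_group)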

View File

@ -24,13 +24,13 @@ def create_prompt_on_openwebui(prompt: str) -> str:
topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
user_content = (
"Here are the prompts from the last 7 days:\n\n"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+ "\n\nDo not repeat ideas, themes, or settings from the above. "
"Now generate a new, completely original Stable Diffusion prompt that hasn't been done yet."
"Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors.”"
+ topic_instruction
+ "Avoid prompts similar to the following:"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
)
model = random.choice(user_config["openwebui"]["models"].split(","))
response = litellm.completion(
api_base=user_config["openwebui"]["base_url"],
@ -67,4 +67,4 @@ def create_prompt_on_openwebui(prompt: str) -> str:
# )
# prompt = response["choices"][0]["message"]["content"].strip('"')
logging.debug(prompt)
return prompt
return prompt.split(": ")[-1]
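
The final .split(": ")[-1] is the change that strips any leading label from the model's reply: only the text after the last ": " is kept, and replies without a colon pass through unchanged. A quick illustration:

    for reply in (
        "Prompt: a neon koi pond at midnight",
        "Here is your idea: a neon koi pond at midnight",
        "a neon koi pond at midnight",
    ):
        print(reply.split(": ")[-1])
    # all three lines print: a neon koi pond at midnight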

View File

@ -1,15 +1,19 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Create An Image</title>
<style>
/* ---------- reset ---------- */
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
/* ---------- layout ---------- */
body {
display: flex;
flex-direction: column;
@ -21,6 +25,7 @@
font-family: Arial, sans-serif;
padding: 20px;
}
textarea {
width: 80vw;
height: 200px;
@ -34,11 +39,15 @@
color: #eee;
border: 1px solid #333;
}
.button-group {
display: flex;
gap: 20px;
align-items: center;
}
button {
button,
select {
background: #333;
color: white;
border: none;
@ -48,42 +57,150 @@
cursor: pointer;
transition: background 0.3s;
}
button:hover {
button:hover,
select:hover {
background: #555;
}
/* ---------- spinner ---------- */
#spinner-overlay {
position: fixed;
inset: 0;
display: flex;
align-items: center;
justify-content: center;
background: rgba(0, 0, 0, 0.6);
visibility: hidden;
z-index: 1000;
}
.spinner {
width: 50px;
height: 50px;
border: 6px solid #555;
border-top-color: white;
border-radius: 50%;
animation: spin 0.8s linear infinite;
}
@keyframes spin {
to {
transform: rotate(360deg);
}
}
@media (max-width: 600px) {
body {
min-height: 100dvh;
height: auto;
justify-content: flex-start;
padding-top: 40px;
}
.button-group {
flex-direction: column;
align-items: stretch;
width: 100%;
}
button,
select {
width: 100%;
}
textarea {
height: 150px;
}
}
</style>
</head>
<body>
<h1 style="margin-bottom: 20px;">Create An Image</h1>
<textarea id="prompt-box" placeholder="Enter your custom prompt here..."></textarea>
<div class="button-group">
<button onclick="location.href='/'">Back</button>
<button onclick="showSpinner(); location.href='/'">Back</button>
<button onclick="sendPrompt()">Send Prompt</button>
<button onclick="location.href='/create'">Random Prompt</button>
<button onclick="randomPrompt()">Random Prompt</button>
<select id="model-select">
<option value="" selected>Random</option>
<!-- Group: FLUX -->
<optgroup label="FLUX">
{% for m in models if 'flux' in m|lower %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
{% endfor %}
</optgroup>
<!-- Group: SDXL -->
<optgroup label="SDXL">
{% for m in models if 'flux' not in m|lower %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
{% endfor %}
</optgroup>
</select>
</div>
<!-- waiting overlay -->
<div id="spinner-overlay">
<div class="spinner"></div>
</div>
<script>
const overlay = document.getElementById('spinner-overlay');
function showSpinner() { overlay.style.visibility = 'visible'; }
function sendPrompt() {
showSpinner();
const prompt = document.getElementById('prompt-box').value;
const model = document.getElementById('model-select').value;
const formData = new URLSearchParams();
formData.append('prompt', prompt);
formData.append('model', model);
fetch('/create', {
method: 'POST',
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
},
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: formData.toString()
}).then(response => {
if (response.redirected) {
window.location.href = response.url;
} else {
alert("Image creation request sent.");
}
}).catch(error => {
alert("Error sending prompt: " + error);
});
})
.then(response => {
window.location.href = response.redirected ? response.url : '/create';
})
.catch(error => {
overlay.style.visibility = 'hidden';
alert("Error sending prompt: " + error);
});
}
// wrapper for Random Prompt button so it also sends the model
function randomPrompt() {
showSpinner();
const model = document.getElementById('model-select').value;
const formData = new URLSearchParams();
formData.append('model', model);
fetch('/create', {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: formData.toString()
})
.then(response => {
window.location.href = response.redirected ? response.url : '/create';
})
.catch(error => {
overlay.style.visibility = 'hidden';
alert("Error requesting random prompt: " + error);
});
}
</script>
</body>
</html>
</html>

View File

@ -55,6 +55,7 @@
align-items: center;
flex-direction: column;
z-index: 999;
padding: 20px 0;
}
.lightbox img {
@ -100,12 +101,19 @@
max-width: 80%;
text-align: left;
margin-top: 20px;
max-height: 25vh;
/* NEW: restrict height */
overflow-y: auto;
/* NEW: allow vertical scroll */
}
.button-group {
display: flex;
justify-content: center;
margin-top: 2rem;
/* Back button fixed top right */
.home-button {
position: fixed;
top: 20px;
right: 20px;
z-index: 500;
/* lower than lightbox (999) */
}
.button-link {
@ -155,6 +163,10 @@
font-size: 14px;
max-width: 90%;
padding: 8px 16px;
max-height: 20vh;
/* smaller height for mobile */
overflow-y: auto;
/* keep scroll on mobile too */
}
.button-link {
@ -166,15 +178,13 @@
</head>
<body>
<a href="/" class="button-link home-button">Home</a>
<h1>Image Archive</h1>
<!-- Empty gallery container; images will be loaded incrementally -->
<div class="gallery" id="gallery"></div>
<div class="button-group">
<a href="/" class="button-link">Back</a>
</div>
<!-- Lightbox -->
<div class="lightbox" id="lightbox">
<span class="close" onclick="closeLightbox()">&times;</span>
@ -195,10 +205,10 @@
<script>
const gallery = document.getElementById('gallery');
const batchSize = 6; // images to load per batch
const batchSize = 9; // images to load per batch
let loadedCount = 0;
let currentIndex = 0;
const detailsCache = {}; // Cache for image details
const detailsCache = {}; // Cache for image details
function createImageElement(image) {
const img = document.createElement('img');
@ -254,7 +264,8 @@
if (detailsCache[filename]) {
document.getElementById("lightbox-prompt").textContent =
`Model: ${detailsCache[filename].model}\n\n${detailsCache[filename].prompt}`;
`Model:${detailsCache[filename].model} - Created:${detailsCache[filename].date}\n\n${detailsCache[filename].prompt}`;
} else {
document.getElementById("lightbox-prompt").textContent = "Loading…";
@ -266,7 +277,7 @@
.then(data => {
detailsCache[filename] = data; // Cache the data
document.getElementById("lightbox-prompt").textContent =
`Model: ${data.model}\n\n${data.prompt}`;
`Model:${data.model} - Created:${data.date}\n\n${data.prompt}`;
})
.catch(() => {
document.getElementById("lightbox-prompt").textContent = "Couldnt load details.";
@ -276,8 +287,17 @@
function nextImage() {
const images = getGalleryImages();
currentIndex = (currentIndex + 1) % images.length;
showImageAndLoadDetails(currentIndex);
if (currentIndex + 1 >= images.length && loadedCount < allImages.length) {
loadNextBatch();
// Wait briefly to ensure DOM updates
setTimeout(() => {
currentIndex++;
showImageAndLoadDetails(currentIndex);
}, 100);
} else {
currentIndex = (currentIndex + 1) % images.length;
showImageAndLoadDetails(currentIndex);
}
}
function prevImage() {
@ -286,6 +306,7 @@
showImageAndLoadDetails(currentIndex);
}
function closeLightbox() {
document.getElementById("lightbox").style.display = "none";
}

View File

@ -52,10 +52,11 @@
</style>
</head>
<body>
<div class="message">Image will be made using prompt:</div>
<div class="message">Image will be made with <i>{{ model }}</i> using prompt:</div>
<div class="prompt-text">
{{ prompt }}
</div>
<button onclick="location.href='/'">Home</button>
</body>
</html>

View File

@ -2,8 +2,8 @@
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>AI Image of the Day</title>
<style>
* {
@ -21,8 +21,12 @@
background: black;
color: white;
font-family: Arial, sans-serif;
position: relative;
padding-top: 20px;
padding-bottom: 20px;
}
.image-container {
max-width: 90vw;
max-height: 80vh;
@ -49,6 +53,10 @@
border-radius: 10px;
max-width: 80vw;
text-align: left;
max-height: 30vh;
/* NEW: limit height */
overflow-y: auto;
/* NEW: allow scrolling */
}
.button-group {
@ -74,6 +82,47 @@
.button-link:hover {
background: #555;
}
/* New style for version number */
.version {
position: fixed;
bottom: 8px;
right: 12px;
color: #666;
font-size: 12px;
font-family: monospace;
user-select: none;
pointer-events: none;
opacity: 0.6;
}
@media (max-width: 768px) {
.image-container {
max-width: 100vw;
max-height: 50vh;
}
img {
max-width: 100%;
max-height: 100%;
}
.prompt {
max-height: 20vh;
font-size: 14px;
padding: 10px 15px;
}
.button-group {
flex-direction: column;
gap: 10px;
}
.button-link {
font-size: 14px;
padding: 8px 16px;
}
}
</style>
<script>
setInterval(() => {
@ -85,7 +134,7 @@
<body>
{% if image %}
<div class="image-container">
<img src="{{ url_for('images', filename=image) }}" alt="Latest Image">
<img src="{{ url_for('images', filename=image) }}" alt="Latest Image" />
</div>
{% if prompt %}
<div class="prompt">{{ prompt }}</div>
@ -97,4 +146,9 @@
{% else %}
<p>No images found</p>
{% endif %}
</body>
<!-- Version number at bottom right -->
<div class="version">v{{ version }}</div>
</body>
</html>

templates/login.html (new file, 72 additions)
View File

@ -0,0 +1,72 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Login</title>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 100vh;
background: black;
color: white;
font-family: Arial, sans-serif;
padding: 20px;
text-align: center;
}
.message {
font-size: 22px;
margin-bottom: 20px;
}
.prompt-text {
font-size: 20px;
background: #111;
padding: 20px;
border-radius: 10px;
border: 1px solid #333;
max-width: 80vw;
margin-bottom: 30px;
}
input[type="password"] {
padding: 10px;
border-radius: 8px;
border: 1px solid #555;
background: #222;
color: white;
font-size: 16px;
margin-bottom: 20px;
width: 250px;
}
button {
background: #333;
color: white;
border: none;
padding: 10px 20px;
border-radius: 8px;
font-size: 16px;
cursor: pointer;
transition: background 0.3s;
}
button:hover {
background: #555;
}
</style>
</head>
<body>
<div class="message">Please enter the password to continue:</div>
<form method="post">
<div class="prompt-text">
<input type="password" name="password" placeholder="Password" required>
</div>
<button type="submit">Login</button>
</form>
</body>
</html>

View File

@ -3,6 +3,8 @@ reload_interval = 30000
auto_regen = True
regen_time = 03:00
port = 5000
create_requires_auth = False
password_for_auth = create
[comfyui]
comfyui_url = http://comfyui

View File

@ -59,7 +59,7 @@
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
"title": "Positive"
}
},
"7": {
@ -72,7 +72,7 @@
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
"title": "Negative"
}
},
"8": {