Compare commits

main...0.1.0

No commits in common. "main" and "0.1.0" have entirely different histories.

16 changed files with 214 additions and 515 deletions


@@ -1,21 +1,10 @@
[tool.bumpversion]
current_version = "0.1.20"
parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
serialize = ["{major}.{minor}.{patch}"]
search = "{current_version}"
replace = "{new_version}"
regex = false
ignore_missing_version = false
ignore_missing_files = false
tag = true
sign_tags = false
tag_name = "{new_version}"
tag_message = "Bump version: {current_version} → {new_version}"
allow_dirty = false
[bumpversion]
current_version = "0.1.0"
commit = true
message = "Bump version: {current_version} → {new_version}"
moveable_tags = []
commit_args = ""
setup_hooks = []
pre_commit_hooks = []
post_commit_hooks = []
tag = true
tag_name = "{new_version}"
[[file]]
filename = "__version__.py"
parse = '__version__\s*=\s*"(?P<version>.*)"'
serialize = '__version__ = "{new_version}"'


@@ -1,19 +1,32 @@
name: Build and Publish Docker Image
name: Build, Update Version and Publish Docker Image
on:
push:
tags: ["*"] # Only triggers on tag pushes
branches: [main]
tags: ['*']
workflow_dispatch:
jobs:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v4
with:
fetch-depth: 0 # ensures tags are fetched
python-version: '3.x'
- name: Install bump2version
run: pip install bump2version
# Bump patch version, commit, and push back to repo (only on main branch)
- name: Bump version and push
if: github.ref == 'refs/heads/main'
run: |
bump2version patch --allow-dirty
git push origin main --follow-tags
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@@ -21,20 +34,22 @@ jobs:
- name: Log in to Docker Registry
run: echo "${{ secrets.PASSWORD }}" | docker login ${{ secrets.REGISTRY }} -u "${{ secrets.USERNAME }}" --password-stdin
- name: Get version from file
id: get_version
run: |
VERSION=$(python -c "import myproject.__version__ as v; print(v.__version__)")
echo "version=$VERSION" >> $GITHUB_OUTPUT
- name: Build and Push Docker Images
run: |
IMAGE_NAME="ai-frame-image-server"
REGISTRY="${{ secrets.REGISTRY }}"
USERNAME="${{ secrets.USERNAME }}"
GIT_TAG="${GITHUB_REF#refs/tags/}"
IMAGE_TAGGED="$REGISTRY/$USERNAME/$IMAGE_NAME:$GIT_TAG"
IMAGE_LATEST="$REGISTRY/$USERNAME/$IMAGE_NAME:latest"
VERSION="${{ steps.get_version.outputs.version }}"
IMAGE_TAG="$REGISTRY/$USERNAME/$IMAGE_NAME:$VERSION"
echo "🔧 Building $IMAGE_TAGGED and $IMAGE_LATEST"
docker build -t $IMAGE_LATEST -t $IMAGE_TAGGED .
echo "🔧 Building $IMAGE_TAG"
docker build -t $IMAGE_TAG .
echo "📤 Pushing $IMAGE_TAGGED"
docker push $IMAGE_TAGGED
echo "📤 Pushing $IMAGE_LATEST"
docker push $IMAGE_LATEST
echo "📤 Pushing $IMAGE_TAG"
docker push $IMAGE_TAG

.gitignore

@@ -8,5 +8,3 @@ user_config.cfg
output/
prompts_log.jsonl
publish.sh
test.py
.vscode/launch.json

__version__.py (new file)

@@ -0,0 +1 @@
__version__ = "0.1.0"


@@ -4,36 +4,22 @@ from flask import (
send_from_directory,
request,
jsonify,
redirect,
url_for,
session,
render_template_string,
)
import os
import time
import threading
from apscheduler.schedulers.background import BackgroundScheduler
from libs.generic import (
load_config,
load_recent_prompts,
get_details_from_png,
get_current_version,
load_models_from_config,
load_topics_from_config
)
from libs.comfyui import cancel_current_job, create_image, select_model
from libs.generic import load_config, load_recent_prompts, get_details_from_png
from libs.comfyui import cancel_current_job, create_image
from libs.ollama import create_prompt_on_openwebui
# workflow test commit
#workflow test commit
user_config = load_config()
app = Flask(__name__)
app.secret_key = os.environ.get('SECRET_KEY')
image_folder = "./output"
@app.route("/", methods=["GET"])
def index() -> str:
"""
@@ -44,39 +30,20 @@ def index() -> str:
prompt = get_details_from_png(image_path)["p"]
version = get_current_version()
return render_template(
"index.html",
image=image_filename,
prompt=prompt,
reload_interval=user_config["frame"]["reload_interval"],
version=version,
)
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
if request.form['password'] == user_config["frame"]["password_for_auth"]:
session['authenticated'] = True
return render_template("create_image.html", models=load_models_from_config())
else:
return redirect(url_for('login'))
return render_template('login.html')
@app.route("/images", methods=["GET"])
def gallery() -> str:
images = []
for f in os.listdir(image_folder):
if f.lower().endswith(("png", "jpg", "jpeg", "gif")):
images.append({"filename": f})
images = sorted(
images,
key=lambda x: os.path.getmtime(os.path.join(image_folder, x["filename"])),
reverse=True,
)
if f.lower().endswith(('png', 'jpg', 'jpeg', 'gif')):
images.append({'filename': f})
images = sorted(images, key=lambda x: os.path.getmtime(os.path.join(image_folder, x['filename'])), reverse=True)
return render_template("gallery.html", images=images)
@@ -86,12 +53,15 @@ def image_details(filename):
if not os.path.exists(path):
return {"error": "File not found"}, 404
details = get_details_from_png(path)
return {"prompt": details["p"], "model": details["m"], "date": details["d"]}
return {
"prompt": details["p"],
"model": details["m"]
}
@app.route("/images/thumbnails/<path:filename>")
@app.route('/images/thumbnails/<path:filename>')
def serve_thumbnail(filename):
return send_from_directory("output/thumbnails", filename)
return send_from_directory('output/thumbnails', filename)
@app.route("/images/<filename>", methods=["GET"])
@@ -119,31 +89,23 @@ def cancel_job() -> None:
@app.route("/create", methods=["GET", "POST"])
def create():
if request.method == "POST":
prompt = request.form.get("prompt")
selected_workflow, model = select_model(request.form.get("model") or "Random")
topic = request.form.get("topic")
def create() -> str:
"""Handles image creation requests.
Args:
None
Returns:
str: Redirect to the main page or a JSON response.
"""
prompt = request.form.get("prompt") if request.method == "POST" else None
if not prompt:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic)
if prompt is None:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
# Start generation in background
threading.Thread(target=lambda: create_image(prompt, model)).start()
def create_image_in_background():
create_image(prompt)
return redirect(
url_for("image_queued", prompt=prompt, model=model.split(".")[0])
)
# For GET requests, just show the form to enter prompt
return render_template("create_image.html", models=load_models_from_config())
@app.route("/image_queued")
def image_queued():
prompt = request.args.get("prompt", "No prompt provided.")
model = request.args.get("model", "No model selected.").split(".")[0]
return render_template("image_queued.html", prompt=prompt, model=model)
threading.Thread(target=create_image_in_background).start()
return render_template('image_queued.html', prompt=prompt)
def scheduled_task() -> None:
@@ -151,18 +113,16 @@ def scheduled_task() -> None:
print(f"Executing scheduled task at {time.strftime('%Y-%m-%d %H:%M:%S')}")
create_image(None)
@app.route("/create_image", methods=["GET"])
def create_image_endpoint() -> str:
"""
Renders the create image template with image and prompt.
"""
if user_config["frame"]["create_requires_auth"] == "True" and not session.get('authenticated'):
return redirect(url_for("login"))
models = load_models_from_config()
topics = load_topics_from_config()
return render_template("create_image.html", models=models, topics=topics)
return render_template(
"create_image.html"
)
if user_config["frame"]["auto_regen"] == "True":
@@ -176,9 +136,10 @@ if user_config["frame"]["auto_regen"] == "True":
minute=regen_time[1],
id="scheduled_task",
max_instances=1, # prevent overlapping
replace_existing=True, # don't double-schedule
replace_existing=True # don't double-schedule
)
scheduler.start()
os.makedirs(image_folder, exist_ok=True)
app.run(host="0.0.0.0", port=user_config["frame"]["port"], debug=True)

d (new file)

@@ -0,0 +1,55 @@
b0bb465 (HEAD -> main, tag: 0.1.0) add style for mobile
4ec98eb working lazyloading
c3f0a70 working lightbox navifation with js cache for details
636148b only load details when loading image in lightbox
cf9f5d0 cleanup and show modle name on gallery
ab1c0c3 (origin/main) fix SDXL workflow
8be2111 image queued page
0ccebcf add back button to the bottom of the gallery
da37913 add create_image page
aef0afe wrong key name
e88d490 fix the random model logic
ee18289 yaml not sh
1a3e657 test commit to see if the workflow works
26d9f38 updated workflow
2c5dbdb try create workflow to auto publish new container
e43ab87 replace image.png thumbnail
073cc3b update flux logic
41fd144 re working into libs, and use thumbs in gallery
020c2c2 updated styling and prompt on home page
4acf28e update path for flux
9aea4e6 fix sorting and sdxl workflow prompt text
81140d7 updated logic to show prompt on the lightbox display
cce1cb2 lazy load images
dcc6f94 re-add topic logic
b93d070 update gitignore
7c4ec9e Ignore publish.sh
3e974d5 fixed logic
eb59cfa wrong key
b92366f spelling mistake
60be7c4 randomly select dev/schnell when using flux
ab50f2a add publish script
c4b9dd2 last 7 prompts not 7 days
30d25d1 rework to single flow
a180a7b working flux and sdxl
2bbb2fe allow flux workflow
6fdfb51 support multiple models and random selection
e4428b4 return generated prompt, allow job to be cancelled
91c48b5 fix double running job
d32e903 retry logic, create endpoint now non blocking
42c8a2b sort gallery by new-old, move to -slim docker image
5ab4d76 strange indent
db9b961 formatting and docstrings
dcc0cdc move gallery to /images
0838f37 bigger gallery
342416a arrow cycle
6cc30bd add /gallery to show all generated images
290c1ba support auto image regeneration at specific time
d97ef3f add support for multiple models and posting a prompt to /create
0e471d7 update negative prompt
1fba2da docker compose file
8064c90 user config controls reload interval
4fd857c fill as much as possible
26e8d90 add flask to requirements
fc0b1f3 update with docker server
6ada0bf initial commit


@@ -64,14 +64,13 @@ def generate_image(
file_name: str,
comfy_prompt: str,
workflow_path: str = "./workflow_sdxl.json",
prompt_node: str = "Positive",
prompt_node: str = "CLIP Text Encode (Prompt)",
seed_node: str = "KSampler",
seed_param: str = "seed",
save_node: str = "Save Image",
save_param: str = "filename_prefix",
model_node: Optional[str] = "Load Checkpoint",
model_param: Optional[str] = "ckpt_name",
model: Optional[str] = "None",
) -> None:
"""Generates an image using the Comfy API with configurable workflow settings."""
try:
@@ -101,7 +100,21 @@ def generate_image(
user_config["comfyui"]["height"],
)
wf.set_node_param(model_node, model_param, model)
# Conditionally set model if node and param are provided
if model_node and model_param:
if "FLUX" in workflow_path:
valid_models = user_config["comfyui:flux"]["models"].split(",")
else:
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(
set(get_available_models()) & set(available_model_list)
)
if not valid_models:
raise Exception("No valid models available.")
model = random.choice(valid_models)
wf.set_node_param(model_node, model_param, model)
# Generate image
logging.debug(f"Generating image: {file_name}")
@@ -122,40 +135,23 @@ def generate_image(
logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
raise
def select_model(model: str) -> tuple[str, str]:
use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
if model == "Random":
selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
elif "flux" in model.lower():
selected_workflow = "FLUX"
else:
selected_workflow = "SDXL"
if model == "Random":
if selected_workflow == "FLUX":
valid_models = user_config["comfyui:flux"]["models"].split(",")
else: # SDXL
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(set(get_available_models()) & set(available_model_list))
model = random.choice(valid_models)
return selected_workflow, model
def create_image(prompt: str | None = None, model: str = "Random") -> None:
"""Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
def create_image(prompt: str | None = None) -> None:
"""Main function for generating images."""
if prompt is None:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
if not prompt:
logging.error("No prompt generated.")
return
save_prompt(prompt)
selected_workflow, model = select_model(model)
use_flux = json.loads((user_config["comfyui"].get("FLUX", False)).lower())
only_flux = json.loads((user_config["comfyui"].get("ONLY_FLUX", False)).lower())
selected_workflow = "SDXL"
if use_flux:
selected_workflow = "FLUX" if only_flux else random.choice(["FLUX", "SDXL"])
if selected_workflow == "FLUX":
generate_image(
@@ -167,11 +163,12 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
seed_param="seed",
save_node="CivitAI Image Saver",
save_param="filename",
model_node="Unet Loader (GGUF)",
model_param="unet_name",
model=model
model_node="CivitAI Image Saver",
model_param="modelname",
)
else: # SDXL
generate_image("image", comfy_prompt=prompt, model=model)
else:
generate_image("image", prompt)
logging.info(f"{selected_workflow} generation started with prompt: {prompt}")


@@ -1,4 +1,3 @@
import subprocess
import configparser
import logging
import sys
@@ -66,7 +65,6 @@ def rename_image() -> str | None:
def get_details_from_png(path):
try:
date = datetime.fromtimestamp(os.path.getctime(path)).strftime("%d-%m-%Y")
with Image.open(path) as img:
try:
# Flux workflow
@@ -78,38 +76,11 @@ def get_details_from_png(path):
data = json.loads(img.info["prompt"])
prompt = data['6']['inputs']['text']
model = data['4']['inputs']['ckpt_name']
return {"p":prompt,"m":model,"d":date} or {"p":"","m":"","c":""}
return {"p":prompt,"m":model} or {"p":"","m":""}
except Exception as e:
print(f"Error reading metadata from {path}: {e}")
return ""
def get_current_version():
try:
# Run the command and capture the output
result = subprocess.run(
['bump-my-version', 'show', 'current_version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True, # to get string output instead of bytes
check=True # raises exception if command fails
)
version = result.stdout.strip()
return version
except subprocess.CalledProcessError as e:
print("Error running bump-my-version:", e)
return None
def load_models_from_config():
flux_models = user_config["comfyui:flux"]["models"].split(",")
sdxl_models = user_config["comfyui"]["models"].split(",")
all_models = flux_models + sdxl_models
return all_models
def load_topics_from_config():
topics = user_config["comfyui"]["topics"].split(", ")
return topics
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]


@@ -12,33 +12,25 @@ LOG_FILE = "./prompts_log.jsonl"
user_config = load_config()
output_folder = user_config["comfyui"]["output_dir"]
def create_prompt_on_openwebui(prompt: str, topic: str = "random") -> str:
def create_prompt_on_openwebui(prompt: str) -> str:
"""Sends prompt to OpenWebui and returns the generated response."""
topic_instruction = ""
selected_topic = ""
# Unique list of recent prompts
recent_prompts = list(set(load_recent_prompts()))
if topic == "random":
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
# Decide on whether to include a topic (e.g., 30% chance to include)
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
topic_instruction = ""
if random.random() < 0.5 and topics:
selected_topic = random.choice(topics)
elif topic != "":
selected_topic = topic
else:
# Decide on whether to include a topic (e.g., 30% chance to include)
topics = [t.strip() for t in user_config["comfyui"]["topics"].split(",") if t.strip()]
if random.random() < 0.3 and topics:
selected_topic = random.choice(topics)
if selected_topic != "":
topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
user_content = (
"Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors.”"
+ topic_instruction
+ "Avoid prompts similar to the following:"
"Here are the prompts from the last 7 days:\n\n"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+ "\n\nDo not repeat ideas, themes, or settings from the above. "
"Now generate a new, completely original Stable Diffusion prompt that hasn't been done yet."
+ topic_instruction
)
model = random.choice(user_config["openwebui"]["models"].split(","))
response = litellm.completion(
api_base=user_config["openwebui"]["base_url"],
@@ -75,4 +67,4 @@ def create_prompt_on_openwebui(prompt: str, topic: str = "random") -> str:
# )
# prompt = response["choices"][0]["message"]["content"].strip('"')
logging.debug(prompt)
return prompt.split(": ")[-1]
return prompt


@@ -1,19 +1,15 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Create An Image</title>
<style>
/* ---------- reset ---------- */
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
/* ---------- layout ---------- */
body {
display: flex;
flex-direction: column;
@@ -25,7 +21,6 @@
font-family: Arial, sans-serif;
padding: 20px;
}
textarea {
width: 80vw;
height: 200px;
@@ -39,15 +34,11 @@
color: #eee;
border: 1px solid #333;
}
.button-group {
display: flex;
gap: 20px;
align-items: center;
}
button,
select {
button {
background: #333;
color: white;
border: none;
@@ -57,163 +48,42 @@
cursor: pointer;
transition: background 0.3s;
}
button:hover,
select:hover {
button:hover {
background: #555;
}
/* ---------- spinner ---------- */
#spinner-overlay {
position: fixed;
inset: 0;
display: flex;
align-items: center;
justify-content: center;
background: rgba(0, 0, 0, 0.6);
visibility: hidden;
z-index: 1000;
}
.spinner {
width: 50px;
height: 50px;
border: 6px solid #555;
border-top-color: white;
border-radius: 50%;
animation: spin 0.8s linear infinite;
}
@keyframes spin {
to {
transform: rotate(360deg);
}
}
@media (max-width: 600px) {
body {
min-height: 100dvh;
height: auto;
justify-content: flex-start;
padding-top: 40px;
}
.button-group {
flex-direction: column;
align-items: stretch;
width: 100%;
}
button,
select {
width: 100%;
}
textarea {
height: 150px;
}
}
</style>
</head>
<body>
<h1 style="margin-bottom: 20px;">Create An Image</h1>
<textarea id="prompt-box" placeholder="Enter your custom prompt here..."></textarea>
<div class="button-group">
<button onclick="showSpinner(); location.href='/'">Back</button>
<button onclick="location.href='/'">Back</button>
<button onclick="sendPrompt()">Send Prompt</button>
<button onclick="randomPrompt()">Random Prompt</button>
<select id="model-select">
<option value="" selected>Random</option>
<!-- Group: FLUX -->
<optgroup label="FLUX">
{% for m in models if 'flux' in m|lower %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
{% endfor %}
</optgroup>
<!-- Group: SDXL -->
<optgroup label="SDXL">
{% for m in models if 'flux' not in m|lower %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
{% endfor %}
</optgroup>
</select>
<select id="topic-select">
<option value="">No Topic</option>
<option value="random">Random</option>
<optgroup label="Topics">
{% for t in topics %}
<option value="{{ t }}">{{ t }}</option>
{% endfor %}
</optgroup>
</select>
</div>
<!-- waiting overlay -->
<div id="spinner-overlay">
<div class="spinner"></div>
<button onclick="location.href='/create'">Random Prompt</button>
</div>
<script>
const overlay = document.getElementById('spinner-overlay');
function showSpinner() { overlay.style.visibility = 'visible'; }
function sendPrompt() {
showSpinner();
const prompt = document.getElementById('prompt-box').value;
const model = document.getElementById('model-select').value;
const formData = new URLSearchParams();
formData.append('prompt', prompt);
formData.append('model', model);
fetch('/create', {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
},
body: formData.toString()
})
.then(response => {
window.location.href = response.redirected ? response.url : '/create';
})
.catch(error => {
overlay.style.visibility = 'hidden';
alert("Error sending prompt: " + error);
});
}
// wrapper for Random Prompt button so it also sends the model
function randomPrompt() {
showSpinner();
const model = document.getElementById('model-select').value;
const topic = document.getElementById('topic-select').value; // this line was missing
const formData = new URLSearchParams();
formData.append('model', model);
formData.append('topic', topic); // include topic in request
fetch('/create', {
method: 'POST',
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: formData.toString()
})
.then(response => {
window.location.href = response.redirected ? response.url : '/create';
})
.catch(error => {
overlay.style.visibility = 'hidden';
alert("Error requesting random prompt: " + error);
});
}).then(response => {
if (response.redirected) {
window.location.href = response.url;
} else {
alert("Image creation request sent.");
}
}).catch(error => {
alert("Error sending prompt: " + error);
});
}
</script>
</body>
</html>


@@ -55,7 +55,6 @@
align-items: center;
flex-direction: column;
z-index: 999;
padding: 20px 0;
}
.lightbox img {
@@ -101,19 +100,12 @@
max-width: 80%;
text-align: left;
margin-top: 20px;
max-height: 25vh;
/* NEW: restrict height */
overflow-y: auto;
/* NEW: allow vertical scroll */
}
/* Back button fixed top right */
.home-button {
position: fixed;
top: 20px;
right: 20px;
z-index: 500;
/* lower than lightbox (999) */
.button-group {
display: flex;
justify-content: center;
margin-top: 2rem;
}
.button-link {
@@ -163,10 +155,6 @@
font-size: 14px;
max-width: 90%;
padding: 8px 16px;
max-height: 20vh;
/* smaller height for mobile */
overflow-y: auto;
/* keep scroll on mobile too */
}
.button-link {
@@ -178,13 +166,15 @@
</head>
<body>
<a href="/" class="button-link home-button">Home</a>
<h1>Image Archive</h1>
<!-- Empty gallery container; images will be loaded incrementally -->
<div class="gallery" id="gallery"></div>
<div class="button-group">
<a href="/" class="button-link">Back</a>
</div>
<!-- Lightbox -->
<div class="lightbox" id="lightbox">
<span class="close" onclick="closeLightbox()">&times;</span>
@@ -205,10 +195,10 @@
<script>
const gallery = document.getElementById('gallery');
const batchSize = 9; // images to load per batch
const batchSize = 6; // images to load per batch
let loadedCount = 0;
let currentIndex = 0;
const detailsCache = {}; // Cache for image details
const detailsCache = {}; // Cache for image details
function createImageElement(image) {
const img = document.createElement('img');
@@ -264,8 +254,7 @@
if (detailsCache[filename]) {
document.getElementById("lightbox-prompt").textContent =
`Model:${detailsCache[filename].model} - Created:${detailsCache[filename].date}\n\n${detailsCache[filename].prompt}`;
`Model: ${detailsCache[filename].model}\n\n${detailsCache[filename].prompt}`;
} else {
document.getElementById("lightbox-prompt").textContent = "Loading…";
@@ -277,7 +266,7 @@
.then(data => {
detailsCache[filename] = data; // Cache the data
document.getElementById("lightbox-prompt").textContent =
`Model:${data.model} - Created:${data.date}\n\n${data.prompt}`;
`Model: ${data.model}\n\n${data.prompt}`;
})
.catch(() => {
document.getElementById("lightbox-prompt").textContent = "Couldnt load details.";
@@ -287,17 +276,8 @@
function nextImage() {
const images = getGalleryImages();
if (currentIndex + 1 >= images.length && loadedCount < allImages.length) {
loadNextBatch();
// Wait briefly to ensure DOM updates
setTimeout(() => {
currentIndex++;
showImageAndLoadDetails(currentIndex);
}, 100);
} else {
currentIndex = (currentIndex + 1) % images.length;
showImageAndLoadDetails(currentIndex);
}
currentIndex = (currentIndex + 1) % images.length;
showImageAndLoadDetails(currentIndex);
}
function prevImage() {
@@ -306,7 +286,6 @@
showImageAndLoadDetails(currentIndex);
}
function closeLightbox() {
document.getElementById("lightbox").style.display = "none";
}


@@ -52,11 +52,10 @@
</style>
</head>
<body>
<div class="message">Image will be made with <i>{{ model }}</i> using prompt:</div>
<div class="message">Image will be made using prompt:</div>
<div class="prompt-text">
{{ prompt }}
</div>
<button onclick="location.href='/'">Home</button>
</body>
</html>


@@ -2,8 +2,8 @@
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>AI Image of the Day</title>
<style>
* {
@@ -21,12 +21,8 @@
background: black;
color: white;
font-family: Arial, sans-serif;
position: relative;
padding-top: 20px;
padding-bottom: 20px;
}
.image-container {
max-width: 90vw;
max-height: 80vh;
@@ -53,10 +49,6 @@
border-radius: 10px;
max-width: 80vw;
text-align: left;
max-height: 30vh;
/* NEW: limit height */
overflow-y: auto;
/* NEW: allow scrolling */
}
.button-group {
@@ -82,47 +74,6 @@
.button-link:hover {
background: #555;
}
/* New style for version number */
.version {
position: fixed;
bottom: 8px;
right: 12px;
color: #666;
font-size: 12px;
font-family: monospace;
user-select: none;
pointer-events: none;
opacity: 0.6;
}
@media (max-width: 768px) {
.image-container {
max-width: 100vw;
max-height: 50vh;
}
img {
max-width: 100%;
max-height: 100%;
}
.prompt {
max-height: 20vh;
font-size: 14px;
padding: 10px 15px;
}
.button-group {
flex-direction: column;
gap: 10px;
}
.button-link {
font-size: 14px;
padding: 8px 16px;
}
}
</style>
<script>
setInterval(() => {
@@ -134,7 +85,7 @@
<body>
{% if image %}
<div class="image-container">
<img src="{{ url_for('images', filename=image) }}" alt="Latest Image" />
<img src="{{ url_for('images', filename=image) }}" alt="Latest Image">
</div>
{% if prompt %}
<div class="prompt">{{ prompt }}</div>
@@ -146,9 +97,4 @@
{% else %}
<p>No images found</p>
{% endif %}
<!-- Version number at bottom right -->
<div class="version">v{{ version }}</div>
</body>
</html>


@@ -1,72 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Login</title>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 100vh;
background: black;
color: white;
font-family: Arial, sans-serif;
padding: 20px;
text-align: center;
}
.message {
font-size: 22px;
margin-bottom: 20px;
}
.prompt-text {
font-size: 20px;
background: #111;
padding: 20px;
border-radius: 10px;
border: 1px solid #333;
max-width: 80vw;
margin-bottom: 30px;
}
input[type="password"] {
padding: 10px;
border-radius: 8px;
border: 1px solid #555;
background: #222;
color: white;
font-size: 16px;
margin-bottom: 20px;
width: 250px;
}
button {
background: #333;
color: white;
border: none;
padding: 10px 20px;
border-radius: 8px;
font-size: 16px;
cursor: pointer;
transition: background 0.3s;
}
button:hover {
background: #555;
}
</style>
</head>
<body>
<div class="message">Please enter the password to continue:</div>
<form method="post">
<div class="prompt-text">
<input type="password" name="password" placeholder="Password" required>
</div>
<button type="submit">Login</button>
</form>
</body>
</html>


@@ -3,8 +3,6 @@ reload_interval = 30000
auto_regen = True
regen_time = 03:00
port = 5000
create_requires_auth = False
password_for_auth = create
[comfyui]
comfyui_url = http://comfyui


@@ -59,7 +59,7 @@
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "Positive"
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
@@ -72,7 +72,7 @@
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "Negative"
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {