Compare commits

...

26 Commits
0.1.9 ... main

Author SHA1 Message Date
c7d71bfd03 Bump version: 0.1.18 → 0.1.19 2025-06-06 17:27:09 +01:00
1a0542861c basic working auth for create_image page 2025-06-06 17:26:37 +01:00
0fc549c199 Bump version: 0.1.17 → 0.1.18 2025-06-06 15:36:00 +01:00
f7f049aacb split out selecting model so can be returned to the ui 2025-06-06 15:35:50 +01:00
669dad4044 Bump version: 0.1.16 → 0.1.17 2025-06-06 15:13:20 +01:00
c726d23707 remove any preceeding text on the ollama prompt 2025-06-06 15:13:03 +01:00
2c9429d640 Bump version: 0.1.15 → 0.1.16 2025-06-06 13:10:43 +01:00
ac388b0f4e Add grouping to model list 2025-06-06 13:10:32 +01:00
4272e1d40e Bump version: 0.1.14 → 0.1.15 2025-06-06 12:55:22 +01:00
4e1e240e30 Bump version: 0.1.13 → 0.1.14 2025-06-06 12:52:15 +01:00
3d2524c2ba Bump version: 0.1.12 → 0.1.13 2025-06-06 12:52:03 +01:00
52574de881 hook doesn't work 2025-06-06 12:51:41 +01:00
ef9bf72b84 Bump version: 0.1.12 → 0.1.13 2025-06-06 12:49:35 +01:00
0cd6c6c5c8 updated bump config 2025-06-06 12:49:10 +01:00
cd6b8a8d25 Bump version: 0.1.11 → 0.1.12 2025-06-06 12:47:51 +01:00
1aa540fa03 update bump-my-version config 2025-06-06 12:47:40 +01:00
33bfee0220 remove extension from displayed models 2025-06-06 12:39:11 +01:00
f041a6afea add padding to the lightbox 2025-06-06 12:32:49 +01:00
336c03a888 allow navigation in lightbox over all images 2025-06-06 12:31:30 +01:00
9ce6ff25ea add padding to the index page 2025-06-06 12:28:39 +01:00
5be690f6c1 reformat the text on the gallery 2025-06-06 12:28:32 +01:00
4da908d0da Update docker-publish.yml: only build on tag 2025-06-06 11:23:21 +01:00
7adaaf4491 bumped version 2025-06-06 11:21:42 +01:00
a75c7c554a better view on mobiles 2025-06-06 11:21:32 +01:00
2ff03fe101 upversion 2025-06-06 11:16:10 +01:00
4b52e5c713 code updates for date, fix update flux guff, new prompt logic 2025-06-06 11:15:43 +01:00
13 changed files with 273 additions and 109 deletions

View File

@@ -1,5 +1,5 @@
[tool.bumpversion]
current_version = "0.1.9"
current_version = "0.1.19"
parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
serialize = ["{major}.{minor}.{patch}"]
search = "{current_version}"
@@ -7,12 +7,12 @@ replace = "{new_version}"
regex = false
ignore_missing_version = false
ignore_missing_files = false
tag = false
tag = true
sign_tags = false
tag_name = "v{new_version}"
tag_name = "{new_version}"
tag_message = "Bump version: {current_version} → {new_version}"
allow_dirty = false
commit = false
commit = true
message = "Bump version: {current_version} → {new_version}"
moveable_tags = []
commit_args = ""
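The parse and serialize settings above control how versions are read and re-written; a quick self-contained check of that round trip (the regex is copied from the config, the version string is just an example):

import re

parse = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"   # as in the config above
m = re.match(parse, "0.1.19")
print("{major}.{minor}.{patch}".format(**m.groupdict()))    # -> 0.1.19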

View File

@@ -2,8 +2,7 @@ name: Build and Publish Docker Image
on:
push:
branches: [main]
tags: ["*"] # triggers on any tag push
tags: ["*"] # Only triggers on tag pushes
workflow_dispatch:
jobs:
@@ -27,23 +26,15 @@ jobs:
IMAGE_NAME="ai-frame-image-server"
REGISTRY="${{ secrets.REGISTRY }}"
USERNAME="${{ secrets.USERNAME }}"
GIT_TAG="${GITHUB_REF#refs/tags/}"
IMAGE_TAGGED="$REGISTRY/$USERNAME/$IMAGE_NAME:$GIT_TAG"
IMAGE_LATEST="$REGISTRY/$USERNAME/$IMAGE_NAME:latest"
# Always build and tag as latest
echo "🔧 Building $IMAGE_LATEST"
docker build -t $IMAGE_LATEST .
echo "🔧 Building $IMAGE_TAGGED and $IMAGE_LATEST"
docker build -t $IMAGE_LATEST -t $IMAGE_TAGGED .
echo "📤 Pushing $IMAGE_TAGGED"
docker push $IMAGE_TAGGED
echo "📤 Pushing $IMAGE_LATEST"
docker push $IMAGE_LATEST
# If this is a tag push, tag the image accordingly
if [[ "${GITHUB_REF}" == refs/tags/* ]]; then
GIT_TAG="${GITHUB_REF#refs/tags/}"
IMAGE_TAGGED="$REGISTRY/$USERNAME/$IMAGE_NAME:$GIT_TAG"
echo "🏷️ Also tagging as $IMAGE_TAGGED"
docker tag $IMAGE_LATEST $IMAGE_TAGGED
echo "📤 Pushing $IMAGE_TAGGED"
docker push $IMAGE_TAGGED
fi
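The new script derives the image tag straight from GITHUB_REF, on the assumption (per the comment in the diff) that the job runs for tag pushes. A small Python sketch of that string handling; the registry, username, and tag values are made up, the real ones come from the repository secrets:

github_ref = "refs/tags/0.1.19"                   # what the Actions runner sets for a tag push
git_tag = github_ref.removeprefix("refs/tags/")   # same idea as ${GITHUB_REF#refs/tags/}
image_tagged = f"registry.example.com/someuser/ai-frame-image-server:{git_tag}"
image_latest = "registry.example.com/someuser/ai-frame-image-server:latest"
print(image_tagged, image_latest)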

.gitignore (vendored): 1 line changed
View File

@@ -9,3 +9,4 @@ output/
prompts_log.jsonl
publish.sh
test.py
.vscode/launch.json

View File

@@ -5,23 +5,34 @@ from flask import (
request,
jsonify,
redirect,
url_for
url_for,
session,
render_template_string,
)
import os
import time
import threading
from apscheduler.schedulers.background import BackgroundScheduler
from libs.generic import load_config, load_recent_prompts, get_details_from_png, get_current_version, load_models_from_config
from libs.comfyui import cancel_current_job, create_image
from libs.generic import (
load_config,
load_recent_prompts,
get_details_from_png,
get_current_version,
load_models_from_config,
)
from libs.comfyui import cancel_current_job, create_image, select_model
from libs.ollama import create_prompt_on_openwebui
#workflow test commit
# workflow test commit
user_config = load_config()
app = Flask(__name__)
app.secret_key = os.environ.get('SECRET_KEY')
image_folder = "./output"
@app.route("/", methods=["GET"])
def index() -> str:
"""
@@ -39,18 +50,34 @@ def index() -> str:
image=image_filename,
prompt=prompt,
reload_interval=user_config["frame"]["reload_interval"],
version=version
version=version,
)
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
if request.form['password'] == user_config["frame"]["password_for_auth"]:
session['authenticated'] = True
return render_template("create_image.html", models=load_models_from_config())
else:
return redirect(url_for('login'))
return render_template('login.html')
@app.route("/images", methods=["GET"])
def gallery() -> str:
images = []
for f in os.listdir(image_folder):
if f.lower().endswith(('png', 'jpg', 'jpeg', 'gif')):
images.append({'filename': f})
images = sorted(images, key=lambda x: os.path.getmtime(os.path.join(image_folder, x['filename'])), reverse=True)
if f.lower().endswith(("png", "jpg", "jpeg", "gif")):
images.append({"filename": f})
images = sorted(
images,
key=lambda x: os.path.getmtime(os.path.join(image_folder, x["filename"])),
reverse=True,
)
return render_template("gallery.html", images=images)
@app.route("/image-details/<filename>", methods=["GET"])
def image_details(filename):
@@ -58,15 +85,12 @@ def image_details(filename):
if not os.path.exists(path):
return {"error": "File not found"}, 404
details = get_details_from_png(path)
return {
"prompt": details["p"],
"model": details["m"]
}
return {"prompt": details["p"], "model": details["m"], "date": details["d"]}
@app.route('/images/thumbnails/<path:filename>')
@app.route("/images/thumbnails/<path:filename>")
def serve_thumbnail(filename):
return send_from_directory('output/thumbnails', filename)
return send_from_directory("output/thumbnails", filename)
@app.route("/images/<filename>", methods=["GET"])
@@ -97,16 +121,17 @@ def cancel_job() -> None:
def create():
if request.method == "POST":
prompt = request.form.get("prompt")
model = request.form.get("model", "Random")
selected_workflow, model = select_model(request.form.get("model") or "Random")
if not prompt:
prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"])
# Start generation in background
threading.Thread(target=lambda: create_image(prompt, model)).start()
# store prompt in query string temporarily
return redirect(url_for("image_queued", prompt=prompt))
return redirect(
url_for("image_queued", prompt=prompt, model=model.split(".")[0])
)
# For GET requests, just show the form to enter prompt
return render_template("create_image.html", models=load_models_from_config())
@@ -115,26 +140,26 @@ def create():
@app.route("/image_queued")
def image_queued():
prompt = request.args.get("prompt", "No prompt provided.")
return render_template("image_queued.html", prompt=prompt)
model = request.args.get("model", "No model selected.").split(".")[0]
return render_template("image_queued.html", prompt=prompt, model=model)
def scheduled_task() -> None:
"""Executes the scheduled image generation task."""
print(f"Executing scheduled task at {time.strftime('%Y-%m-%d %H:%M:%S')}")
create_image(None)
@app.route("/create_image", methods=["GET"])
def create_image_endpoint() -> str:
"""
Renders the create image template with image and prompt.
"""
if user_config["frame"]["create_requires_auth"] == "True" and not session.get('authenticated'):
return redirect(url_for("login"))
models = load_models_from_config()
models.insert(0, "Random")
return render_template(
"create_image.html", models=models
)
return render_template("create_image.html", models=models)
if user_config["frame"]["auto_regen"] == "True":
@@ -148,10 +173,9 @@ if user_config["frame"]["auto_regen"] == "True":
minute=regen_time[1],
id="scheduled_task",
max_instances=1, # prevent overlapping
replace_existing=True # don't double-schedule
replace_existing=True, # don't double-schedule
)
scheduler.start()
os.makedirs(image_folder, exist_ok=True)
app.run(host="0.0.0.0", port=user_config["frame"]["port"], debug=True)
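For context on the new /login route and the create_requires_auth gate above, here is a self-contained sketch of the same session-based pattern. The demo app, its secret key, and the hard-coded password are illustrative stand-ins, not the project's actual module; in the real code those values come from the SECRET_KEY environment variable and user_config:

from flask import Flask, request, session, redirect, url_for

demo = Flask(__name__)
demo.secret_key = "dev-only-secret"   # the real app reads SECRET_KEY from the environment
PASSWORD = "create"                   # stands in for user_config["frame"]["password_for_auth"]

@demo.route("/login", methods=["GET", "POST"])
def login():
    if request.method == "POST":
        if request.form["password"] == PASSWORD:
            session["authenticated"] = True   # flag later checked by the protected page
            return redirect(url_for("create_image_endpoint"))
        return redirect(url_for("login"))
    return '<form method="post"><input name="password" type="password"><button>Login</button></form>'

@demo.route("/create_image")
def create_image_endpoint():
    if not session.get("authenticated"):
        return redirect(url_for("login"))
    return "authenticated: the create-image form would render here"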

View File

@@ -121,8 +121,29 @@ def generate_image(
except Exception as e:
logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")
raise
def select_model(model: str) -> tuple[str, str]:
use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
if model == "Random":
selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
elif "flux" in model.lower():
selected_workflow = "FLUX"
else:
selected_workflow = "SDXL"
if model == "Random":
if selected_workflow == "FLUX":
valid_models = user_config["comfyui:flux"]["models"].split(",")
else: # SDXL
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(set(get_available_models()) & set(available_model_list))
model = random.choice(valid_models)
return selected_workflow, model
def create_image(prompt: str | None = None, model: str = "Random") -> None:
"""Generate an image with a chosen workflow (Random, FLUX*, or SDXL*)."""
@@ -134,18 +155,9 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
return
save_prompt(prompt)
use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
if model == "Random":
selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
elif "flux" in model.lower():
selected_workflow = "FLUX"
else:
selected_workflow = "SDXL"
selected_workflow, model = select_model(model)
if selected_workflow == "FLUX":
if model == "Random":
valid_models = user_config["comfyui:flux"]["models"].split(",")
model = random.choice(valid_models)
generate_image(
file_name="image",
comfy_prompt=prompt,
@@ -155,17 +167,11 @@ def create_image(prompt: str | None = None, model: str = "Random") -> None:
seed_param="seed",
save_node="CivitAI Image Saver",
save_param="filename",
model_node="CivitAI Image Saver",
model_param="modelname",
model_node="Unet Loader (GGUF)",
model_param="unet_name",
model=model
)
else: # SDXL
if model == "Random":
available_model_list = user_config["comfyui"]["models"].split(",")
valid_models = list(set(get_available_models()) & set(available_model_list))
model = random.choice(valid_models)
generate_image("image", comfy_prompt=prompt, model=model)
logging.info(f"{selected_workflow} generation started with prompt: {prompt}")

View File

@@ -66,6 +66,7 @@ def rename_image() -> str | None:
def get_details_from_png(path):
try:
date = datetime.fromtimestamp(os.path.getctime(path)).strftime("%d-%m-%Y")
with Image.open(path) as img:
try:
# Flux workflow
@@ -77,7 +78,7 @@ def get_details_from_png(path):
data = json.loads(img.info["prompt"])
prompt = data['6']['inputs']['text']
model = data['4']['inputs']['ckpt_name']
return {"p":prompt,"m":model} or {"p":"","m":""}
return {"p":prompt,"m":model,"d":date} or {"p":"","m":"","c":""}
except Exception as e:
print(f"Error reading metadata from {path}: {e}")
return ""

View File

@@ -24,13 +24,13 @@ def create_prompt_on_openwebui(prompt: str) -> str:
topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
user_content = (
"Here are the prompts from the last 7 days:\n\n"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+ "\n\nDo not repeat ideas, themes, or settings from the above. "
"Now generate a new, completely original Stable Diffusion prompt that hasn't been done yet."
"Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors.”"
+ topic_instruction
+ "Avoid prompts similar to the following:"
+ "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
)
model = random.choice(user_config["openwebui"]["models"].split(","))
response = litellm.completion(
api_base=user_config["openwebui"]["base_url"],
@@ -67,4 +67,4 @@ def create_prompt_on_openwebui(prompt: str) -> str:
# )
# prompt = response["choices"][0]["message"]["content"].strip('"')
logging.debug(prompt)
return prompt
return prompt.split(": ")[-1]
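The new trailing split is what implements the "remove any preceding text" commit: if the model prefixes its answer (for example "Prompt: ..."), only the part after the last ": " is kept. A quick illustration, with made-up example strings:

print("Prompt: a fox racing a tram at dawn".split(": ")[-1])   # -> a fox racing a tram at dawn
print("a fox racing a tram at dawn".split(": ")[-1])           # no prefix: string is returned unchanged
# note: a ": " inside the prompt itself would also be split away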

View File

@@ -1,12 +1,17 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Create An Image</title>
<style>
/* ---------- reset ---------- */
* { margin: 0; padding: 0; box-sizing: border-box; }
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
/* ---------- layout ---------- */
body {
@@ -20,6 +25,7 @@
font-family: Arial, sans-serif;
padding: 20px;
}
textarea {
width: 80vw;
height: 200px;
@@ -33,12 +39,15 @@
color: #eee;
border: 1px solid #333;
}
.button-group {
display: flex;
gap: 20px;
align-items: center;
}
button, select {
button,
select {
background: #333;
color: white;
border: none;
@@ -48,8 +57,11 @@
cursor: pointer;
transition: background 0.3s;
}
button:hover,
select:hover { background: #555; }
select:hover {
background: #555;
}
/* ---------- spinner ---------- */
#spinner-overlay {
@@ -62,6 +74,7 @@
visibility: hidden;
z-index: 1000;
}
.spinner {
width: 50px;
height: 50px;
@@ -70,9 +83,39 @@
border-radius: 50%;
animation: spin 0.8s linear infinite;
}
@keyframes spin { to { transform: rotate(360deg); } }
@keyframes spin {
to {
transform: rotate(360deg);
}
}
@media (max-width: 600px) {
body {
min-height: 100dvh;
height: auto;
justify-content: flex-start;
padding-top: 40px;
}
.button-group {
flex-direction: column;
align-items: stretch;
width: 100%;
}
button,
select {
width: 100%;
}
textarea {
height: 150px;
}
}
</style>
</head>
<body>
<h1 style="margin-bottom: 20px;">Create An Image</h1>
@@ -85,13 +128,21 @@
<button onclick="randomPrompt()">Random Prompt</button>
<!-- new model selector -->
<select id="model-select">
{% for m in models %}
<option value="{{ m }}">{{ m }}</option>
{% endfor %}
</select>
<option value="" selected>Random</option>
<!-- Group: FLUX -->
<optgroup label="FLUX">
{% for m in models if 'flux' in m|lower %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
{% endfor %}
</optgroup>
<!-- Group: SDXL -->
<optgroup label="SDXL">
{% for m in models if 'flux' not in m|lower %}
<option value="{{ m }}">{{ m.rsplit('.', 1)[0] }}</option>
{% endfor %}
</optgroup>
</select>
</div>
@@ -108,7 +159,7 @@
function sendPrompt() {
showSpinner();
const prompt = document.getElementById('prompt-box').value;
const model = document.getElementById('model-select').value;
const model = document.getElementById('model-select').value;
const formData = new URLSearchParams();
formData.append('prompt', prompt);
@@ -119,13 +170,13 @@
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: formData.toString()
})
.then(response => {
window.location.href = response.redirected ? response.url : '/create';
})
.catch(error => {
overlay.style.visibility = 'hidden';
alert("Error sending prompt: " + error);
});
.then(response => {
window.location.href = response.redirected ? response.url : '/create';
})
.catch(error => {
overlay.style.visibility = 'hidden';
alert("Error sending prompt: " + error);
});
}
// wrapper for Random Prompt button so it also sends the model
@@ -141,14 +192,15 @@
headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
body: formData.toString()
})
.then(response => {
window.location.href = response.redirected ? response.url : '/create';
})
.catch(error => {
overlay.style.visibility = 'hidden';
alert("Error requesting random prompt: " + error);
});
.then(response => {
window.location.href = response.redirected ? response.url : '/create';
})
.catch(error => {
overlay.style.visibility = 'hidden';
alert("Error requesting random prompt: " + error);
});
}
</script>
</body>
</html>
</html>
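The grouped <optgroup> markup above pairs with the "Add grouping to model list" and "remove extension from displayed models" commits. A small jinja2 sketch of the same filtering and extension stripping, run outside Flask; the model names are examples, the real list comes from load_models_from_config():

from jinja2 import Template

tpl = Template(
    "FLUX: {% for m in models if 'flux' in m|lower %}{{ m.rsplit('.', 1)[0] }} {% endfor %}\n"
    "SDXL: {% for m in models if 'flux' not in m|lower %}{{ m.rsplit('.', 1)[0] }} {% endfor %}"
)
print(tpl.render(models=["flux1-dev-Q4.gguf", "juggernautXL.safetensors"]))
# FLUX: flux1-dev-Q4
# SDXL: juggernautXL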

View File

@@ -55,6 +55,7 @@
align-items: center;
flex-direction: column;
z-index: 999;
padding: 20px 0;
}
.lightbox img {
@@ -263,7 +264,8 @@
if (detailsCache[filename]) {
document.getElementById("lightbox-prompt").textContent =
`Model: ${detailsCache[filename].model}\n\n${detailsCache[filename].prompt}`;
`Model:${detailsCache[filename].model} - Created:${detailsCache[filename].date}\n\n${detailsCache[filename].prompt}`;
} else {
document.getElementById("lightbox-prompt").textContent = "Loading…";
@@ -275,7 +277,7 @@
.then(data => {
detailsCache[filename] = data; // Cache the data
document.getElementById("lightbox-prompt").textContent =
`Model: ${data.model}\n\n${data.prompt}`;
`Model:${data.model} - Created:${data.date}\n\n${data.prompt}`;
})
.catch(() => {
document.getElementById("lightbox-prompt").textContent = "Couldnt load details.";
@@ -285,8 +287,17 @@
function nextImage() {
const images = getGalleryImages();
currentIndex = (currentIndex + 1) % images.length;
showImageAndLoadDetails(currentIndex);
if (currentIndex + 1 >= images.length && loadedCount < allImages.length) {
loadNextBatch();
// Wait briefly to ensure DOM updates
setTimeout(() => {
currentIndex++;
showImageAndLoadDetails(currentIndex);
}, 100);
} else {
currentIndex = (currentIndex + 1) % images.length;
showImageAndLoadDetails(currentIndex);
}
}
function prevImage() {
@@ -295,6 +306,7 @@
showImageAndLoadDetails(currentIndex);
}
function closeLightbox() {
document.getElementById("lightbox").style.display = "none";
}

View File

@@ -52,10 +52,11 @@
</style>
</head>
<body>
<div class="message">Image will be made using prompt:</div>
<div class="message">Image will be made with <i>{{ model }}</i> using prompt:</div>
<div class="prompt-text">
{{ prompt }}
</div>
<button onclick="location.href='/'">Home</button>
</body>
</html>

View File

@@ -22,9 +22,11 @@
color: white;
font-family: Arial, sans-serif;
position: relative;
/* So fixed elements inside work well */
padding-top: 20px;
padding-bottom: 20px;
}
.image-container {
max-width: 90vw;
max-height: 80vh;

templates/login.html (new file): 72 lines added
View File

@@ -0,0 +1,72 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Login</title>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
height: 100vh;
background: black;
color: white;
font-family: Arial, sans-serif;
padding: 20px;
text-align: center;
}
.message {
font-size: 22px;
margin-bottom: 20px;
}
.prompt-text {
font-size: 20px;
background: #111;
padding: 20px;
border-radius: 10px;
border: 1px solid #333;
max-width: 80vw;
margin-bottom: 30px;
}
input[type="password"] {
padding: 10px;
border-radius: 8px;
border: 1px solid #555;
background: #222;
color: white;
font-size: 16px;
margin-bottom: 20px;
width: 250px;
}
button {
background: #333;
color: white;
border: none;
padding: 10px 20px;
border-radius: 8px;
font-size: 16px;
cursor: pointer;
transition: background 0.3s;
}
button:hover {
background: #555;
}
</style>
</head>
<body>
<div class="message">Please enter the password to continue:</div>
<form method="post">
<div class="prompt-text">
<input type="password" name="password" placeholder="Password" required>
</div>
<button type="submit">Login</button>
</form>
</body>
</html>

View File

@@ -3,6 +3,8 @@ reload_interval = 30000
auto_regen = True
regen_time = 03:00
port = 5000
create_requires_auth = False
password_for_auth = create
[comfyui]
comfyui_url = http://comfyui