mirror of https://github.com/karl0ss/ai-frame-image-server.git
synced 2025-04-26 01:40:12 +01:00

commit 6ada0bfb18
initial commit

.gitignore (vendored, normal file, 9 lines)
@@ -0,0 +1,9 @@
venv/*
script.log
**/*.pyc
*.rtf
build/
dist/
user_config.cfg
Dockerfile
output/**.*

.python-version (normal file, 1 line)
@@ -0,0 +1 @@
3.11.9

.vscode/launch.json (vendored, normal file, 24 lines)
@@ -0,0 +1,24 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Python Debugger: Current File",
            "type": "debugpy",
            "request": "launch",
            "program": "${file}",
            "console": "integratedTerminal",
            "justMyCode": false,
            // "args": [
            //     "--num_inference_steps",
            //     "6",
            //     "--rtf_file",
            //     "./AllInDB.rtf",
            //     "--player_uuid",
            //     "2000252303"
            // ]
        }
    ]
}

ai_frame_image_server.py (normal file, 35 lines)
@@ -0,0 +1,35 @@
from flask import Flask, render_template, send_from_directory, redirect, url_for
import os
from lib import create_image

app = Flask(__name__)

image_folder = "./output"

def get_latest_image():
    """Get the latest image file from the directory."""
    files = [f for f in os.listdir(image_folder) if f.endswith(('.png', '.jpg', '.jpeg'))]
    if not files:
        return None
    latest_file = max(files, key=lambda f: os.path.getmtime(os.path.join(image_folder, f)))
    return latest_file


@app.route('/')
def index():
    latest_image = get_latest_image()
    return render_template("index.html", image=latest_image)

@app.route('/images/<filename>')
def images(filename):
    return send_from_directory(image_folder, filename)

@app.route('/create')
def create():
    """Endpoint to create a new image."""
    create_image()
    return redirect(url_for("index"))

if __name__ == '__main__':
    os.makedirs(image_folder, exist_ok=True)  # Ensure the folder exists
    app.run(debug=True)
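
Note (not part of the commit): the app above exposes three routes: "/" renders the newest image from ./output, "/images/<filename>" serves files from that folder, and "/create" calls lib.create_image() and redirects back to "/". Below is a minimal sketch of exercising those routes with Flask's built-in test client, assuming the repository root as the working directory, the requirements installed, and a user_config.cfg in place (lib.py reads it at import time):

import os
from ai_frame_image_server import app, image_folder  # image_folder is "./output"

os.makedirs(image_folder, exist_ok=True)  # the app itself only does this under __main__

with app.test_client() as client:
    resp = client.get("/")   # renders index.html with the latest image, or "No images found"
    print(resp.status_code)  # expect 200
    # client.get("/create")  # would call create_image() and redirect back to "/"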

lib.py (normal file, 76 lines)
@@ -0,0 +1,76 @@
import random
import configparser
import logging
import sys
import litellm
import time

from datetime import datetime

from comfy_api_simplified import ComfyApiWrapper, ComfyWorkflowWrapper

user_config = configparser.ConfigParser()
try:
    user_config.read("./user_config.cfg")
    output_folder = user_config["comfyui"]["output_dir"]
    logging.debug("Configuration loaded successfully.")
except KeyError as e:
    logging.error(f"Missing configuration key: {e}")
    sys.exit(1)


def send_prompt_to_openwebui(prompt):
    response = litellm.completion(
        api_base=user_config["openwebui"]["base_url"],
        model="openai/" + user_config["openwebui"]["model"],
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
        api_key=user_config["openwebui"]["api_key"],
    )

    return response["choices"][0]["message"]["content"].strip('"')


def generate_image(file_name, comfy_prompt):
    """Generate an image using the Comfy API."""
    try:
        # Initialize API and workflow
        api = ComfyApiWrapper(user_config["comfyui"]["comfyui_url"])
        wf = ComfyWorkflowWrapper("./workflow_api.json")

        # Set workflow parameters
        wf.set_node_param("KSampler", "seed", random.getrandbits(32))
        # wf.set_node_param("KSampler", "steps", steps)
        wf.set_node_param("CLIP Text Encode (Prompt)", "text", comfy_prompt)
        wf.set_node_param("Save Image", "filename_prefix", file_name)
        wf.set_node_param("Empty Latent Image", "width", user_config["comfyui"]["width"])
        wf.set_node_param("Empty Latent Image", "height", user_config["comfyui"]["height"])
        wf.set_node_param(
            "Load Checkpoint", "ckpt_name", user_config["comfyui"]["model"]
        )
        # Queue the workflow and wait for completion
        logging.debug(f"Generating image: {file_name}")
        results = api.queue_and_wait_images(wf, "Save Image")
        for filename, image_data in results.items():
            with open(
                user_config["comfyui"]["output_dir"] + file_name + ".png", "wb+"
            ) as f:
                f.write(image_data)
        logging.debug(f"Image generated successfully for UID: {file_name}")
    except Exception as e:
        logging.error(f"Failed to generate image for UID: {file_name}. Error: {e}")


def create_image():
    """Main function for generating images."""
    prompt = send_prompt_to_openwebui(user_config["comfyui"]["prompt"])
    print(f"Generated prompt: {prompt}")
    generate_image(str(time.time()), prompt)


# if __name__ == "__main__":
#     main()
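
Note (not part of the commit): generate_image() above addresses workflow nodes by the titles stored under "_meta"."title" in workflow_api.json (nodes "6" and "7" both carry the title "CLIP Text Encode (Prompt)"). A minimal stdlib-only sketch that checks those titles are present, useful if the workflow export is ever replaced:

import json

# Titles lib.py targets via wf.set_node_param(...)
expected = {"KSampler", "CLIP Text Encode (Prompt)", "Save Image",
            "Empty Latent Image", "Load Checkpoint"}

with open("./workflow_api.json") as f:
    workflow = json.load(f)

titles = {node["_meta"]["title"] for node in workflow.values()}
print("missing node titles:", (expected - titles) or "none")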

requirements.txt (binary, normal file)
Binary file not shown.

templates/index.html (normal file, 39 lines)
@@ -0,0 +1,39 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Latest Image</title>
    <style>
        * {
            margin: 0;
            padding: 0;
            overflow: hidden;
        }
        body {
            display: flex;
            align-items: center;
            justify-content: center;
            height: 100vh;
            background: black;
        }
        img {
            max-width: 100vw;
            max-height: 100vh;
            object-fit: contain;
        }
    </style>
    <script>
        setInterval(() => {
            location.reload();
        }, 30000); // Refresh every 30 seconds
    </script>
</head>
<body>
    {% if image %}
        <img src="{{ url_for('images', filename=image) }}" alt="Latest Image">
    {% else %}
        <p style="color: white;">No images found</p>
    {% endif %}
</body>
</html>

user_config.cfg.sample (normal file, 11 lines)
@@ -0,0 +1,11 @@
[comfyui]
comfyui_url = http://comfyui
model = zavychromaxl_v100.safetensors
output_dir = ./output/
prompt = "Be explicit, only return the prompt and no other text, Generate a random detailed prompt for stable diffusion."
width = 1568
height = 672

[openwebui]
base_url = https://openwebui
api_key = sk-
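
Note (not part of the commit): lib.py reads this file with configparser, using comfyui.output_dir at import time and openwebui.base_url, openwebui.model, and openwebui.api_key when a prompt is requested; the sample above does not define model under [openwebui], so a copied config needs that key added. A minimal sketch of loading it the same way, with a clearly labelled placeholder for the missing value (the placeholder is an assumption, not something from the commit):

import configparser

user_config = configparser.ConfigParser()
user_config.read("./user_config.cfg")  # a copy of user_config.cfg.sample

print(user_config["comfyui"]["output_dir"])  # ./output/
print(user_config["openwebui"]["base_url"])  # https://openwebui

# lib.py builds "openai/" + user_config["openwebui"]["model"]; fall back to a placeholder here
model = user_config["openwebui"].get("model", "<model-name>")
print("openai/" + model)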

workflow_api.json (normal file, 107 lines)
@@ -0,0 +1,107 @@
{
  "3": {
    "inputs": {
      "seed": 676047523401976,
      "steps": 30,
      "cfg": 6,
      "sampler_name": "dpmpp_3m_sde",
      "scheduler": "karras",
      "denoise": 1,
      "model": [
        "4",
        0
      ],
      "positive": [
        "6",
        0
      ],
      "negative": [
        "7",
        0
      ],
      "latent_image": [
        "5",
        0
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "4": {
    "inputs": {
      "ckpt_name": "zavychromaxl_v80.safetensors"
    },
    "class_type": "CheckpointLoaderSimple",
    "_meta": {
      "title": "Load Checkpoint"
    }
  },
  "5": {
    "inputs": {
      "width": 1568,
      "height": 672,
      "batch_size": 1
    },
    "class_type": "EmptyLatentImage",
    "_meta": {
      "title": "Empty Latent Image"
    }
  },
  "6": {
    "inputs": {
      "text": "A bustling cyberpunk street at night, filled with neon signs, rain-soaked pavement, and futuristic street vendors. High detail, vivid neon colors, and realistic reflections.",
      "clip": [
        "4",
        1
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Prompt)"
    }
  },
  "7": {
    "inputs": {
      "text": "text, watermark, deformed Avoid flat colors, poor lighting, and artificial elements. No unrealistic elements, low resolution, or flat colors. Avoid generic objects, poor lighting, and inconsistent styles.",
      "clip": [
        "4",
        1
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Prompt)"
    }
  },
  "8": {
    "inputs": {
      "samples": [
        "3",
        0
      ],
      "vae": [
        "4",
        2
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "9": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "8",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  }
}