Mirror of https://github.com/karl0ss/ai_image_frame_server.git (synced 2025-11-13 05:19:42 +00:00)

Compare commits

No commits in common. "main" and "0.5.2" have entirely different histories. The diff below reads from "main" (`-` lines, version 0.5.6) to "0.5.2" (`+` lines).
@@ -1,5 +1,5 @@
 [tool.bumpversion]
-current_version = "0.5.6"
+current_version = "0.5.2"
 parse = "(?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)"
 serialize = ["{major}.{minor}.{patch}"]
 replace = "{new_version}"

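For orientation, a minimal standalone sketch (not code from this repo) of what the `parse`/`serialize` pair above does inside bump-my-version: the regex splits a version string into named parts, and the template rebuilds it.

```python
import re

# parse pattern and serialize template copied from [tool.bumpversion] above
PARSE = r"(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
SERIALIZE = "{major}.{minor}.{patch}"

def bump_patch(version: str) -> str:
    """Split a version with the configured regex, bump patch, re-serialize."""
    parts = {k: int(v) for k, v in re.match(PARSE, version).groupdict().items()}
    parts["patch"] += 1
    return SERIALIZE.format(**parts)

print(bump_patch("0.5.2"))  # -> 0.5.3
```
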
@@ -4,7 +4,7 @@ FROM python:3.11-slim
 # Set the working directory in the container
 WORKDIR /app
 # Set version label
-ARG VERSION="0.5.6"
+ARG VERSION="0.5.2"
 LABEL version=$VERSION
 
 # Copy project files into the container

README.md (36 lines changed)
@@ -4,19 +4,12 @@ This project is a Flask-based web server designed to generate and display images
 
 ## Features
 
-* **Web Interface:** A simple web interface to view generated images, manage favourites, and monitor job queues.
-* **Image Generation:** Integrates with ComfyUI to generate images using SDXL, FLUX, and Qwen models based on given prompts.
-* **Prompt Generation:** Automatic prompt generation using OpenWebUI or OpenRouter APIs with topic-based theming.
+* **Web Interface:** A simple web interface to view generated images.
+* **Image Generation:** Integrates with ComfyUI to generate images based on given prompts and models.
 * **Scheduled Generation:** Automatically generates new images at a configurable time.
-* **Favourites System:** Mark and manage favourite images.
-* **Job Queue Management:** View and cancel running/pending image generation jobs.
-* **Thumbnail Generation:** Automatic thumbnail creation for generated images.
-* **Prompt Logging:** Maintains a log of recent prompts to avoid repetition.
-* **Settings Management:** Web-based configuration editor for all settings.
 * **Docker Support:** Comes with a `Dockerfile` and `docker-compose.yml` for easy setup and deployment.
-* **Configurable:** Most options can be configured through a `user_config.cfg` file or web interface.
+* **Configurable:** Most options can be configured through a `user_config.cfg` file.
 * **Authentication:** Optional password protection for image creation.
-* **Version Management:** Uses bump-my-version for version tracking.
 
 ## Prerequisites
 

@@ -40,8 +33,8 @@ This project is a Flask-based web server designed to generate and display images
     ```
 
 3. **Configure the application:**
-    * The `user_config.cfg` file will be automatically created from `user_config.cfg.sample` on first run if it doesn't exist.
-    * Edit `user_config.cfg` with your settings, or use the web-based settings page accessible by clicking the version number in the bottom right corner of the home page. See the [Configuration](#configuration) section for more details.
+    * Copy the `user_config.cfg.sample` to `user_config.cfg`.
+    * Edit `user_config.cfg` with your settings. See the [Configuration](#configuration) section for more details.
 
 4. **Run the application:**
     ```bash

@@ -58,8 +51,8 @@ This project is a Flask-based web server designed to generate and display images
     ```
 
 2. **Configure the application:**
-    * The `user_config.cfg` file will be automatically created from `user_config.cfg.sample` on first run if it doesn't exist.
-    * Edit `user_config.cfg` with your settings, or use the web-based settings page accessible by clicking the version number in the bottom right corner of any page. The `comfyui_url` should be the address of your ComfyUI instance, accessible from within the Docker network (e.g., `http://host.docker.internal:8188` or your server's IP).
+    * Copy the `user_config.cfg.sample` to `user_config.cfg`.
+    * Edit `user_config.cfg` with your settings. The `comfyui_url` should be the address of your ComfyUI instance, accessible from within the Docker network (e.g., `http://host.docker.internal:8188` or your server's IP).
 
 3. **Build and run with Docker Compose:**
     ```bash

@@ -89,6 +82,7 @@ The application is configured via the `user_config.cfg` file.
 | `[comfyui]` | `secondary_topic` | A secondary topic for prompt generation. | |
 | `[comfyui]` | `flux` | Enable FLUX models (`True`/`False`). | `False` |
 | `[comfyui]` | `qwen` | Enable Qwen models (`True`/`False`). | `False` |
+| `[comfyui]` | `only_flux` | Only use FLUX models (`True`/`False`). | `False` |
 | `[comfyui:flux]` | `models` | A comma-separated list of FLUX models. | `flux1-dev-Q4_0.gguf,flux1-schnell-Q4_0.gguf` |
 | `[comfyui:qwen]` | `models` | A comma-separated list of Qwen models. | `qwen-image-Q4_K_S.gguf, qwen-image-Q2_K.gguf` |
 | `[openwebui]` | `base_url` | The base URL for OpenWebUI. | `https://openwebui` |

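As a sketch of how these options are consumed (section and option names come from the table above; the code itself is illustrative, not from the repo):

```python
import configparser

config = configparser.ConfigParser()
config.read("./user_config.cfg")

comfy = config["comfyui"]
# getboolean() parses the True/False strings the table documents;
# the fallback values mirror the documented defaults.
flux_enabled = comfy.getboolean("flux", fallback=False)
qwen_enabled = comfy.getboolean("qwen", fallback=False)
only_flux = comfy.getboolean("only_flux", fallback=False)  # present on 0.5.2 only

# model lists are comma-separated strings, e.g. under [comfyui:flux]
flux_models = [m.strip() for m in config["comfyui:flux"]["models"].split(",")]
```
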
@@ -102,24 +96,14 @@ The application is configured via the `user_config.cfg` file.
 ## Usage
 
 * **Gallery:** Open your browser to `http://<server_ip>:<port>` to see the gallery of generated images.
-* **Create Image:** Navigate to `/create` or `/create_image` to manually trigger image generation with various model options.
-* **Job Queue:** Monitor and cancel running/pending jobs via the gallery interface.
-* **API Endpoints:**
-    * `/api/queue` - Get current job queue details (JSON)
-    * `/cancel` - Cancel the current running job
+* **Create Image:** Navigate to `/create` to manually trigger image generation.
 
 ## Dependencies
 
 * Flask
 * comfy_api_simplified
 * APScheduler
 * Pillow
-* tenacity
-* nest_asyncio
-* openai
-* websockets
-* bump-my-version
-* openwebui-chat-client
 * And others, see `requirements.txt`.
 
 ## Contributing

@@ -5,7 +5,6 @@ import sys
 import time
 import os
 import random
-import shutil
 from PIL import Image
 import nest_asyncio
 import json

@@ -39,21 +38,10 @@ def save_prompt(prompt):
 
 
 def load_config() -> configparser.ConfigParser:
-    """Loads user configuration from ./user_config.cfg. If it doesn't exist, copies from user_config.cfg.sample."""
+    """Loads user configuration from ./user_config.cfg."""
     user_config = configparser.ConfigParser()
-    config_path = "./user_config.cfg"
-    sample_path = "./user_config.cfg.sample"
-
-    if not os.path.exists(config_path):
-        if os.path.exists(sample_path):
-            shutil.copy(sample_path, config_path)
-            logging.info("Configuration file copied from sample.")
-        else:
-            logging.error("Neither user_config.cfg nor user_config.cfg.sample found.")
-            sys.exit(1)
-
     try:
-        user_config.read(config_path)
+        user_config.read("./user_config.cfg")
         logging.debug("Configuration loaded successfully.")
         return user_config
     except KeyError as e:

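One behavioural consequence of this change, shown as a standalone sketch: `ConfigParser.read()` silently skips files it cannot open, so on 0.5.2 a missing `user_config.cfg` only surfaces later as a `KeyError`, whereas main's sample-copy (or `sys.exit(1)`) fails fast.

```python
import configparser

parser = configparser.ConfigParser()
loaded = parser.read("./does_not_exist.cfg")  # no exception raised here
print(loaded)  # [] -- the list of files actually read

try:
    parser["comfyui"]  # the failure only appears at first section lookup
except KeyError as e:
    print(f"KeyError deferred to lookup time: {e}")
```
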
@@ -199,63 +187,24 @@ def load_prompt_models_from_config():
     return prompt_models
 
 
-def build_user_content(topic: str = "random") -> str:
-    """Build the user content string for prompt generation, including topic instructions and recent prompts avoidance."""
-    config = load_config()
-    topic_instruction = ""
-    selected_topic = ""
-    secondary_topic_instruction = ""
-    # Unique list of recent prompts
-    recent_prompts = list(set(load_recent_prompts()))
-
-    if topic == "random":
-        topics = [t.strip() for t in config["comfyui"]["topics"].split(",") if t.strip()]
-        selected_topic = random.choice(topics) if topics else ""
-    elif topic != "":
-        selected_topic = topic
-    else:
-        # Decide on whether to include a topic (e.g., 30% chance to include)
-        topics = [t.strip() for t in config["comfyui"]["topics"].split(",") if t.strip()]
-        if random.random() < 0.3 and topics:
-            selected_topic = random.choice(topics)
-
-    if selected_topic != "":
-        topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
-
-    # Add secondary topic if configured and not empty
-    secondary_topic = config["comfyui"].get("secondary_topic", "").strip()
-    if secondary_topic:
-        secondary_topic_instruction = f" Additionally incorporate the theme of '{secondary_topic}' into the new prompt."
-
-    user_content = (
-        "Can you generate me a really random image idea, Do not exceed 20 words. Use clear language, not poetic metaphors."
-        + topic_instruction
-        + secondary_topic_instruction
-        + "Avoid prompts similar to the following:"
-        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
-    )
-
-    return user_content
-
-
 def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
     """Create a prompt using a randomly selected model from OpenWebUI or OpenRouter.
 
     If OpenWebUI fails, it will retry once. If it fails again, it will fallback to OpenRouter.
     """
     prompt_models = load_prompt_models_from_config()
 
     if not prompt_models:
         logging.warning("No prompt generation models configured.")
         return None
 
     # Randomly select a model
     service, model = random.choice(prompt_models)
 
     # Import here to avoid circular imports
     from libs.openwebui import create_prompt_on_openwebui
     from libs.openrouter import create_prompt_on_openrouter
 
     if service == "openwebui":
         try:
             # First attempt with OpenWebUI

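On main, the `build_user_content` helper removed above is the single place this prompt text gets assembled; a hypothetical call (the topic value is invented for illustration):

```python
from libs.generic import build_user_content

# Returns the instruction string sent to the LLM: the base request,
# optional topic clauses, and a numbered list of recent prompts to avoid.
content = build_user_content(topic="retro sci-fi")
print(content)
# Can you generate me a really random image idea, Do not exceed 20 words. ...
#  Incorporate the theme of 'retro sci-fi' into the new prompt. ...
```
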
@@ -263,13 +212,13 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
             result = create_prompt_on_openwebui(base_prompt, topic, model)
             if result:
                 return result
 
             # If first attempt returns None, try again
             logging.warning("First OpenWebUI attempt failed. Retrying...")
             result = create_prompt_on_openwebui(base_prompt, topic, model)
             if result:
                 return result
 
             # If second attempt fails, fallback to OpenRouter
             logging.warning("Second OpenWebUI attempt failed. Falling back to OpenRouter...")
             openrouter_models = [m for m in prompt_models if m[0] == "openrouter"]

@@ -279,7 +228,7 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
             else:
                 logging.error("No OpenRouter models configured for fallback.")
                 return "A colorful abstract composition"  # Default fallback prompt
 
         except Exception as e:
             logging.error(f"Error with OpenWebUI: {e}")
             # Fallback to OpenRouter on exception

@@ -295,7 +244,7 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
             else:
                 logging.error("No OpenRouter models configured for fallback.")
                 return "A colorful abstract composition"  # Default fallback prompt
 
     elif service == "openrouter":
         try:
             # Use OpenRouter

@@ -303,7 +252,7 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
         except Exception as e:
             logging.error(f"Error with OpenRouter: {e}")
             return "A colorful abstract composition"  # Default fallback prompt
 
 
 user_config = load_config()
 output_folder = user_config["comfyui"]["output_dir"]

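The retry-then-fallback flow in `create_prompt_with_random_model` is hand-rolled; the same shape can be expressed generically. A hypothetical helper (not in the repo) with matching semantics: retry the primary on an empty result, jump to the fallback on an exception, and keep the hard-coded default as the last resort.

```python
import logging
from typing import Callable, Optional

def with_retry_and_fallback(
    primary: Callable[[], Optional[str]],
    fallback: Callable[[], Optional[str]],
    attempts: int = 2,
    default: str = "A colorful abstract composition",
) -> str:
    """Try primary up to `attempts` times, then the fallback, then a default."""
    for attempt in range(attempts):
        try:
            if result := primary():
                return result
            logging.warning(f"Primary attempt {attempt + 1} returned nothing; retrying...")
        except Exception as e:
            logging.error(f"Primary failed: {e}")
            break  # an exception skips straight to the fallback, as in the code above
    try:
        return fallback() or default
    except Exception:
        return default
```
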
@@ -2,7 +2,7 @@ import random
 import logging
 from openai import OpenAI, RateLimitError
 import nest_asyncio
-from libs.generic import load_recent_prompts, load_config, build_user_content
+from libs.generic import load_recent_prompts, load_config
 from libs.openwebui import create_prompt_on_openwebui
 import re
 nest_asyncio.apply()

@@ -40,7 +40,36 @@ def create_prompt_on_openrouter(prompt: str, topic: str = "random", model: str =
         logging.warning("OpenRouter is not enabled in the configuration.")
         return ""
 
-    user_content = build_user_content(topic)
+    topic_instruction = ""
+    selected_topic = ""
+    secondary_topic_instruction = ""
+    # Unique list of recent prompts
+    recent_prompts = list(set(load_recent_prompts()))
+    if topic == "random":
+        topics = [t.strip() for t in config["comfyui"]["topics"].split(",") if t.strip()]
+        selected_topic = random.choice(topics) if topics else ""
+    elif topic != "":
+        selected_topic = topic
+    else:
+        # Decide on whether to include a topic (e.g., 30% chance to include)
+        topics = [t.strip() for t in config["comfyui"]["topics"].split(",") if t.strip()]
+        if random.random() < 0.3 and topics:
+            selected_topic = random.choice(topics)
+    if selected_topic != "":
+        topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
+
+    # Add secondary topic if configured and not empty
+    secondary_topic = config["comfyui"].get("secondary_topic", "").strip()
+    if secondary_topic:
+        secondary_topic_instruction = f" Additionally incorporate the theme of '{secondary_topic}' into the new prompt, in the style of."
+
+    user_content = (
+        "Can you generate me a really random image idea, Do not exceed 20 words. Use clear language, not poetic metaphors."
+        + topic_instruction
+        + secondary_topic_instruction
+        + "Avoid prompts similar to the following:"
+        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+    )
 
     # Load configured models
     configured_models = [m.strip() for m in user_config["openrouter"]["models"].split(",") if m.strip()]

@@ -1,7 +1,7 @@
 import random
 import logging
 import nest_asyncio
-from libs.generic import load_recent_prompts, load_config, build_user_content
+from libs.generic import load_recent_prompts, load_config
 import re
 from openwebui_chat_client import OpenWebUIClient
 from datetime import datetime

@@ -19,7 +19,36 @@ def create_prompt_on_openwebui(prompt: str, topic: str = "random", model: str =
     """Sends prompt to OpenWebui and returns the generated response."""
     # Reload config to get latest values
     config = load_config()
-    user_content = build_user_content(topic)
+    topic_instruction = ""
+    selected_topic = ""
+    secondary_topic_instruction = ""
+    # Unique list of recent prompts
+    recent_prompts = list(set(load_recent_prompts()))
+    if topic == "random":
+        topics = [t.strip() for t in config["comfyui"]["topics"].split(",") if t.strip()]
+        selected_topic = random.choice(topics)
+    elif topic != "":
+        selected_topic = topic
+    else:
+        # Decide on whether to include a topic (e.g., 30% chance to include)
+        topics = [t.strip() for t in config["comfyui"]["topics"].split(",") if t.strip()]
+        if random.random() < 0.3 and topics:
+            selected_topic = random.choice(topics)
+    if selected_topic != "":
+        topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
+
+    # Add secondary topic if configured and not empty
+    secondary_topic = config["comfyui"].get("secondary_topic", "").strip()
+    if secondary_topic:
+        secondary_topic_instruction = f" Additionally incorporate the theme of '{secondary_topic}' into the new prompt, in the style of."
+
+    user_content = (
+        "Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors."
+        + topic_instruction
+        + secondary_topic_instruction
+        + "Avoid prompts similar to the following:"
+        + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
+    )
 
     if model:
         # Use the specified model

@@ -167,7 +167,7 @@
 "38": {
   "inputs": {
     "unet_name": "flux1-dev-Q4_0.gguf",
-    "device": "cuda:0",
+    "device": "cuda:1",
     "virtual_vram_gb": 0,
     "use_other_vram": true,
     "expert_mode_allocations": ""

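Since only the `device` string differs here, a small sketch of patching it at load time rather than keeping per-GPU copies of the workflow JSON (file path and usage are assumptions, standard library only):

```python
import json

# load an exported ComfyUI workflow in API format (hypothetical path)
with open("workflows/flux.json") as f:
    workflow = json.load(f)

# node "38" is the GGUF UNet loader shown in the diff above
workflow["38"]["inputs"]["device"] = "cuda:0"  # or "cuda:1", per host

print(json.dumps(workflow["38"]["inputs"], indent=2))
```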