From e2acd2dcd6b437e570050dac671759ea5cbc92a9 Mon Sep 17 00:00:00 2001
From: Karl
Date: Tue, 29 Jul 2025 14:25:13 +0100
Subject: [PATCH] Use OpenWebUI client rather than litellm and ollama

---
 libs/comfyui.py                  |  2 +-
 libs/generic.py                  |  2 +-
 libs/{ollama.py => openwebui.py} | 77 ++++++++++++++++---------------
 requirements.txt                 | Bin 234 -> 280 bytes
 routes/create_routes.py          |  4 +-
 5 files changed, 45 insertions(+), 40 deletions(-)
 rename libs/{ollama.py => openwebui.py} (58%)

diff --git a/libs/comfyui.py b/libs/comfyui.py
index 4c0e0b6..161a669 100644
--- a/libs/comfyui.py
+++ b/libs/comfyui.py
@@ -15,7 +15,7 @@ from tenacity import (
 import nest_asyncio
 from libs.generic import rename_image, load_config, save_prompt
 from libs.create_thumbnail import generate_thumbnail
-from libs.ollama import create_prompt_on_openwebui
+from libs.openwebui import create_prompt_on_openwebui
 
 nest_asyncio.apply()
 logging.basicConfig(level=logging.INFO)
diff --git a/libs/generic.py b/libs/generic.py
index ae6af92..e9dd2cf 100644
--- a/libs/generic.py
+++ b/libs/generic.py
@@ -170,7 +170,7 @@ def create_prompt_with_random_model(base_prompt: str, topic: str = "random"):
 
     if service == "openwebui":
         # Import here to avoid circular imports
-        from libs.ollama import create_prompt_on_openwebui
+        from libs.openwebui import create_prompt_on_openwebui
         return create_prompt_on_openwebui(base_prompt, topic)
     elif service == "openrouter":
         # Import here to avoid circular imports
diff --git a/libs/ollama.py b/libs/openwebui.py
similarity index 58%
rename from libs/ollama.py
rename to libs/openwebui.py
index 2aeb967..d1d52b9 100644
--- a/libs/ollama.py
+++ b/libs/openwebui.py
@@ -1,9 +1,11 @@
 import random
 import logging
-import litellm
 import nest_asyncio
 from libs.generic import load_recent_prompts, load_config
 import re
+from openwebui_chat_client import OpenWebUIClient
+from datetime import datetime
+
 nest_asyncio.apply()
 
 logging.basicConfig(level=logging.INFO)
@@ -33,53 +35,56 @@ def create_prompt_on_openwebui(prompt: str, topic: str = "random", model: str =
         topic_instruction = f" Incorporate the theme of '{selected_topic}' into the new prompt."
 
     user_content = (
-        "Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors.”"
+        "Can you generate me a really random image idea, Do not exceed 10 words. Use clear language, not poetic metaphors."
         + topic_instruction
         + "Avoid prompts similar to the following:"
         + "\n".join(f"{i+1}. {p}" for i, p in enumerate(recent_prompts))
     )
-
     if model:
         # Use the specified model
         model = model
     else:
         # Select a random model
-        model = random.choice(user_config["openwebui"]["models"].split(","))
-    response = litellm.completion(
-        api_base=user_config["openwebui"]["base_url"],
-        model="openai/" + model,
-        messages=[
-            {
-                "role": "system",
-                "content": (
-                    "You are a prompt generator for Stable Diffusion. "
-                    "Generate a detailed and imaginative prompt with a strong visual theme. "
-                    "Focus on lighting, atmosphere, and artistic style. "
-                    "Keep the prompt concise, no extra commentary or formatting."
-                ),
-            },
-            {
-                "role": "user",
-                "content": user_content,
-            },
-        ],
-        api_key=user_config["openwebui"]["api_key"],
+        model = random.choice(user_config["openwebui"]["models"].split(",")).strip()
+
+    # Create OpenWebUI client
+    client = OpenWebUIClient(
+        base_url=user_config["openwebui"]["base_url"],
+        token=user_config["openwebui"]["api_key"],
+        default_model_id=model
     )
-    prompt = response["choices"][0]["message"]["content"].strip('"')
-
-    # response = litellm.completion(
-    #     api_base=user_config["openwebui"]["base_url"],
-    #     model="openai/brxce/stable-diffusion-prompt-generator:latest",
-    #     messages=[
-    #         {
-    #             "role": "user",
-    #             "content": prompt,
-    #         },
-    #     ],
-    #     api_key=user_config["openwebui"]["api_key"],
-    # )
-    # prompt = response["choices"][0]["message"]["content"].strip('"')
+
+    # Prepare messages for the chat
+    messages = [
+        {
+            "role": "system",
+            "content": (
+                "You are a prompt generator for Stable Diffusion. "
+                "Generate a detailed and imaginative prompt with a strong visual theme. "
+                "Focus on lighting, atmosphere, and artistic style. "
+                "Keep the prompt concise, no extra commentary or formatting."
+            ),
+        },
+        {
+            "role": "user",
+            "content": user_content,
+        },
+    ]
+
+    # Send the chat request
+    result = client.chat(
+        question=user_content,
+        chat_title=datetime.now().strftime("%Y-%m-%d %H:%M"),
+        folder_name="Ai Image Requests"
+    )
+
+    if result:
+        prompt = result["response"].strip('"')
+    else:
+        # Fallback if the request fails
+        prompt = "A vibrant landscape"
+
     match = re.search(r'"([^"]+)"', prompt)
     if not match:
         match = re.search(r":\s*\n*\s*(.+)", prompt)
 
diff --git a/requirements.txt b/requirements.txt
index 8d8f8f8db2e4dd30b02dfd09b6f998fe3ebd3d6d..d3dea6df6930b34c259ad18e38c54ece4c6c7ae1 100644
GIT binary patch
delta 53
zcmaFGID=`zD?MHYE{1%D0)|wEJce>0nZ!`akjbFSkj#+5kjPL1X5|1yK=LIF09%#|
Ai~s-t

delta 6
NcmbQi^onu9D*y>q0{{R3

diff --git a/routes/create_routes.py b/routes/create_routes.py
index 0fdd02f..79a96e8 100644
--- a/routes/create_routes.py
+++ b/routes/create_routes.py
@@ -1,7 +1,7 @@
 from flask import Blueprint, request, render_template, redirect, url_for, session
 import threading
 from libs.comfyui import create_image, select_model, get_available_models
-from libs.ollama import create_prompt_on_openwebui
+from libs.openwebui import create_prompt_on_openwebui
 from libs.generic import load_models_from_config, load_topics_from_config, load_openrouter_models_from_config, load_openwebui_models_from_config, create_prompt_with_random_model
 import os
 
@@ -23,7 +23,7 @@ def create():
         # Use the specified prompt model
         service, service_model = prompt_model.split(":", 1) if ":" in prompt_model else (prompt_model, "")
         if service == "openwebui":
-            from libs.ollama import create_prompt_on_openwebui
+            from libs.openwebui import create_prompt_on_openwebui
             prompt = create_prompt_on_openwebui(user_config["comfyui"]["prompt"], topic, service_model)
         elif service == "openrouter":
             from libs.openrouter import create_prompt_on_openrouter
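
Review note on the new create_prompt_on_openwebui(): the `messages` list (which
carries the system prompt) is built but never passed to the client -- client.chat()
is only given question=user_content -- so the Stable Diffusion instructions are
silently dropped. Since chat() takes a single question string (the only
message-bearing parameter the patch itself uses), one fix is to fold the
instructions into the question and delete the dead `messages` list. A minimal
sketch, reusing only the client calls that appear in the patch; the base_url,
token, and model id below are placeholders, not values from this repo's config:

    from datetime import datetime
    from openwebui_chat_client import OpenWebUIClient

    # Placeholder connection details; the real values come from
    # user_config["openwebui"]["base_url"] / ["api_key"] / ["models"].
    client = OpenWebUIClient(
        base_url="http://localhost:3000",
        token="sk-...",
        default_model_id="llama3",
    )

    system_instructions = (
        "You are a prompt generator for Stable Diffusion. "
        "Generate a detailed and imaginative prompt with a strong visual theme. "
        "Focus on lighting, atmosphere, and artistic style. "
        "Keep the prompt concise, no extra commentary or formatting."
    )
    user_content = "Can you generate me a really random image idea, Do not exceed 10 words."

    # Fold the system-style instructions into the single question string,
    # since that is the only message content chat() is given in this patch.
    result = client.chat(
        question=f"{system_instructions}\n\n{user_content}",
        chat_title=datetime.now().strftime("%Y-%m-%d %H:%M"),
        folder_name="Ai Image Requests",
    )

    # Same fallback behaviour as the patched code.
    prompt = result["response"].strip('"') if result else "A vibrant landscape"

This keeps the prompt-generation behaviour the litellm version had (system +
user message) without assuming any client parameters beyond those the patch
already uses.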