Support multiple OpenWebUI models with random selection per request

This commit is contained in:
Karl Hudgell 2025-04-12 10:17:24 +01:00
parent e4428b4692
commit 6fdfb51a49
2 changed files with 6 additions and 4 deletions

3
lib.py
View File

@ -70,9 +70,10 @@ def rename_image() -> str | None:
def create_prompt_on_openwebui(prompt: str) -> str:
"""Sends prompt to OpenWebui and returns the generated response."""
model = random.choice(user_config["openwebui"]["models"].split(","))
response = litellm.completion(
api_base=user_config["openwebui"]["base_url"],
model="openai/" + user_config["openwebui"]["model"],
model="openai/" + model,
messages=[
{
"role": "system",

View File

@ -6,12 +6,13 @@ port = 5000
[comfyui]
comfyui_url = http://comfyui
models = zavychromaxl_v100.safetensors
models = zavychromaxl_v100.safetensors,ponyDiffusionV6XL_v6StartWithThisOne.safetensors
output_dir = ./output/
prompt = "Be explicit, only return the prompt and no other text, Generate a random detailed prompt for stable diffusion."
prompt = "Generate a random detailed prompt for stable diffusion."
width = 1568
height = 672
[openwebui]
base_url = https://openwebui
api_key = sk-
api_key = sk-
models = llama3:latest,cogito:14b,gemma3:12b