mirror of https://github.com/karl0ss/ai_image_frame_server.git
synced 2025-08-12 20:58:28 +01:00

initial qwen support

This commit is contained in:
parent 1468ac4bbe
commit 14e69f7608
@@ -122,6 +122,7 @@ def generate_image(
 def select_model(model: str) -> tuple[str, str]:
     use_flux = json.loads(user_config["comfyui"].get("FLUX", "false").lower())
     only_flux = json.loads(user_config["comfyui"].get("ONLY_FLUX", "false").lower())
+    use_qwen = json.loads(user_config["comfyui"].get("Qwen", "false").lower())
 
     if model == "Random Image Model":
         selected_workflow = "FLUX" if (use_flux and (only_flux or random.choice([True, False]))) else "SDXL"
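
The new use_qwen flag is parsed the same way as the existing FLUX flags: the raw config string is lowercased and handed to json.loads, which only accepts lowercase "true"/"false". Below is a minimal sketch of the config shape this implies, assuming an INI-style file read with configparser; the [comfyui] section, the Qwen key, and the [comfyui:qwen] models key come from the diff, everything else is illustrative.

# Minimal sketch of the config the new code reads; values are illustrative.
import configparser
import json

config = configparser.ConfigParser()
config.read_string("""
[comfyui]
FLUX = True
ONLY_FLUX = False
Qwen = True

[comfyui:qwen]
models = qwen-image-Q2_K.gguf
""")

# json.loads only understands lowercase true/false, hence the .lower() call.
use_qwen = json.loads(config["comfyui"].get("Qwen", "false").lower())
qwen_models = config["comfyui:qwen"]["models"].split(",")
print(use_qwen, qwen_models)  # True ['qwen-image-Q2_K.gguf']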
@@ -133,6 +134,8 @@ def select_model(model: str) -> tuple[str, str]:
     if model == "Random Image Model":
         if selected_workflow == "FLUX":
             valid_models = user_config["comfyui:flux"]["models"].split(",")
+        elif selected_workflow == "Qwen":
+            valid_models = user_config["comfyui:qwen"]["models"].split(",")
         else:  # SDXL
             available_model_list = user_config["comfyui"]["models"].split(",")
             valid_models = list(set(get_available_models()) & set(available_model_list))
@@ -173,6 +176,20 @@ def create_image(prompt: str | None = None, model: str = "Random Image Model") -
             model_param="unet_name",
             model=model
         )
+    elif selected_workflow == "Qwen":
+        generate_image(
+            file_name="image",
+            comfy_prompt=prompt,
+            workflow_path="./workflow_qwen.json",
+            prompt_node="Positive",
+            seed_node="KSampler",
+            seed_param="seed",
+            save_node="Save Image",
+            save_param="filename_prefix",
+            model_node="Load Checkpoint",
+            model_param="ckpt_name",
+            model=model
+        )
     else:  # SDXL
         generate_image("image", comfy_prompt=prompt, model=model)
 
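
The Qwen branch drives generate_image entirely through node titles and input names (Positive, KSampler/seed, Save Image/filename_prefix, Load Checkpoint/ckpt_name) plus the workflow path. Below is a hypothetical sketch of that style of title-based patching of a ComfyUI API-format workflow; it is not the project's actual generate_image, only an illustration of what those parameters are assumed to select.

import json
import random

def patch_workflow(workflow_path, comfy_prompt, model,
                   prompt_node="Positive", seed_node="KSampler", seed_param="seed",
                   model_node="Load Checkpoint", model_param="ckpt_name"):
    # Load the API-format workflow and patch nodes, matching them by _meta title.
    with open(workflow_path) as f:
        workflow = json.load(f)
    for node in workflow.values():
        title = node.get("_meta", {}).get("title", "")
        if title == prompt_node:
            node["inputs"]["text"] = comfy_prompt                 # positive prompt text
        elif title == seed_node:
            node["inputs"][seed_param] = random.getrandbits(32)   # fresh seed per run
        elif title == model_node:
            node["inputs"][model_param] = model                   # model file to load
    return workflow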
@@ -115,9 +115,11 @@ def get_current_version():
 def load_models_from_config():
     flux_models = load_config()["comfyui:flux"]["models"].split(",")
     sdxl_models = load_config()["comfyui"]["models"].split(",")
+    qwen_models = load_config()["comfyui:qwen"]["models"].split(",")
     sorted_flux_models = sorted(flux_models, key=str.lower)
     sorted_sdxl_models = sorted(sdxl_models, key=str.lower)
-    return sorted_sdxl_models, sorted_flux_models
+    sorted_qwen_models = sorted(qwen_models, key=str.lower)
+    return sorted_sdxl_models, sorted_flux_models, sorted_qwen_models
 
 
 def load_topics_from_config():
@@ -35,14 +35,15 @@ def create():
         threading.Thread(target=lambda: create_image(prompt, model)).start()
         return redirect(url_for("create_routes.image_queued", prompt=prompt, model=model.split(".")[0]))
 
-    # Load all models (SDXL and FLUX only)
-    sdxl_models, flux_models = load_models_from_config()
+    # Load all models (SDXL, FLUX, and Qwen)
+    sdxl_models, flux_models, qwen_models = load_models_from_config()
     openwebui_models = load_openwebui_models_from_config()
     openrouter_models = load_openrouter_models_from_config()
 
     return render_template("create_image.html",
                            sdxl_models=sdxl_models,
                            flux_models=flux_models,
+                           qwen_models=qwen_models,
                            openwebui_models=openwebui_models,
                            openrouter_models=openrouter_models,
                            topics=load_topics_from_config())
@@ -62,14 +63,15 @@ def create_image_page():
     if user_config["frame"]["create_requires_auth"] == "True" and not session.get("authenticated"):
         return redirect(url_for("auth_routes.login", next=request.path))
 
-    # Load all models (SDXL and FLUX only)
-    sdxl_models, flux_models = load_models_from_config()
+    # Load all models (SDXL, FLUX, and Qwen)
+    sdxl_models, flux_models, qwen_models = load_models_from_config()
     openwebui_models = load_openwebui_models_from_config()
     openrouter_models = load_openrouter_models_from_config()
 
     return render_template("create_image.html",
                            sdxl_models=sdxl_models,
                            flux_models=flux_models,
+                           qwen_models=qwen_models,
                            openwebui_models=openwebui_models,
                            openrouter_models=openrouter_models,
                            topics=load_topics_from_config())
@@ -157,6 +157,13 @@
                 {% endfor %}
             </optgroup>
             {% endif %}
+            {% if qwen_models %}
+            <optgroup label="Qwen">
+                {% for m in qwen_models %}
+                <option value="{{ m }}">{{ m.rsplit('.', 1)[0] if '.' in m else m }}</option>
+                {% endfor %}
+            </optgroup>
+            {% endif %}
             {% if sdxl_models %}
             <optgroup label="SDXL">
                 {% for m in sdxl_models %}
workflow_qwen.json (new file, 147 lines)
@@ -0,0 +1,147 @@
+{
+  "93": {
+    "inputs": {
+      "text": "jpeg compression",
+      "speak_and_recognation": {
+        "__value__": [
+          false,
+          true
+        ]
+      },
+      "clip": [
+        "126",
+        0
+      ]
+    },
+    "class_type": "CLIPTextEncode",
+    "_meta": {
+      "title": "CLIP Text Encode (Prompt)"
+    }
+  },
+  "95": {
+    "inputs": {
+      "seed": 22,
+      "steps": 10,
+      "cfg": 4.5,
+      "sampler_name": "euler",
+      "scheduler": "normal",
+      "denoise": 1,
+      "model": [
+        "127",
+        0
+      ],
+      "positive": [
+        "100",
+        0
+      ],
+      "negative": [
+        "93",
+        0
+      ],
+      "latent_image": [
+        "97",
+        0
+      ]
+    },
+    "class_type": "KSampler",
+    "_meta": {
+      "title": "KSampler"
+    }
+  },
+  "97": {
+    "inputs": {
+      "width": 1280,
+      "height": 768,
+      "length": 1,
+      "batch_size": 1
+    },
+    "class_type": "EmptyHunyuanLatentVideo",
+    "_meta": {
+      "title": "EmptyHunyuanLatentVideo"
+    }
+  },
+  "98": {
+    "inputs": {
+      "samples": [
+        "95",
+        0
+      ],
+      "vae": [
+        "128",
+        0
+      ]
+    },
+    "class_type": "VAEDecode",
+    "_meta": {
+      "title": "VAE Decode"
+    }
+  },
+  "100": {
+    "inputs": {
+      "text": "Terminator riding a push bike",
+      "speak_and_recognation": {
+        "__value__": [
+          false,
+          true
+        ]
+      },
+      "clip": [
+        "126",
+        0
+      ]
+    },
+    "class_type": "CLIPTextEncode",
+    "_meta": {
+      "title": "CLIP Text Encode (Prompt)"
+    }
+  },
+  "102": {
+    "inputs": {
+      "images": [
+        "98",
+        0
+      ]
+    },
+    "class_type": "PreviewImage",
+    "_meta": {
+      "title": "Preview Image"
+    }
+  },
+  "126": {
+    "inputs": {
+      "clip_name": "Qwen2.5-VL-7B-Instruct-Q3_K_M.gguf",
+      "type": "qwen_image",
+      "device": "cuda:1",
+      "virtual_vram_gb": 6,
+      "use_other_vram": true,
+      "expert_mode_allocations": ""
+    },
+    "class_type": "CLIPLoaderGGUFDisTorchMultiGPU",
+    "_meta": {
+      "title": "CLIPLoaderGGUFDisTorchMultiGPU"
+    }
+  },
+  "127": {
+    "inputs": {
+      "unet_name": "qwen-image-Q2_K.gguf",
+      "device": "cuda:0",
+      "virtual_vram_gb": 6,
+      "use_other_vram": true,
+      "expert_mode_allocations": ""
+    },
+    "class_type": "UnetLoaderGGUFDisTorchMultiGPU",
+    "_meta": {
+      "title": "UnetLoaderGGUFDisTorchMultiGPU"
+    }
+  },
+  "128": {
+    "inputs": {
+      "vae_name": "qwen_image_vae.safetensors",
+      "device": "cuda:1"
+    },
+    "class_type": "VAELoaderMultiGPU",
+    "_meta": {
+      "title": "VAELoaderMultiGPU"
+    }
+  }
+}
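
workflow_qwen.json above is in ComfyUI's API ("prompt") format, so it can also be queued directly against a running ComfyUI instance. Below is a minimal sketch using only the standard library; the default endpoint http://127.0.0.1:8188/prompt is a ComfyUI convention, and the host and response handling here are assumptions.

import json
import urllib.request

with open("workflow_qwen.json") as f:
    workflow = json.load(f)

# POST the workflow to ComfyUI's /prompt endpoint to queue it for execution.
payload = json.dumps({"prompt": workflow}).encode("utf-8")
req = urllib.request.Request(
    "http://127.0.0.1:8188/prompt",
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(resp.read().decode())  # e.g. {"prompt_id": "...", "number": ...}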