From 217fc9105213563079aab526dc1e31cf95598101 Mon Sep 17 00:00:00 2001
From: Karl
Date: Tue, 23 Sep 2025 14:28:56 +0100
Subject: [PATCH] Improve GPU support

Expand the CUDA check in comfy_fm_newgen.py's generate_image() to log
the detected GPU name, total memory, and device count. Honor a new
FM_NEWGEN_FORCE_CPU environment variable so generation can be forced
onto the CPU even when CUDA is available. On CUDA, additionally try to
enable xformers memory-efficient attention, falling back quietly when
xformers is not installed, and record the chosen device on the cached
pipeline.
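A minimal sketch of driving the new override from Python, assuming
generate_image() can be imported directly from the comfy_fm_newgen
module (the uid and prompt values here are illustrative):

    import os

    # The variable is read inside generate_image(), so it must be set
    # before the call (and before the pipeline is first loaded).
    os.environ["FM_NEWGEN_FORCE_CPU"] = "true"

    from comfy_fm_newgen import generate_image
    generate_image("12345", "portrait photo of a footballer")  # hypothetical arguments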
In gui.py, add a System Information panel to the configuration tab
(GPU status, GPU memory, PyTorch version, CUDA availability, a Force
CPU checkbox wired to FM_NEWGEN_FORCE_CPU, and a refresh button), show
a GPU memory readout on the generation tab, and poll it every 2
seconds while a generation thread is alive.
---
 comfy_fm_newgen.py |  35 ++++++++++++---
 gui.py             | 123 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 154 insertions(+), 4 deletions(-)
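Note: the generation-tab readout is built from torch's per-device CUDA
memory counters. For reference, a standalone sketch of the same
arithmetic the GUI applies to device 0 (all calls are standard
torch.cuda APIs; it prints nothing without an NVIDIA GPU):

    import torch

    if torch.cuda.is_available():
        gib = 1024 ** 3
        allocated = torch.cuda.memory_allocated(0) / gib  # tensors currently allocated
        reserved = torch.cuda.memory_reserved(0) / gib    # held by the caching allocator
        total = torch.cuda.get_device_properties(0).total_memory / gib
        print(f"GPU Memory: {allocated:.1f}GB / {total:.1f}GB (Reserved: {reserved:.1f}GB)")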
text="Force CPU usage (disable GPU)", variable=self.force_cpu_var, onvalue="true", offvalue="false", command=self.update_gpu_settings).grid(row=2, column=0, columnspan=2, sticky=tk.W, pady=5) + + # System details + ttk.Label(system_frame, text="PyTorch Version:").grid(row=4, column=0, sticky=tk.W, pady=2) + self.pytorch_version_var = tk.StringVar() + self.pytorch_version_var.set("Checking...") + ttk.Label(system_frame, textvariable=self.pytorch_version_var).grid(row=4, column=1, sticky=tk.W, padx=5, pady=2) + + ttk.Label(system_frame, text="CUDA Available:").grid(row=5, column=0, sticky=tk.W, pady=2) + self.cuda_available_var = tk.StringVar() + self.cuda_available_var.set("Checking...") + ttk.Label(system_frame, textvariable=self.cuda_available_var).grid(row=5, column=1, sticky=tk.W, padx=5, pady=2) + + # Refresh system info button + ttk.Button(system_frame, text="Refresh System Info", command=self.refresh_system_info).grid(row=6, column=0, columnspan=2, pady=5) + # Control buttons button_frame = ttk.Frame(self.config_frame) button_frame.pack(fill=tk.X, padx=10, pady=10) @@ -445,6 +562,11 @@ class FMFaceGeneratorGUI: self.update_mode_var = tk.StringVar(value="false") ttk.Checkbutton(gen_frame, text="Update existing (skip already processed)", variable=self.update_mode_var, onvalue="true", offvalue="false").grid(row=0, column=0, sticky=tk.W, pady=5) + # GPU Memory usage during generation + self.gpu_usage_var = tk.StringVar() + self.gpu_usage_var.set("GPU Memory: N/A") + ttk.Label(gen_frame, textvariable=self.gpu_usage_var).grid(row=1, column=0, sticky=tk.W, pady=5) + # Progress display progress_frame = ttk.LabelFrame(self.generation_frame, text="Progress", padding=10) progress_frame.pack(fill=tk.BOTH, expand=True, padx=10, pady=5) @@ -652,10 +774,14 @@ class FMFaceGeneratorGUI: self.generation_thread.daemon = True self.generation_thread.start() + # Start GPU memory monitoring + self.update_gpu_memory_usage() + def stop_generation_thread(self): """Stop the generation process""" self.stop_generation = True self.stop_button.config(state=tk.DISABLED) + self.gpu_usage_var.set("GPU Memory: N/A") # Reset GPU usage display self.log_message("Stopping generation...") def generation_worker(self):